| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import unittest
from lib import cloudshell
from lib.TeraSortBenchmark import TeraSortBenchmark
class CloudStone8(TeraSortBenchmark):
"Tests variable length input keys and values"
keymin = 10
keymax = 50
valmin = 100
valmax = 500
rows = 1000000
tablename = 'VariableLengthIngestTable'
def shortDescription(self):
return 'Ingests %d rows of variable key and value length to be sorted. '\
'Lower score is better.' % (self.numrows())
def setSpeed(self, speed):
if speed == "slow":
self.rows = 1000000
self.keymin = 60
self.keymax = 100
self.valmin = 200
self.valmax = 300
self.numsplits = 400
elif speed == "medium":
self.rows = 100000
self.keymin = 40
self.keymax = 70
self.valmin = 130
self.valmax = 170
self.numsplits = 40
elif speed == "fast":
self.rows = 10000
self.keymin = 30
self.keymax = 50
self.valmin = 80
self.valmax = 100
self.numsplits = 4
def suite():
result = unittest.TestSuite([
CloudStone8(),
])
return result
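# Editor's sketch (not part of the original benchmark file): assuming
# TeraSortBenchmark provides a runnable default test method (it is
# instantiated bare in suite() above), the suite can be executed with the
# standard unittest runner, optionally after shrinking the ingest with
# setSpeed('fast').
if __name__ == '__main__':
    bench = CloudStone8()
    bench.setSpeed('fast')  # 10k rows instead of 1M for a quick smoke run
    unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite([bench]))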
|
{
"content_hash": "6898f833ff8c3dd48e14a9d116835080",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 26.520833333333332,
"alnum_prop": 0.5420267085624509,
"repo_name": "wjsl/jaredcumulo",
"id": "a02a0a1cae00605668e4e25772efe7d1b42d9477",
"size": "2056",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/system/bench/cloudstone8/cloudstone8.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27323"
},
{
"name": "C++",
"bytes": "19683"
},
{
"name": "CSS",
"bytes": "8226"
},
{
"name": "Java",
"bytes": "13855810"
},
{
"name": "JavaScript",
"bytes": "249599"
},
{
"name": "Perl",
"bytes": "15899"
},
{
"name": "Python",
"bytes": "290137"
},
{
"name": "Ruby",
"bytes": "1844"
},
{
"name": "Shell",
"bytes": "166724"
},
{
"name": "TeX",
"bytes": "104446"
}
],
"symlink_target": ""
}
|
from rpython.rlib import jit
import interpret
import parse
import kernel_type as kt
def entry_point(argv):
jit.set_param(None, "trace_limit", 20000)
interpret.run(argv)
return 0
def target(driver, args):
return entry_point, None
|
{
"content_hash": "3090afe2a4a23d1cde7b9dc7c6cba417",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 19.076923076923077,
"alnum_prop": 0.7217741935483871,
"repo_name": "euccastro/icbink",
"id": "27704364c3a1239bddfb8f932f0a3115bfd78abd",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entry_point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78717"
},
{
"name": "Shell",
"bytes": "563"
}
],
"symlink_target": ""
}
|
import random
def generate_random_stream(length):
"""Generate a random stream of booleans.
:param length: the stream length
:type length: int
:returns: iterator
"""
for _ in range(length):
yield bool(random.randint(0, 1))
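# Editor's sketch: generate_random_stream() is lazy, so it can be consumed
# incrementally or materialised with list(); here we count the True values
# in a short stream.
if __name__ == '__main__':
    values = list(generate_random_stream(10))
    print('%d of %d elements are True' % (sum(values), len(values)))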
|
{
"content_hash": "add604e053b7e9e54468d5296d1cb6f1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 23.272727272727273,
"alnum_prop": 0.6484375,
"repo_name": "simondolle/dgim",
"id": "8fd14996562b370eb08eb708ca056fc1ef8ca2ee",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dgim/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "8241"
},
{
"name": "Python",
"bytes": "23133"
},
{
"name": "Shell",
"bytes": "6455"
}
],
"symlink_target": ""
}
|
"""
Test the content negotiation pieces.
Content negotiation will put something in
the environment that allows us to determine
what kind of store and serializer to use.
"""
from tiddlyweb.web.negotiate import figure_type
from tiddlyweb.config import config
def setup_module(module):
module.environ = {'tiddlyweb.config': config}
environ['REQUEST_METHOD'] = 'GET'
def test_accept_header():
"""
Given an accept header in the environ,
determine the type we want.
"""
environ['HTTP_ACCEPT'] = 'text/plain; q=1.0, text/html; q=0.9, text/x-dvi; q=0.8, text/x-c'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/plain'
def test_accept_ill_formed_header():
"""
Given an accept header in the environ,
that is poorly formed, use default.
"""
environ['HTTP_ACCEPT'] = '; q=1.0, text/plain; q=1.0, text/html, text/x-dvi; q=0.8, text/x-c'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/html'
def test_accept_bad_q():
"""
Given a non-float q, ignore.
"""
environ['HTTP_ACCEPT'] = 'text/plain; q=hot, text/html, text/postscript; q=0.5'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/html'
def test_accept_extension():
"""
Ignore non q= style parameters.
"""
environ['HTTP_ACCEPT'] = 'text/plain; cookies=chip'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/plain'
def test_file_extension():
"""
Given a \.extension in the path_info,
determine the type we want.
"""
environ['PATH_INFO'] = '/bags/bag0/tiddlers/bigbox.html'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/html', \
'tiddlyweb.type should be text/html, found %s' % environ['tiddlyweb.type'][0]
def test_file_wins_over_header():
"""
Where there is both an extension and
an accept header, the extension wins.
"""
environ['HTTP_ACCEPT'] = 'text/plain; q=1.0, text/html, text/x-dvi; q=0.8, text/x-c'
environ['PATH_INFO'] = '/bags/bag0/tiddlers/bigbox.html'
figure_type(environ)
assert environ['tiddlyweb.type'][0] == 'text/html', \
'tiddlyweb.type should be text/html, found %s' % environ['tiddlyweb.type'][0]
|
{
"content_hash": "708fb1c8e16561956651398de6176acd",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 27.987654320987655,
"alnum_prop": 0.6409351565946184,
"repo_name": "funkyeah/tiddlyweb",
"id": "254044f40d39a886a55f1408faba7485813468e9",
"size": "2268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_web_negotiate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
RdmaConfig - file ``/etc/rdma/rdma.conf``
=========================================
"""
from .. import get_active_lines, Parser, parser, LegacyItemAccess
from insights.parsers import split_kv_pairs
from insights.specs import Specs
from insights.parsers import SkipException
@parser(Specs.rdma_conf)
class RdmaConfig(Parser, LegacyItemAccess):
"""
This class will parse the output of file ``/etc/rdma/rdma.conf``.
The rdma service reads /etc/rdma/rdma.conf file to find out which
kernel-level and user-level RDMA protocols the administrator
wants to be loaded by default.
Attributes:
data (dict): Dictionary of keys with values in dict.
Sample configuration file::
IPOIB_LOAD=yes
# Load SRP (SCSI Remote Protocol initiator support) module
SRP_LOAD=yes
# Load SRPT (SCSI Remote Protocol target support) module
SRPT_LOAD=yes
# Load iSER (iSCSI over RDMA initiator support) module
ISER_LOAD=yes
# Load iSERT (iSCSI over RDMA target support) module
ISERT_LOAD=yes
# Load RDS (Reliable Datagram Service) network protocol
RDS_LOAD=no
# Load NFSoRDMA client transport module
XPRTRDMA_LOAD=yes
# Load NFSoRDMA server transport module
SVCRDMA_LOAD=no
# Load Tech Preview device driver modules
TECH_PREVIEW_LOAD=no
# Should we modify the system mtrr registers? We may need to do this if you
# get messages from the ib_ipath driver saying that it couldn't enable
# write combining for the PIO buffs on the card.
#
# Note: recent kernels should do this for us, but in case they don't, we'll
# leave this option
FIXUP_MTRR_REGS=no
Examples:
>>> rdma_conf['IPOIB_LOAD']
'yes'
>>> rdma_conf["SRP_LOAD"]
'yes'
>>> rdma_conf["SVCRDMA_LOAD"]
'no'
"""
def parse_content(self, content):
_content = get_active_lines(content)
if not _content:
raise SkipException("Empty content.")
self.data = split_kv_pairs(_content)
|
{
"content_hash": "355ea763f8adb490c2b0b4c12452ad21",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 84,
"avg_line_length": 33.328125,
"alnum_prop": 0.6310360993905297,
"repo_name": "RedHatInsights/insights-core",
"id": "dd57bc58439212bf22bb4a8217be43b1f4235ecf",
"size": "2133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/rdma_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
}
|
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Lookout for Vision"
prefix = "lookoutvision"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateDataset = Action("CreateDataset")
CreateModel = Action("CreateModel")
CreateProject = Action("CreateProject")
DeleteDataset = Action("DeleteDataset")
DeleteModel = Action("DeleteModel")
DeleteProject = Action("DeleteProject")
DescribeDataset = Action("DescribeDataset")
DescribeModel = Action("DescribeModel")
DescribeModelPackagingJob = Action("DescribeModelPackagingJob")
DescribeProject = Action("DescribeProject")
DescribeTrialDetection = Action("DescribeTrialDetection")
DetectAnomalies = Action("DetectAnomalies")
ListDatasetEntries = Action("ListDatasetEntries")
ListModelPackagingJobs = Action("ListModelPackagingJobs")
ListModels = Action("ListModels")
ListProjects = Action("ListProjects")
ListTagsForResource = Action("ListTagsForResource")
ListTrialDetections = Action("ListTrialDetections")
StartModel = Action("StartModel")
StartModelPackagingJob = Action("StartModelPackagingJob")
StartTrialDetection = Action("StartTrialDetection")
StopModel = Action("StopModel")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateDatasetEntries = Action("UpdateDatasetEntries")
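# Editor's sketch (not part of the original module): the module-level Action
# constants and the ARN helper above are the pieces normally embedded in an
# IAM policy statement. The project name, region and account id below are
# placeholder values, not taken from the source.
if __name__ == "__main__":
    example_action = DetectAnomalies  # i.e. Action("DetectAnomalies")
    example_arn = ARN(
        resource="project/example-project",
        region="us-east-1",
        account="123456789012",
    )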
|
{
"content_hash": "bdd4734ab85fa5abe11147744e1b98a0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.7600250626566416,
"repo_name": "cloudtools/awacs",
"id": "c85892f5f54cba86f7cf5c9d6019cc957818a69a",
"size": "1712",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/lookoutvision.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import subprocess
import json
import os
import requests
import sys
from cli.settings import Settings
from cli.appconfig import AppConfig
from cli.containerconfig import ContainerConfig
from cli.utils import printException, printErrorMsg
requests.packages.urllib3.disable_warnings()
def describe():
return "streams the new output from the tasks's STDOUT and STDERR logs."
class RogerLogs(object):
def parse_args(self):
self.parser = argparse.ArgumentParser(
prog='roger logs', description=describe())
self.parser.add_argument('appTaskId', metavar='appTaskId',
help="first few letters of application task id. Example: 'content.5684")
self.parser.add_argument('-e', '--env', metavar='env',
help="environment to search. Example: 'dev' or 'stage'")
self.parser.add_argument('-H', '--hostname', metavar='hostname',
help="hostname to search. Example: 'daldevmesos01' or 'daldevmesos04'")
self.parser.add_argument(
'-f', '--follow', help="follow log output. Defaults to false.", action="store_true")
self.parser.add_argument(
'-t', '--timestamps', help="show timestamps. Defaults to false.", action="store_true")
self.parser.add_argument(
'-s', '--since', help="show logs since timestamp.")
self.parser.add_argument(
'-T', '--tail', help="number of lines to show from the end of the logs. If a negative number is given, it shows all.")
return self.parser
def main(self):
self.parser = self.parse_args()
args = self.parser.parse_args()
config_dir = settingObj.getConfigDir()
roger_env = appObj.getRogerEnv(config_dir)
environment = roger_env.get('default_environment', '')
if args.env is None:
if "ROGER_ENV" in os.environ:
env_var = os.environ.get('ROGER_ENV')
if env_var.strip() == '':
print(
"Environment variable $ROGER_ENV is not set.Using the default set from roger-mesos-tools.config file")
else:
print(
"Using value {} from environment variable $ROGER_ENV".format(env_var))
environment = env_var
else:
environment = args.env
if environment not in roger_env['environments']:
raise ValueError('Environment not found in roger-mesos-tools.config file.')
hostname = ''
containerId = ''
if args.hostname is None:
hostname = containerconfig.get_hostname_from_marathon(
environment, roger_env, args.appTaskId)
else:
hostname = args.hostname
if hostname != '': # Hostname maybe empty when the given appTaskId does not match any taskId from Marathon
(containerId, mesosTaskId) = containerconfig.get_containerid_mesostaskid(
args.appTaskId, hostname)
else:
print("Most likely hostname could not be retrieved with appTaskId {0}. Hostname is also \
an optional argument. See -h for usage.".format(args.appTaskId))
if containerId != '' and containerId is not None:
print("If there are multiple containers that pattern match the given mesos task Id, \
then will log into the first one")
print("Displaying logs in docker container - {0} on host - {1} for mesosTask Id {2}".format(
containerId, hostname, mesosTaskId))
command = "docker -H tcp://{0}:4243 logs ".format(hostname)
if args.follow:
command = "{} -f=true".format(command)
else:
command = "{} -f=false".format(command)
if args.since:
command = "{} --since=\"{}\"".format(command, args.since)
if args.timestamps:
command = "{} -t".format(command, args.since)
if args.tail:
command = "{} --tail=\"{}\"".format(command, args.tail)
command = "{} {}".format(command, containerId)
try:
subprocess.check_call("{}".format(command), shell=True)
except (KeyboardInterrupt, SystemExit):
print("Exited.")
except (subprocess.CalledProcessError) as e:
printException(e)
else:
print("No Container found on host {0} with application Task Id {1}".format(hostname, args.appTaskId))
if __name__ == '__main__':
settingObj = Settings()
appObj = AppConfig()
containerconfig = ContainerConfig()
roger_logs = RogerLogs()
roger_logs.main()
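# Editor's note (illustrative, with placeholder values taken from the argparse
# help text above): a typical invocation looks like
#
#   roger logs content.5684 -e dev -H daldevmesos01 -f --tail 100
#
# which main() translates into a shell call of roughly the form
#
#   docker -H tcp://daldevmesos01:4243 logs -f=true --tail="100" <containerId>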
|
{
"content_hash": "cb37fa0cb64fc34da57e99b09b262b1a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 130,
"avg_line_length": 43.414414414414416,
"alnum_prop": 0.5922390537455904,
"repo_name": "seomoz/roger-mesos-tools",
"id": "7dfe9f52c86b6dccdd072dae9c7a76aeed0b749f",
"size": "4842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/roger_logs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "291737"
},
{
"name": "Shell",
"bytes": "3948"
}
],
"symlink_target": ""
}
|
"""Datastore models used by the Google App Engine Pipeline API."""
from google.appengine.ext import db
from google.appengine.ext import blobstore
# Standard library import used to (de)serialize pipeline params and slot values
import json
class _PipelineRecord(db.Model):
"""Represents a Pipeline.
Properties:
class_path: Path of the Python class to use for this pipeline.
root_pipeline: The root of the whole workflow; set to itself if this pipeline
is its own root.
fanned_out: List of child _PipelineRecords that were started when this
generator pipeline moved from WAITING to RUN.
start_time: For pipelines with no start _BarrierRecord, when this pipeline
was enqueued to run immediately.
finalized_time: When this pipeline moved from WAITING or RUN to DONE.
params: Serialized parameter dictionary.
status: The current status of the pipeline.
current_attempt: The current attempt (starting at 0) to run.
max_attempts: Maximum number of attempts (starting at 0) to run.
next_retry_time: ETA of the next retry attempt.
retry_message: Why the last attempt failed; None or empty if no message.
Root pipeline properties:
is_root_pipeline: This is a root pipeline.
abort_message: Why the whole pipeline was aborted; only saved on
root pipelines.
abort_requested: If an abort signal has been requested for this root
pipeline; only saved on root pipelines
"""
WAITING = 'waiting'
RUN = 'run'
DONE = 'done'
ABORTED = 'aborted'
class_path = db.StringProperty()
root_pipeline = db.SelfReferenceProperty(
collection_name='child_pipelines_set')
fanned_out = db.ListProperty(db.Key, indexed=False)
start_time = db.DateTimeProperty(indexed=True)
finalized_time = db.DateTimeProperty(indexed=False)
# One of these two will be set, depending on the size of the params.
params_text = db.TextProperty(name='params')
params_blob = blobstore.BlobReferenceProperty(indexed=False)
status = db.StringProperty(choices=(WAITING, RUN, DONE, ABORTED),
default=WAITING)
# Retry behavior
current_attempt = db.IntegerProperty(default=0, indexed=False)
max_attempts = db.IntegerProperty(default=1, indexed=False)
next_retry_time = db.DateTimeProperty(indexed=False)
retry_message = db.TextProperty()
# Root pipeline properties
is_root_pipeline = db.BooleanProperty()
abort_message = db.TextProperty()
abort_requested = db.BooleanProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Record'
@property
def params(self):
"""Returns the dictionary of parameters for this Pipeline."""
if hasattr(self, '_params_decoded'):
return self._params_decoded
if self.params_blob is not None:
value_encoded = self.params_blob.open().read()
else:
value_encoded = self.params_text
value = json.loads(value_encoded)
if isinstance(value, dict):
kwargs = value.get('kwargs')
if kwargs:
adjusted_kwargs = {}
for arg_key, arg_value in kwargs.iteritems():
# Python only allows non-unicode strings as keyword arguments.
adjusted_kwargs[str(arg_key)] = arg_value
value['kwargs'] = adjusted_kwargs
self._params_decoded = value
return self._params_decoded
class _SlotRecord(db.Model):
"""Represents an output slot.
Properties:
root_pipeline: The root of the workflow.
filler: The pipeline that filled this slot.
value: Serialized value for this slot.
status: The current status of the slot.
fill_time: When the slot was filled by the filler.
"""
FILLED = 'filled'
WAITING = 'waiting'
root_pipeline = db.ReferenceProperty(_PipelineRecord)
filler = db.ReferenceProperty(_PipelineRecord,
collection_name='filled_slots_set')
# One of these two will be set, depending on the size of the value.
value_text = db.TextProperty(name='value')
value_blob = blobstore.BlobReferenceProperty(indexed=False)
status = db.StringProperty(choices=(FILLED, WAITING), default=WAITING,
indexed=False)
fill_time = db.DateTimeProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Slot'
@property
def value(self):
"""Returns the value of this Slot."""
if hasattr(self, '_value_decoded'):
return self._value_decoded
if self.value_blob is not None:
encoded_value = self.value_blob.open().read()
else:
encoded_value = self.value_text
self._value_decoded = json.loads(encoded_value)
return self._value_decoded
class _BarrierRecord(db.Model):
"""Represents a barrier.
Properties:
root_pipeline: The root of the workflow.
target: The pipeline to run when the barrier fires.
blocking_slots: The slots that must be filled before this barrier fires.
trigger_time: When this barrier fired.
status: The current status of the barrier.
"""
# Barrier statuses
FIRED = 'fired'
WAITING = 'waiting'
# Barrier trigger reasons (used as key names)
START = 'start'
FINALIZE = 'finalize'
ABORT = 'abort'
root_pipeline = db.ReferenceProperty(_PipelineRecord)
target = db.ReferenceProperty(_PipelineRecord,
collection_name='called_barrier_set')
blocking_slots = db.ListProperty(db.Key)
trigger_time = db.DateTimeProperty(indexed=False)
status = db.StringProperty(choices=(FIRED, WAITING), default=WAITING,
indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Barrier'
class _StatusRecord(db.Model):
"""Represents the current status of a pipeline.
Properties:
message: The textual message to show.
console_url: URL to iframe as the primary console for this pipeline.
link_names: Human display names for status links.
link_urls: URLs corresponding to human names for status links.
status_time: When the status was written.
"""
root_pipeline = db.ReferenceProperty(_PipelineRecord)
message = db.TextProperty()
console_url = db.TextProperty()
link_names = db.ListProperty(db.Text, indexed=False)
link_urls = db.ListProperty(db.Text, indexed=False)
status_time = db.DateTimeProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Status'
|
{
"content_hash": "f80b81ab26d08213a5c9e0322e157086",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 78,
"avg_line_length": 32.323076923076925,
"alnum_prop": 0.6945898778359512,
"repo_name": "rhefner1/ghidonations",
"id": "c08dfd5ab89d73a28796827081f19a3f6b0925da",
"size": "6902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "225830"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "273771"
},
{
"name": "JavaScript",
"bytes": "333908"
},
{
"name": "Python",
"bytes": "1565508"
}
],
"symlink_target": ""
}
|
from .decorators import use_template_database
from .testcases import TemplateDBTestCase
from .testcases import TemplateDBTransactionTestCase
from .testcases import TemplateDBLiveServerTestCase
|
{
"content_hash": "3c8616b334345b1947def815f61d1bc8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 52,
"avg_line_length": 48.5,
"alnum_prop": 0.8814432989690721,
"repo_name": "wilbuick/django-ttdb",
"id": "ad7c355ef7c43e9b988d7740be77dc6e86cd9b0b",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ttdb/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "327"
},
{
"name": "Python",
"bytes": "22182"
}
],
"symlink_target": ""
}
|
"""
Set up core extensions
"""
from .control import Control
def setup(bot):
"""Add core cogs to bot.
"""
bot.add_cog(Control(bot))
|
{
"content_hash": "4704416093906b02daac8203d474f28b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 29,
"avg_line_length": 13.272727272727273,
"alnum_prop": 0.6095890410958904,
"repo_name": "randomic/antinub-gregbot",
"id": "2ab611f167259c048069c86691f108b3a4cfdfc0",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58714"
}
],
"symlink_target": ""
}
|
try:
from django.conf.urls import patterns, url
except ImportError: # django < 1.4
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns("django_su.views",
url(r"^$", "su_exit", name="su_exit"),
url(r"^login/$", "su_login", name="su_login"),
url(r"^(?P<user_id>[\d]+)/$", "login_as_user", name="login_as_user"),
)
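# Editor's note (sketch, not part of the original file): on Django >= 2.0 the
# patterns()/string-view style above is gone; the equivalent declaration,
# assuming the view callables import cleanly from django_su.views, would be
# roughly:
#
#   from django.urls import re_path
#   from django_su.views import su_exit, su_login, login_as_user
#
#   urlpatterns = [
#       re_path(r"^$", su_exit, name="su_exit"),
#       re_path(r"^login/$", su_login, name="su_login"),
#       re_path(r"^(?P<user_id>\d+)/$", login_as_user, name="login_as_user"),
#   ]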
|
{
"content_hash": "8446c4c67748c01d9c89ea7e137a65cd",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 32.54545454545455,
"alnum_prop": 0.6229050279329609,
"repo_name": "Stackdriver/django-su",
"id": "7d3a69a645d4c8e1154eedfee97955222fcba4eb",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_su/urls.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import re
from bakery_lint.base import BakeryTestCase as TestCase
from bakery_lint.metadata import Metadata
class CheckFontNameNotInCamelCase(TestCase):
path = '.'
targets = ['metadata']
name = __name__
tool = 'lint'
longMessage = True
def read_metadata_contents(self):
return open(self.path).read()
def test_fontname_not_in_camel_case(self):
""" Check if fontname is not camel cased """
contents = self.read_metadata_contents()
familymetadata = Metadata.get_family_metadata(contents)
camelcased_fontnames = []
for font_metadata in familymetadata.fonts:
if bool(re.match(r'([A-Z][a-z]+){2,}', font_metadata.name)):
camelcased_fontnames.append(font_metadata.name)
if camelcased_fontnames:
self.fail('%s are camel cased names. To solve this check just '
'use spaces in names.' % ', '.join(camelcased_fontnames))
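# Editor's sketch of how the regex above behaves: it only matches names built
# from two or more capitalised runs with no separator in between.
if __name__ == '__main__':
    for name in ('OpenSans', 'Open Sans', 'Lobster'):
        print('%-10s camel cased: %s'
              % (name, bool(re.match(r'([A-Z][a-z]+){2,}', name))))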
|
{
"content_hash": "6bc63e1602f00d75602598adc17a90fd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 76,
"avg_line_length": 31.1,
"alnum_prop": 0.6302250803858521,
"repo_name": "lowks/fontbakery-cli",
"id": "ac7899eb67963d7e7706cee38527918cd13e6ba1",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bakery_lint/tests/downstream/test_check_font_name_not_in_camelcase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Tests for config/script."""
from unittest.mock import patch
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
async def test_delete_script(hass, hass_client):
"""Test deleting a script."""
with patch.object(config, 'SECTIONS', ['script']):
await async_setup_component(hass, 'config', {})
client = await hass_client()
orig_data = {
'one': {},
'two': {},
}
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch('homeassistant.components.config._read', mock_read), \
patch('homeassistant.components.config._write', mock_write):
resp = await client.delete('/api/config/script/config/two')
assert resp.status == 200
result = await resp.json()
assert result == {'result': 'ok'}
assert len(written) == 1
assert written[0] == {
'one': {}
}
|
{
"content_hash": "59b2bc0e83dc9e841469fab1524d4594",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 25.536585365853657,
"alnum_prop": 0.6074498567335244,
"repo_name": "aequitas/home-assistant",
"id": "d0848d18dcc7d81ce12b1ce755ed02a9a808475b",
"size": "1047",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/config/test_script.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
import os
import glob
import string
sourcepath = '\src'
distpath = '\dist'
sourcelist = []
for root, dirs, files in os.walk(sourcepath):
for name in files:
pathfile = os.path.join(root, name)
#ignore python extensions
pathfile = string.replace(pathfile, '.pyo', '')
pathfile = string.replace(pathfile, '.pyc', '')
pathfile = string.replace(pathfile, '.py', '')
#remove root path
pathfile = string.replace(pathfile, sourcepath, '')
sourcelist.append(pathfile)
distlist = []
for root, dirs, files in os.walk(distpath):
for name in files:
pathfile = os.path.join(root, name)
#ignore python extensions
pathfile = string.replace(pathfile, '.pyo', '')
pathfile = string.replace(pathfile, '.pyc', '')
pathfile = string.replace(pathfile, '.py', '')
#remove root path
pathfile = string.replace(pathfile, distpath, '')
distlist.append(pathfile)
sourceset = set(sourcelist)
distset = set(distlist)
missing = sourceset - distset
for (files) in sorted(missing):
print files
|
{
"content_hash": "b98dfefeaffecbf3d9f2953028eb554b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 60,
"avg_line_length": 27.046511627906977,
"alnum_prop": 0.6010318142734308,
"repo_name": "lantra/vugamedev",
"id": "81eefbfa653d4d3321eff5b6e883a6ef194c4dcd",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comparison.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1382627"
},
{
"name": "C++",
"bytes": "863116"
},
{
"name": "CSS",
"bytes": "18584"
},
{
"name": "JavaScript",
"bytes": "1647"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Python",
"bytes": "270355"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.api.serializers import OrganizationMemberWithProjectsSerializer, serialize
from sentry.testutils import TestCase
class OrganizationMemberWithProjectsSerializerTest(TestCase):
def setUp(self):
self.owner_user = self.create_user("foo@localhost", username="foo")
self.user_2 = self.create_user("bar@localhost", username="bar")
self.org = self.create_organization(owner=self.owner_user)
self.org.member_set.create(user=self.user_2)
self.team = self.create_team(organization=self.org, members=[self.owner_user, self.user_2])
self.team_2 = self.create_team(organization=self.org, members=[self.user_2])
self.project = self.create_project(teams=[self.team])
self.project_2 = self.create_project(teams=[self.team_2])
def test_simple(self):
projects_ids = [self.project.id, self.project_2.id]
org_members = list(
self.org.member_set.filter(user__in=[self.owner_user, self.user_2]).order_by(
"user__email"
)
)
result = serialize(
org_members,
self.user_2,
OrganizationMemberWithProjectsSerializer(project_ids=projects_ids),
)
expected_projects = [[self.project.slug, self.project_2.slug], [self.project.slug]]
expected_projects[0].sort()
assert [r["projects"] for r in result] == expected_projects
projects_ids = [self.project_2.id]
result = serialize(
org_members,
self.user_2,
OrganizationMemberWithProjectsSerializer(project_ids=projects_ids),
)
expected_projects = [[self.project_2.slug], []]
assert [r["projects"] for r in result] == expected_projects
|
{
"content_hash": "8fd7c845b1add5bf3a41a04d5549f405",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.6428571428571429,
"repo_name": "mvaled/sentry",
"id": "1787acddd560e8b8294227c25a2a21a99d898382",
"size": "1817",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/api/serializers/test_organization_member.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import traceback
from oslo.config import cfg
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.utils import misc
from cinder import exception
from cinder import flow_utils
from cinder.image import glance
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, scheduler_rpcapi):
requires = ['filter_properties', 'image_id', 'request_spec',
'snapshot_id', 'volume_id', 'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
]
def execute(self, **kwargs):
pass
def _reschedule(self, context, cause, request_spec, filter_properties,
snapshot_id, image_id, volume_id, **kwargs):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s" %
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, CONF.volume_topic, volume_id,
snapshot_id=snapshot_id, image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, context, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume_id)
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Reset the volume state.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
update = {
'status': 'creating',
'scheduled_at': timeutils.utcnow(),
}
LOG.debug("Updating volume %(volume_id)s with %(update)s." %
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let resetting the status cause the rescheduling to fail.
LOG.exception(_("Volume %s: resetting 'creating' status failed."),
volume_id)
def revert(self, context, result, flow_failures, **kwargs):
# Check if we have a cause which can tell us not to reschedule.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
return
volume_id = kwargs['volume_id']
# Use a different context when rescheduling.
if self.reschedule_context:
context = self.reschedule_context
try:
cause = list(flow_failures.values())[0]
self._pre_reschedule(context, volume_id)
self._reschedule(context, cause, **kwargs)
self._post_reschedule(context, volume_id)
except exception.CinderException:
LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'volume_ref'
def __init__(self, db, host):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume can not be deleted while its still being created?
volume_ref = self.db.volume_get(context, volume_id)
# NOTE(vish): so we don't have to get volume from db again before
# passing it to the driver.
volume_ref['host'] = self.host
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
if isinstance(result, misc.Failure):
return
common.error_out_volume(context, self.db, volume_id)
LOG.error(_("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['image_id', 'snapshot_id', 'source_volid']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume_ref, **kwargs):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume_ref['name']
volume_size = utils.as_int(volume_ref['size'], quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume_ref['status'],
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image]
'volume_id': volume_ref['id'],
'volume_name': volume_name,
'volume_size': volume_size,
}
if kwargs.get('snapshot_id'):
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': kwargs['snapshot_id'],
})
elif kwargs.get('source_volid'):
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = kwargs['source_volid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_volid': source_volid,
# This is captured in case we have to revert and we want to set
# back the source volume status to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref['status'],
'type': 'source_vol',
})
elif kwargs.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = kwargs['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
# NOTE(harlowja): if we have to later recover this tasks output
# on another 'node' that this object won't be able to be
# serialized, so we will have to recreate this object on
# demand in the future.
'image_service': image_service,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, misc.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
host=volume_ref['host'])
except exception.CinderException:
# If notification sending of volume database entry reading fails
# then we shouldn't error out the whole workflow since this is
# not always information that must be sent for volumes to operate
LOG.exception(_("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s") %
{'event': self.event_suffix,
'volume_id': volume_id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume'
def __init__(self, db, driver):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
Caller should provide one and only one of snapshot_id, source_volid
and image_id. If an image_id specified, an image_meta should also be
provided, otherwise will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
self._enable_bootable_flag(context, volume_id)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume_id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume_id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self._capture_volume_image_metadata(context, volume_id,
image_id, image_meta)
except exception.CinderException as ex:
LOG.exception(exception_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
**kwargs):
volume_id = volume_ref['id']
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
model_update = self.driver.create_volume_from_snapshot(volume_ref,
snapshot_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference") %
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot_ref['volume_id']})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume_id,
snapshot_id=snapshot_id)
return model_update
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_("Failed updating volume %(volume_id)s bootable"
" flag to true") % {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
source_volid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_volid)
model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
source_volid=source_volid)
return model_update
def _copy_image_to_volume(self, context, volume_ref,
image_id, image_location, image_service):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s." %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.error(_("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s, error: %(error)s") %
{'volume_id': volume_id,
'error': ex.stderr, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
"error: %(error)s") % {'volume_id': volume_id,
'error': ex})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
LOG.error(_("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s, error: %(error)s") %
{'volume_id': volume_id, 'error': ex,
'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully." %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
# NOTE(harlowja): The best way for this to happen would be in bulk,
# but that doesn't seem to exist (yet), so we go through one by one
# which means we can have partial create/update failure.
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s." %
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
for (key, value) in volume_metadata.items():
try:
self.db.volume_glance_metadata_create(context, volume_id,
key, value)
except exception.GlanceMetadataExists:
pass
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s." %
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
# NOTE (singn): two params need to be returned
# dict containing provider_location for cloned volume
# and clone status.
model_update, cloned = self.driver.clone_image(
volume_ref, image_location, image_id, image_meta)
if not cloned:
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
#
# Create the volume and then download the image onto the volume.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with "
"%(updates)s") %
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_raw_volume(self, context, volume_ref, **kwargs):
return self.driver.create_volume(volume_ref)
def execute(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume_ref['id']
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error(_("Unable to create volume. "
"Volume driver %s not initialized") % driver_name)
# NOTE(flaper87): Set the error status before
# raising any exception.
self.db.volume_update(context, volume_id, dict(status='error'))
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
LOG.info(_("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s") %
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume_ref=volume_ref,
**volume_spec)
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
LOG.exception(_("Failed updating model of volume %(volume_id)s"
" with creation provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise
return volume_ref
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
volume_id = volume['id']
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'building' if this fails)??
volume_ref = self.db.volume_update(context, volume_id, update)
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with "
"%(update)s") % {'volume_id': volume_id,
'update': update})
# Even if the update fails, the volume is ready.
msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
LOG.info(msg % {
'volume_name': volume_spec['volume_name'],
'volume_id': volume_id,
})
def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
allow_reschedule, reschedule_context, request_spec,
filter_properties, snapshot_id=None, image_id=None,
source_volid=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
status & notify or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
5. Notifies that the volume has start to be created.
6. Creates a volume from the extracted volume specification.
7. Attaches a on-success *only* task that notifies that the volume creation
has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'image_id': image_id,
'request_spec': request_spec,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'volume_id': volume_id,
}
volume_flow.add(ExtractVolumeRefTask(db, host))
if allow_reschedule and request_spec:
volume_flow.add(OnFailureRescheduleTask(reschedule_context,
db, scheduler_rpcapi))
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(db, driver),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
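# Editor's sketch (not cinder code): a minimal, self-contained example of the
# taskflow pattern used by get_flow() above, assuming the standard
# taskflow.task.Task interface -- tasks declare what they provide/require, a
# linear flow chains them, and the engine is loaded with an initial store
# that satisfies the first task's requirements.
if __name__ == '__main__':
    from taskflow import task

    class AddOne(task.Task):
        default_provides = 'y'

        def execute(self, x):
            return x + 1

    class Show(task.Task):
        def execute(self, y):
            print('result: %s' % y)

    demo_flow = linear_flow.Flow('demo')
    demo_flow.add(AddOne(), Show())
    taskflow.engines.load(demo_flow, store={'x': 41}).run()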
|
{
"content_hash": "f17d69bb2752e816f6f9c6384e71dd53",
"timestamp": "",
"source": "github",
"line_count": 696,
"max_line_length": 79,
"avg_line_length": 45.668103448275865,
"alnum_prop": 0.5634104137171622,
"repo_name": "github-borat/cinder",
"id": "0e361b51a0752f87c35c949c938d3ee1a0970188",
"size": "32358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/flows/manager/create_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6575951"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
}
|
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
# Ensure that the row index is a valid index (int)
index = operator_index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError('index {} out of range for table with length {}'
.format(index, len(table)))
# Finally, ensure the index is positive [#8422] and set Row attributes
self._index = index % n
self._table = table
def __getitem__(self, item):
try:
# Try the most common use case of accessing a single column in the Row.
# Bypass the TableColumns __getitem__ since that does more testing
# and allows a list of tuple or str, which is not the right thing here.
out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
except (KeyError, TypeError):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
# This is only to raise an exception
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
def keys(self):
return self._table.columns.keys()
def values(self):
return self.__iter__()
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
method is slow and its use is discouraged when possible.
Returns
-------
void_row : ``numpy.void`` or ``numpy.ma.mvoid``
Copy of row values.
``numpy.void`` if unmasked, ``numpy.ma.mvoid`` else.
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
# The logic here is a little complicated to work around
# bug in numpy < 1.8 (numpy/numpy#483). Need to build up
# a np.ma.mvoid object by hand.
from .table import descr
# Make np.void version of masks. Use the table dtype but
# substitute bool for data type
masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
for col in cols)
descrs = (descr(col) for col in cols)
mask_dtypes = [(name, bool, shape) for name, type_, shape in descrs]
row_mask = np.array([masks], dtype=mask_dtypes)[0]
# Make np.void version of values, and then the final mvoid row
row_vals = np.array([vals], dtype=self.dtype)[0]
void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index:index + 1]
descr_vals = [self.__class__.__name__,
f'index={self.index}']
if table.masked:
descr_vals.append('masked=True')
return table._base_repr_(html, descr_vals, max_width=-1,
tableid=f'table{id(self._table)}')
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
|
{
"content_hash": "163f841f07fe5fdb119439efe75f1ee1",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 91,
"avg_line_length": 33.49238578680203,
"alnum_prop": 0.5678993634434677,
"repo_name": "aleksandr-bakanov/astropy",
"id": "931754510cacab30a8e25b594fd8362fac009b83",
"size": "6663",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/table/row.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898093"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from paddle.fluid import layers
from paddle.fluid.dygraph import Layer
from paddle.fluid.layers.control_flow import StaticRNN
__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm']
class BasicGRUUnit(Layer):
"""
****
BasicGRUUnit class, using basic operators to build GRU
The algorithm can be described as the equations below.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
name_scope(string) : The name scope used to identify parameters and biases
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicGRUUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
gru_unit = BasicGRUUnit( "gru_unit", hidden_size )
new_hidden = gru_unit( input, pre_hidden )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32'):
super(BasicGRUUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._dtype = dtype
def _build_once(self, input, pre_hidden):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._gate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 2 * self._hiden_size],
dtype=self._dtype)
self._candidate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, self._hiden_size],
dtype=self._dtype)
self._gate_bias = self.create_parameter(
self._bias_attr,
shape=[2 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
self._candidate_bias = self.create_parameter(
self._bias_attr,
shape=[self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight)
gate_input = layers.elementwise_add(gate_input, self._gate_bias)
gate_input = self._gate_activation(gate_input)
r, u = layers.split(gate_input, num_or_sections=2, dim=1)
r_hidden = r * pre_hidden
candidate = layers.matmul(
layers.concat([input, pre_hidden], 1), self._candidate_weight)
candidate = layers.elementwise_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
def basic_gru(input,
init_hidden,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32',
name='basic_gru'):
"""
    GRU implementation using basic operators; supports multiple layers and bidirectional GRU.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
input (Variable): GRU input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
            if batch_first = True, shape should be ( batch_size x seq_len x input_size )
init_hidden(Variable|None): The initial hidden state of the GRU
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to tensor with ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the GRU
num_layers (int): The total number of layers of the GRU
        sequence_length (Variable|None): A Tensor (shape [batch_size]) storing the real length of each instance.
            This tensor will be converted to a mask that masks the padding ids.
            If it's None, there are no padding ids.
        dropout_prob(float|0.0): Dropout prob; dropout ONLY works on the rnn output of each layer,
            NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
name(string): name used to identify parameters and biases
Returns:
rnn_out(Tensor),last_hidden(Tensor)
            - rnn_out is the result of GRU hidden, with shape (seq_len x batch_size x hidden_size) \
                if is_bidirec set to True, shape will be ( seq_len x batch_size x hidden_size*2)
- last_hidden is the hidden state of the last step of GRU \
shape is ( num_layers x batch_size x hidden_size ) \
if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size),
can be reshaped to a tensor with shape( num_layers x 2 x batch_size x hidden_size)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_gru
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input,
shape=[-1, hidden_size],
ref_batch_dim_idx=1)
new_hidden = unit_list[i](step_input, pre_hidden)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.step_output(new_hidden)
step_input = new_hidden
                if dropout_prob is not None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob, )
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
if batch_first:
            rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
def basic_lstm(input,
init_hidden,
init_cell,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32',
name='basic_lstm'):
"""
    LSTM implementation using basic operators; supports multiple layers and bidirectional LSTM.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
Args:
input (Variable): lstm input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
            if batch_first = True, shape should be ( batch_size x seq_len x input_size )
init_hidden(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
init_cell(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the LSTM
num_layers (int): The total number of layers of the LSTM
        sequence_length (Variable|None): A tensor (shape [batch_size]) storing the real length of each instance.
            This tensor will be converted to a mask that masks the padding ids.
            If it's None, there are no padding ids.
        dropout_prob(float|0.0): Dropout prob; dropout ONLY works on the rnn output of each layer,
            NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
forget_bias (float|1.0) : Forget bias used to compute the forget gate
dtype(string): Data type used in this unit
name(string): Name used to identify parameters and biases
Returns:
rnn_out(Tensor), last_hidden(Tensor), last_cell(Tensor)
            - rnn_out is the result of LSTM hidden, shape is (seq_len x batch_size x hidden_size) \
                if is_bidirec set to True, its shape will be ( seq_len x batch_size x hidden_size*2)
            - last_hidden is the hidden state of the last step of LSTM \
                with shape ( num_layers x batch_size x hidden_size ) \
                if is_bidirec set to True, its shape will be ( num_layers*2 x batch_size x hidden_size),
                and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
            - last_cell is the cell state of the last step of LSTM \
                with shape ( num_layers x batch_size x hidden_size ) \
                if is_bidirec set to True, its shape will be ( num_layers*2 x batch_size x hidden_size),
                and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_lstm
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden, last_cell = basic_lstm( input, pre_hidden, pre_cell, \
hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
# convert to [num_layers, 2, batch_size, hidden_size]
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
init_cell = layers.reshape(
init_cell, shape=[num_layers, direc_num, -1, hidden_size])
# forward direction
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
pre_cell = rnn.memory(init=init_cell[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
pre_cell = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
new_hidden, new_cell = unit_list[i](step_input, pre_hidden,
pre_cell)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
new_cell = layers.elementwise_mul(
new_cell, step_mask, axis=0) - layers.elementwise_mul(
pre_cell, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.update_memory(pre_cell, new_cell)
rnn.step_output(new_hidden)
rnn.step_output(new_cell)
step_input = new_hidden
                if dropout_prob is not None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob,
dropout_implementation='upscale_in_train')
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
last_cell_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i * 2]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_cell = rnn_out[i * 2 + 1]
last_cell = last_cell[-1]
last_cell_array.append(last_cell)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
last_cell_output = layers.concat(last_cell_array, axis=0)
last_cell_output = layers.reshape(
last_cell_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output, last_cell_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1)
last_cell = layers.reshape(
last_cell, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
last_cell = fw_last_cell
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
class BasicLSTMUnit(Layer):
"""
****
    BasicLSTMUnit class, using basic operators to build LSTM
    The algorithm can be described as the equations below.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
- $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix
of weights from the input gate to the input)
    - The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vectors).
- sigmoid is the logistic sigmoid function.
- $i, f, o$ and $c$ are the input gate, forget gate, output gate,
and cell activation vectors, respectively, all of which have the same size as
the cell output activation vector $h$.
- The :math:`\odot` is the element-wise product of the vectors.
    - :math:`tanh` is the activation function.
- :math:`\\tilde{c_t}` is also called candidate hidden state,
which is computed based on the current input and the previous hidden state.
Args:
name_scope(string) : The name scope used to identify parameter and bias name
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized as zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cells (actNode).
Default: 'fluid.layers.tanh'
forget_bias(float|1.0): forget bias used when computing forget gate
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicLSTMUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
            lstm_unit = BasicLSTMUnit( "lstm_unit", hidden_size)
new_hidden, new_cell = lstm_unit( input, pre_hidden, pre_cell )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32'):
super(BasicLSTMUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._forget_bias = layers.fill_constant(
[1], dtype=dtype, value=forget_bias)
self._forget_bias.stop_gradient = False
self._dtype = dtype
def _build_once(self, input, pre_hidden, pre_cell):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 4 * self._hiden_size],
dtype=self._dtype)
self._bias = self.create_parameter(
attr=self._bias_attr,
shape=[4 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden, pre_cell):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
gate_input = layers.elementwise_add(gate_input, self._bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
new_cell = layers.elementwise_add(
layers.elementwise_mul(
pre_cell,
layers.sigmoid(layers.elementwise_add(f, self._forget_bias))),
layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)))
new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
return new_hidden, new_cell
|
{
"content_hash": "7efa5c97355dffb1c4628a464de6d375",
"timestamp": "",
"source": "github",
"line_count": 729,
"max_line_length": 118,
"avg_line_length": 41.611796982167355,
"alnum_prop": 0.5549695071699358,
"repo_name": "tensor-tang/Paddle",
"id": "e6a868ada37ab9fb27f973b4bfe648387bb4279f",
"size": "30946",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/contrib/layers/rnn_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10161819"
},
{
"name": "CMake",
"bytes": "290828"
},
{
"name": "Cuda",
"bytes": "1183095"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7082088"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
import os, sys
from lxml import etree
def parse_properties(fb):
doc = fb["doc"]
properties = []
for p in doc.xpath("/filebase/properties/property"):
properties.append({"id": int(p.attrib["id"]), "name": p.find("name").text})
    # Check that property ids are sequential
for i in range(len(properties)):
if properties[i]["id"] != i:
sys.exit("ERROR: `property`s `id`s are not sequential")
return properties
def parse_values(fb):
doc = fb["doc"]
properties = fb["properties"]
values = []
for pid in range(len(properties)):
values.append([])
for p in doc.xpath("//filebase/files/file/property"):
if int(p.attrib["pid"]) > len(properties):
sys.exit("ERROR: `pid` is wrong")
if p.text not in values[int(p.attrib["pid"])]:
values[int(p.attrib["pid"])].append(p.text)
for pid in range(len(properties)):
values[pid].sort()
return values
def build_XPath(fb, query_table):
XPath = "/filebase/files/file["
for pid in range(len(fb["properties"])):
for i in range(len(fb["values"][pid])):
if query_table[pid][i]:
XPath += "property[@pid='{0}'] = '{1}' and ".format(pid, fb["values"][pid][i])
if XPath == "/filebase/files/file[": # Nothing is selected, query all files
XPath = XPath[:-1] # remove '[' at the end
else:
XPath = XPath[:-5] + "]" # remove 'and ' at the end and add ']' to end
return XPath
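# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module). It exercises only build_XPath(), so no XML document is needed; the
# demo_fb dict and demo_query table below are hypothetical stand-ins for what
# parse_properties()/parse_values() would normally produce.
if __name__ == "__main__":
    demo_fb = {
        "properties": [{"id": 0, "name": "genre"}, {"id": 1, "name": "year"}],
        "values": [["comedy", "drama"], ["2001", "2002"]],
    }
    # Select files whose property 0 equals "drama"; leave property 1 unfiltered.
    demo_query = [[False, True], [False, False]]
    print(build_XPath(demo_fb, demo_query))
    # -> /filebase/files/file[property[@pid='0'] = 'drama']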
|
{
"content_hash": "564ef4ad170c79559fa6dc9185b27d2f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 82,
"avg_line_length": 26.01923076923077,
"alnum_prop": 0.6385809312638581,
"repo_name": "X54329/filebase-qt4",
"id": "d5e54f3d40339eaa957c833474eeccb00baefe15",
"size": "2471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filebase/filebase.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from flask import jsonify
from server import app
from server.database import *
# MARK: List routes
@app.route('/api/lists', methods=['GET'])
def get_lists():
response = {}
response['lists'] = [l.__dict__ for l in db_get_lists()]
return jsonify(response)
|
{
"content_hash": "61b782588b86f2a5b38ab63c4ed96707",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.667910447761194,
"repo_name": "FroeMic/CDTM-Backend-Workshop-WT2016",
"id": "67b4f1d22bf791ecd60577c110d87d053e95e4d3",
"size": "268",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "solutions/server/server-09-connect-database/server/routes/list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2309764"
},
{
"name": "HTML",
"bytes": "204484"
},
{
"name": "JavaScript",
"bytes": "4859190"
},
{
"name": "Python",
"bytes": "187974"
}
],
"symlink_target": ""
}
|
class mergeable_dict(dict):
"""dict with merge() method."""
def is_compatible_with(self, other):
for key in self:
if key in other and self[key] != other[key]:
return False
return True
def merge(self, other):
for key in other:
if key in self:
if self[key] != other[key]:
raise ValueError
else:
self[key] = other[key]
return self
def __ior__(self, other):
return self.merge(other)
def __or__(self, other):
result = mergeable_dict(self)
for key in other:
if key in result:
if result[key] != other[key]:
raise ValueError
else:
result[key] = other[key]
return result
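# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original recipe):
if __name__ == "__main__":
    a = mergeable_dict({"host": "localhost", "port": 8080})
    b = mergeable_dict({"port": 8080, "debug": True})
    print(a.is_compatible_with(b))  # True: the only shared key has equal values
    print(a | b)                    # merged copy; `a` itself is unchanged
    a |= {"debug": False}           # in-place merge via __ior__
    try:
        a.merge({"port": 9090})     # conflicting value for an existing key
    except ValueError:
        print("conflict detected")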
|
{
"content_hash": "a3723cf5dd5f2f3f42b83ce9c9224d9f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 56,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.4807228915662651,
"repo_name": "ActiveState/code",
"id": "542334455c1df10f2668fd39f74f65ee13158fc2",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576473_mergeabledict/recipe-576473.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E08000034'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 5.tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 5.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
|
{
"content_hash": "dc6457f47971a8091b57c0de580c00a2",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 82,
"avg_line_length": 48.5,
"alnum_prop": 0.7551546391752577,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "036863177ee073e3e81f865b8618e91f2810d46f",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_kirklees.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
}
|
""" Operating System formatter """
from operator import attrgetter
from aquilon.aqdb.model import OperatingSystem
from aquilon.worker.formats.formatters import ObjectFormatter
class OSFormatter(ObjectFormatter):
def format_raw(self, os, indent="", embedded=True, indirect_attrs=True):
details = []
details.append(indent + "{0:c}: {0.name}".format(os))
details.append(indent + " Version: %s" % os.version)
if not embedded:
details.append(indent + " Archetype: %s" % os.archetype)
for dbsrv in sorted(os.required_services, key=attrgetter("name")):
details.append(indent + " Required Service: %s" % dbsrv.name)
details.append(indent + " Lifecycle: %s" % os.lifecycle)
if os.comments:
details.append(indent + " Comments: %s" % os.comments)
return "\n".join(details)
def fill_proto(self, os, skeleton, embedded=True, indirect_attrs=True):
skeleton.name = os.name
skeleton.version = os.version
lifecycle_enum = skeleton.DESCRIPTOR.fields_by_name['lifecycle'].enum_type
skeleton.lifecycle = lifecycle_enum.values_by_name[os.lifecycle.name.upper()].number
        # We don't need the services here, so redirect with indirect_attrs=False
self.redirect_proto(os.archetype, skeleton.archetype,
indirect_attrs=False)
ObjectFormatter.handlers[OperatingSystem] = OSFormatter()
|
{
"content_hash": "5b21cf9f98b77ad9095156df6cf8538f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 92,
"avg_line_length": 41.31428571428572,
"alnum_prop": 0.656984785615491,
"repo_name": "guillaume-philippon/aquilon",
"id": "b7082a597ca8a61a360fec62c274d43d6eac3b38",
"size": "2169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/aquilon/worker/formats/operating_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Eutester'
copyright = u'2012, Vic Iglesias, Matt Clark, Harold Spencer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.8'
# The full version, including alpha/beta/rc tags.
release = '0.0.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Eutesterdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Eutester.tex', u'Eutester Documentation',
u'Vic Iglesias, Matt Clark, Harold Spencer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nephoria', u'Eutester Documentation',
[u'Vic Iglesias, Matt Clark, Harold Spencer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Eutester', u'Eutester Documentation',
u'Vic Iglesias, Matt Clark, Harold Spencer', 'Eutester', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Eutester'
epub_author = u'Vic Iglesias, Matt Clark, Harold Spencer'
epub_publisher = u'Vic Iglesias, Matt Clark, Harold Spencer'
epub_copyright = u'2012, Vic Iglesias, Matt Clark, Harold Spencer'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
{
"content_hash": "fc192326ba422d4f1f431812229008e5",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 95,
"avg_line_length": 32.393382352941174,
"alnum_prop": 0.7029849052320962,
"repo_name": "nephomaniac/nephoria",
"id": "a888bc5c233282402bac197a1c1610df34825f00",
"size": "9230",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "924331"
},
{
"name": "Shell",
"bytes": "775"
}
],
"symlink_target": ""
}
|
"""Internal resolver for persistent identifiers."""
from __future__ import absolute_import, print_function
from sqlalchemy.orm.exc import NoResultFound
from .errors import (
PIDDeletedError,
PIDMissingObjectError,
PIDRedirectedError,
PIDUnregistered,
)
from .models import PersistentIdentifier
class Resolver(object):
"""Persistent identifier resolver.
Helper class for retrieving an internal object for a given persistent
identifier.
"""
def __init__(
self, pid_type=None, object_type=None, getter=None, registered_only=True
):
"""Initialize resolver.
:param pid_type: Persistent identifier type.
:param object_type: Object type.
:param getter: Callable that will take an object id for the given
object type and retrieve the internal object.
"""
self.pid_type = pid_type
self.object_type = object_type
self.object_getter = getter
self.registered_only = registered_only
def resolve(self, pid_value):
"""Resolve a persistent identifier to an internal object.
:param pid_value: Persistent identifier.
:returns: A tuple containing (pid, object).
"""
pid = PersistentIdentifier.get(self.pid_type, pid_value)
if pid.is_new() or pid.is_reserved():
if self.registered_only:
raise PIDUnregistered(pid)
else:
obj_id = pid.get_assigned_object(object_type=self.object_type)
if pid.is_deleted():
obj_id = pid.get_assigned_object(object_type=self.object_type)
try:
obj = self.object_getter(obj_id) if obj_id else None
except NoResultFound:
obj = None
raise PIDDeletedError(pid, obj)
if pid.is_redirected():
raise PIDRedirectedError(pid, pid.get_redirect())
obj_id = pid.get_assigned_object(object_type=self.object_type)
if not obj_id:
raise PIDMissingObjectError(self.pid_type, pid_value)
return pid, self.object_getter(obj_id)
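# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). The getter is
# whatever callable maps an assigned object id to the internal object; the
# ``Record.get_record`` name below is only an assumed example of such a
# callable and is not imported here:
#
#     resolver = Resolver(pid_type='recid', object_type='rec',
#                         getter=Record.get_record)
#     pid, record = resolver.resolve('12345')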
|
{
"content_hash": "00b7af277081b174536679b2578fc323",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 31.55223880597015,
"alnum_prop": 0.6315042573320719,
"repo_name": "inveniosoftware/invenio-pidstore",
"id": "5fb28aebea1e81862346b0ec2a2444d6557e5256",
"size": "2349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_pidstore/resolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126932"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
}
|
from gireoan.Analyser import Analyser
from gireoan.Errors import NoSettingsFileException
try:
import local_settings
except ImportError:
    raise NoSettingsFileException()
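# For reference (an assumption, not part of the original script): the
# local_settings module imported above is expected to provide the values used
# below, e.g. a hypothetical local_settings.py such as:
#
#     REPOSITORY_PATH = "/path/to/repo"
#     SEARCHING_PATHS = ["src/"]
#     ALLOWED_ENDINGS = [".py"]
#     EXCLUDE_PATTERNS = ["migrations"]
#     EXCLUDE_PATHS = ["vendor/"]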
# Start main
if __name__ == '__main__':
repo_analyser = Analyser(
repo_name=local_settings.REPOSITORY_PATH,
searching_paths=local_settings.SEARCHING_PATHS,
allowed_endings=local_settings.ALLOWED_ENDINGS,
exclude_patters=local_settings.EXCLUDE_PATTERNS,
exclude_paths=local_settings.EXCLUDE_PATHS
)
repo_analyser.do_analyse()
repo_analyser.report_file_endings()
repo_analyser.report_for_all_authors()
|
{
"content_hash": "ac2fdd9c66af4ab28b7d2c3cdda1e653",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 28.59090909090909,
"alnum_prop": 0.7170111287758346,
"repo_name": "saeschdivara/GitRepoAnalyser",
"id": "967d7e75ef2dd4970ec13ba3d39538b67d980fdc",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14460"
},
{
"name": "Java",
"bytes": "78690"
},
{
"name": "JavaScript",
"bytes": "2187540"
},
{
"name": "PHP",
"bytes": "6276"
},
{
"name": "Python",
"bytes": "23638"
}
],
"symlink_target": ""
}
|
import github3
import requests
import urllib
import urlparse
from flask import session
import ob2.config as config
from ob2.util.security import generate_shitty_random_string
github_oauth_url = "https://github.com/login/oauth"
access_token_url = github_oauth_url + "/access_token"
authorize_url = github_oauth_url + "/authorize"
user_url = "https://github.com"
class AuthenticationError(Exception):
pass
class AuthenticationTemporaryError(AuthenticationError):
pass
class AuthenticationIntegrityError(AuthenticationError):
pass
def get_authentication_provider_url(redirect_uri):
if config.github_oauth_consumer_key:
state = generate_shitty_random_string()
session["authentication_oauth_state"] = state
return "%s?%s" % (authorize_url, urllib.urlencode({
"client_id": config.github_oauth_consumer_key,
"redirect_uri": redirect_uri,
"state": state}))
def authentication_provider_get_token(code, state):
if not state or session.get("authentication_oauth_state") != state:
raise AuthenticationIntegrityError("OAuth state parameter does not match")
response = requests.post(access_token_url,
data={"client_id": config.github_oauth_consumer_key,
"client_secret": config.github_oauth_consumer_secret,
"code": code})
if response.status_code != 200:
raise AuthenticationTemporaryError("Failed to get OAuth api token from GitHub")
response_dict = dict(urlparse.parse_qsl(response.text))
if "access_token" not in response_dict:
raise AuthenticationTemporaryError("GitHub OAuth response did not contain access_token")
return response_dict["access_token"]
def get_username_from_token(token):
github = github3.login(token=token)
github_user = github.user()
return github_user.login
def github_username():
return session.get("github_username")
def authenticate_as_github_username(username):
if username:
session["github_username"] = username
elif "github_username" in session:
del session["github_username"]
def is_ta():
username = github_username()
return username and username in config.github_ta_usernames
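# ----------------------------------------------------------------------------
# Flow sketch (illustrative only; the route name and redirect URI are
# assumptions, not taken from this repository). A Flask login flow would
# typically redirect the user to get_authentication_provider_url(redirect_uri),
# then handle the callback roughly like:
#
#     @app.route("/login/github/callback")
#     def github_callback():
#         token = authentication_provider_get_token(
#             request.args.get("code"), request.args.get("state"))
#         authenticate_as_github_username(get_username_from_token(token))
#         return redirect("/")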
|
{
"content_hash": "e7c565c208f6276a6457c28674bf077a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 96,
"avg_line_length": 30.905405405405407,
"alnum_prop": 0.6891123742894621,
"repo_name": "octobear2/ob2",
"id": "e1340faec3f7bd02035e532627c4d6da9e776104",
"size": "2287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ob2/util/github_login.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3196"
},
{
"name": "CoffeeScript",
"bytes": "6053"
},
{
"name": "HTML",
"bytes": "109099"
},
{
"name": "JavaScript",
"bytes": "13577"
},
{
"name": "Puppet",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "183885"
},
{
"name": "Shell",
"bytes": "4137"
}
],
"symlink_target": ""
}
|
from InstagramAPI.src.http.Response.Objects.Comment import Comment
from .Response import Response
class CommentResponse(Response):
def __init__(self, response):
self.comment = None
if self.STATUS_OK == response['status']:
if 'comment' in response and response['comment']:
                self.comment = Comment(response['comment'])
else:
self.setMessage(response['message'])
self.setStatus(response['status'])
def getComment(self):
return self.comment
|
{
"content_hash": "50336dc4519e891bd8dc6594617e1548",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.6396226415094339,
"repo_name": "danleyb2/Instagram-API",
"id": "3a038d24848202f652a348a5e4b5f0e48024f806",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InstagramAPI/src/http/Response/CommentResponse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165254"
}
],
"symlink_target": ""
}
|
from functools import partial
from random import randint
from emmett import App, request
from emmett.orm import Database, Model, Field, rowmethod
from emmett.tools import service
app = App(__name__)
class World(Model):
tablename = "world"
randomnumber = Field.int()
@rowmethod('serialize')
def _serialize(self, row):
return {'id': row.id, 'randomNumber': row.randomnumber}
class Fortune(Model):
tablename = "fortune"
message = Field.string()
@rowmethod('serialize')
def _serialize(self, row):
return {'id': row.id, 'message': row.message}
app.config.handle_static = False
app.config.db.adapter = 'postgres:psycopg2'
app.config.db.host = 'tfb-database'
app.config.db.user = 'benchmarkdbuser'
app.config.db.password = 'benchmarkdbpass'
app.config.db.database = 'hello_world'
app.config.db.pool_size = 10
db = Database(app)
db.define_models(World, Fortune)
@app.route()
@service.json
async def json():
return {'message': 'Hello, World!'}
@app.route("/db", pipeline=[db.pipe])
@service.json
async def get_random_world():
return World.get(randint(1, 10000)).serialize()
def get_qparam():
try:
rv = int(request.query_params.queries or 1)
except ValueError:
return 1
if rv < 1:
return 1
if rv > 500:
return 500
return rv
@app.route("/queries", pipeline=[db.pipe])
@service.json
async def get_random_worlds():
num_queries = get_qparam()
worlds = [
World.get(randint(1, 10000)).serialize() for _ in range(num_queries)]
return worlds
@app.route(pipeline=[db.pipe], output='template')
async def fortunes():
fortunes = Fortune.all().select()
fortunes.append(
Fortune.new(id=0, message="Additional fortune added at request time."))
fortunes.sort(lambda m: m.message)
return {'fortunes': fortunes}
@app.route(pipeline=[db.pipe])
@service.json
async def updates():
num_queries = get_qparam()
worlds = []
rp = partial(randint, 1, 10000)
ids = [rp() for _ in range(num_queries)]
ids.sort() # To avoid deadlock
for id in ids:
world = World.get(id)
world.update_record(randomnumber=rp())
worlds.append(world.serialize())
return worlds
@app.route(output='bytes')
async def plaintext():
return b'Hello, World!'
|
{
"content_hash": "0cdef7f5be79c3cd51bf00fcd95256c6",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 23.24,
"alnum_prop": 0.6579173838209983,
"repo_name": "greenlaw110/FrameworkBenchmarks",
"id": "4544f1a7db915feaf8d035844c36247777b86c1e",
"size": "2325",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "frameworks/Python/emmett/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "104"
},
{
"name": "Batchfile",
"bytes": "1125"
},
{
"name": "C",
"bytes": "176072"
},
{
"name": "C#",
"bytes": "453771"
},
{
"name": "C++",
"bytes": "170038"
},
{
"name": "CMake",
"bytes": "6315"
},
{
"name": "CSS",
"bytes": "2035"
},
{
"name": "Clojure",
"bytes": "80807"
},
{
"name": "Common Lisp",
"bytes": "22084"
},
{
"name": "Crystal",
"bytes": "27193"
},
{
"name": "D",
"bytes": "203825"
},
{
"name": "Dart",
"bytes": "52130"
},
{
"name": "Dockerfile",
"bytes": "327921"
},
{
"name": "Dylan",
"bytes": "868"
},
{
"name": "Elixir",
"bytes": "14368"
},
{
"name": "Erlang",
"bytes": "41222"
},
{
"name": "F#",
"bytes": "89739"
},
{
"name": "Go",
"bytes": "163503"
},
{
"name": "Groovy",
"bytes": "21834"
},
{
"name": "HTML",
"bytes": "141462"
},
{
"name": "Hack",
"bytes": "2261"
},
{
"name": "Haskell",
"bytes": "70225"
},
{
"name": "Java",
"bytes": "679190"
},
{
"name": "JavaScript",
"bytes": "174521"
},
{
"name": "Kotlin",
"bytes": "57654"
},
{
"name": "Lua",
"bytes": "14508"
},
{
"name": "Makefile",
"bytes": "4321"
},
{
"name": "Meson",
"bytes": "846"
},
{
"name": "MoonScript",
"bytes": "2396"
},
{
"name": "Nim",
"bytes": "1288"
},
{
"name": "PHP",
"bytes": "504030"
},
{
"name": "PLpgSQL",
"bytes": "3446"
},
{
"name": "Perl",
"bytes": "15376"
},
{
"name": "Python",
"bytes": "332042"
},
{
"name": "QMake",
"bytes": "2301"
},
{
"name": "Racket",
"bytes": "5069"
},
{
"name": "Ruby",
"bytes": "88707"
},
{
"name": "Rust",
"bytes": "81497"
},
{
"name": "Scala",
"bytes": "101711"
},
{
"name": "Shell",
"bytes": "96313"
},
{
"name": "Smarty",
"bytes": "436"
},
{
"name": "Swift",
"bytes": "101361"
},
{
"name": "TypeScript",
"bytes": "14303"
},
{
"name": "UrWeb",
"bytes": "4453"
},
{
"name": "Vala",
"bytes": "1579"
},
{
"name": "Visual Basic",
"bytes": "27087"
},
{
"name": "Volt",
"bytes": "511"
}
],
"symlink_target": ""
}
|
import hashlib
from pathlib import PurePath
from .base import Mangler
class HashMangler(Mangler):
def __init__(self, target, hash_name='md5', suffixes=None):
super().__init__(target)
self.hash_name = hash_name
self.suffixes = suffixes or ['.css', '.js']
def can_process(self, file_obj):
return file_obj.current_name.suffix in self.suffixes
def hash_file(self, content):
return hashlib.new(self.hash_name, content).hexdigest()
def process_file(self, file_obj):
extensions = ''.join(file_obj.current_name.suffixes)
ext_len = len(extensions)
base_name = str(file_obj.current_name)[:-ext_len]
hash_value = self.hash_file(file_obj.content)
hash_name = '{}-{}{}'.format(base_name, hash_value, extensions)
file_obj.current_name = PurePath(hash_name)
yield file_obj
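# Illustrative sketch only (not part of the package): exercising the renaming
# scheme above with a minimal stand-in for the file objects the pipeline
# passes around. It assumes Mangler.__init__ simply records the target and
# that file objects expose `current_name` (a PurePath) and `content` (bytes);
# the stand-in class below is hypothetical.
if __name__ == '__main__':
    class _FakeFile:
        def __init__(self, name, content):
            self.current_name = PurePath(name)
            self.content = content
    mangler = HashMangler(target=None)
    demo = _FakeFile('app.min.js', b'console.log(1);')
    if mangler.can_process(demo):
        for result in mangler.process_file(demo):
            # prints e.g. app-<md5 of content>.min.js
            print(result.current_name)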
|
{
"content_hash": "b6795eef289c1dcfbcf026386ffd565a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 30.310344827586206,
"alnum_prop": 0.6359499431171786,
"repo_name": "funkybob/django-mangle",
"id": "bfbc2664fa5bb7c4a60d5af5face087a61470769",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mangle/hasher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51"
},
{
"name": "JavaScript",
"bytes": "41"
},
{
"name": "Python",
"bytes": "11648"
}
],
"symlink_target": ""
}
|
"""
Information, resources and vulnerabilities database API.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["Database"]
from ..config import Config
from ...common import Singleton
from ...messaging.codes import MessageCode
#------------------------------------------------------------------------------
class Database(Singleton):
"""
Access to information, resources and vulnerabilities found by the plugins.
"""
#--------------------------------------------------------------------------
@staticmethod
def add(data):
"""
Add data to the database.
:param data: Data to add.
:type data: Data
:returns: True if the data was added, False if it was updated.
:rtype: bool
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_ADD, data)
#--------------------------------------------------------------------------
@staticmethod
def async_add(data):
"""
Asynchronously add data to the database.
:param data: Data to add.
:type data: Data
"""
Config._context.async_remote_call(
MessageCode.MSG_RPC_DATA_ADD, data)
#--------------------------------------------------------------------------
@staticmethod
def async_add_many(dataset):
"""
Asynchronously add multiple data objects to the database.
:param dataset: Data to add.
:type dataset: list(Data)
"""
Config._context.async_remote_call(
MessageCode.MSG_RPC_DATA_ADD_MANY, dataset)
#--------------------------------------------------------------------------
@staticmethod
def remove(identity):
"""
Remove an object given its identity hash.
.. warning: Only use this if you *really* know what you're doing!
:param identity: Identity hash.
:type identity: str
:returns: True if the object was removed, False if it didn't exist.
:rtype: bool
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_REMOVE, identity)
#--------------------------------------------------------------------------
@staticmethod
def async_remove(identity):
"""
Asynchronously remove an object given its identity hash.
.. warning: Only use this if you *really* know what you're doing!
:param identity: Identity hash.
:type identity: str
"""
Config._context.async_remote_call(
MessageCode.MSG_RPC_DATA_REMOVE, identity)
#--------------------------------------------------------------------------
@staticmethod
def async_remove_many(identities):
"""
Asynchronously remove multiple objects given their identity hashes.
.. warning: Only use this if you *really* know what you're doing!
:param identities: Identity hashes.
        :type identities: list(str)
"""
Config._context.async_remote_call(
MessageCode.MSG_RPC_DATA_REMOVE_MANY, identities)
#--------------------------------------------------------------------------
@staticmethod
def has_key(identity):
"""
Check if an object with the given
identity hash is present in the database.
:param identity: Identity hash.
:type identity: str
:returns: True if the object is present, False otherwise.
:rtype: bool
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_CHECK, identity)
#--------------------------------------------------------------------------
@staticmethod
def get(identity):
"""
Get an object given its identity hash.
:param identity: Identity hash.
:type identity: str
:returns: Data object if found, None otherwise.
:rtype: Data | None
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_GET, identity)
#--------------------------------------------------------------------------
@staticmethod
def get_many(identities):
"""
        Get multiple objects given their identity hashes.
:param identities: Identity hashes.
:type identities: list(str)
:returns: Data objects.
:rtype: list(Data)
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_GET_MANY, identities)
#--------------------------------------------------------------------------
@staticmethod
def keys(data_type = None, data_subtype = None):
"""
Get the identity hashes for all objects of the requested
type, optionally filtering by subtype.
:param data_type: Optional data type. One of the Data.TYPE_* values.
:type data_type: int | None
:param data_subtype: Optional data subtype.
:type data_subtype: str | None
:returns: Identity hashes.
:rtype: set(str)
"""
if data_type is None:
if data_subtype is not None:
raise NotImplementedError(
"Can't filter by subtype for all types")
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_KEYS, data_type, data_subtype)
#--------------------------------------------------------------------------
@staticmethod
def count(data_type = None, data_subtype = None):
"""
Count all objects of the requested type,
optionally filtering by subtype.
:param data_type: Optional data type. One of the Data.TYPE_* values.
:type data_type: int | None
:param data_subtype: Optional data subtype.
:type data_subtype: str | None
:returns: Count of requested objects.
:rtype: int
"""
if data_type is None:
if data_subtype is not None:
raise NotImplementedError(
"Can't filter by subtype for all types")
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_COUNT, data_type, data_subtype)
#--------------------------------------------------------------------------
@classmethod
def iterate(self, data_type = None, data_subtype = None):
"""
Iterate through all objects of the requested type,
optionally filtering by subtype.
:param data_type: Optional data type. One of the Data.TYPE_* values.
:type data_type: int | None
:param data_subtype: Optional data subtype.
:type data_subtype: str | None
:returns: Generator of Data objects.
:rtype: generator(Data)
"""
for identity in self.keys(data_type, data_subtype):
yield self.get(identity)
#--------------------------------------------------------------------------
@staticmethod
def get_plugin_history(identity):
"""
Find out which plugins have already processed this data object.
:param identity: Identity hash.
:type identity: str
:returns: Names of the plugins that already processed this data object.
:rtype: set(str)
"""
return Config._context.remote_call(
MessageCode.MSG_RPC_DATA_PLUGINS, identity)
#--------------------------------------------------------------------------
@classmethod
def __len__(self):
return self.count()
#--------------------------------------------------------------------------
@classmethod
def __contains__(self, data):
try:
identity = data.identity
except AttributeError:
identity = data
return self.has_key(identity)
#--------------------------------------------------------------------------
@classmethod
def __iter__(self):
"""
Iterate through all objects of the database.
:returns: Generator of Data objects.
:rtype: generator(Data)
"""
return self.iterate()
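#------------------------------------------------------------------------------
# Example helper (illustrative only, not part of the upstream API): a sketch
# of how a plugin might walk the stored objects of one type, assuming it runs
# inside a GoLismero plugin process where Config._context has already been
# wired up by the framework. The helper name is hypothetical.
def _example_plugin_history(data_type = None):
    """
    Map each stored identity hash to the plugins that already processed it.
    :param data_type: Optional data type. One of the Data.TYPE_* values.
    :type data_type: int | None
    :returns: Identity hash -> plugin names.
    :rtype: dict(str -> set(str))
    """
    return dict(
        (identity, Database.get_plugin_history(identity))
        for identity in Database.keys(data_type)
    )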
|
{
"content_hash": "5eca27e6a5e4060e4eb45912ddb76db5",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 79,
"avg_line_length": 30.90972222222222,
"alnum_prop": 0.5206695124691081,
"repo_name": "JeyZeta/Dangerous",
"id": "165d1c9363c82bb5e38b8b8a55bb04cf8a9a6c5e",
"size": "8949",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/golismero/api/data/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# TODO add tests for all of these
EQ_FUNCTION = lambda item_value, test_value: item_value == test_value
NE_FUNCTION = lambda item_value, test_value: item_value != test_value
LE_FUNCTION = lambda item_value, test_value: item_value <= test_value
LT_FUNCTION = lambda item_value, test_value: item_value < test_value
GE_FUNCTION = lambda item_value, test_value: item_value >= test_value
GT_FUNCTION = lambda item_value, test_value: item_value > test_value
COMPARISON_FUNCS = {
'EQ': EQ_FUNCTION,
'=': EQ_FUNCTION,
'NE': NE_FUNCTION,
'!=': NE_FUNCTION,
'LE': LE_FUNCTION,
'<=': LE_FUNCTION,
'LT': LT_FUNCTION,
'<': LT_FUNCTION,
'GE': GE_FUNCTION,
'>=': GE_FUNCTION,
'GT': GT_FUNCTION,
'>': GT_FUNCTION,
'NULL': lambda item_value: item_value is None,
'NOT_NULL': lambda item_value: item_value is not None,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, test_value: item_value in test_value,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
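if __name__ == '__main__':
    # Illustrative checks only (not part of moto): most operators take
    # (item_value, test_value), BETWEEN takes an item value plus lower and
    # upper bounds, and unknown operator names return None.
    assert get_comparison_func('GE')(5, 3)
    assert get_comparison_func('BETWEEN')(5, 1, 10)
    assert get_comparison_func('BEGINS_WITH')('dynamo', 'dyn')
    assert get_comparison_func('NO_SUCH_OP') is None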
|
{
"content_hash": "d8f4ee11fafb18a1494595e9cd6e0211",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 121,
"avg_line_length": 34.853658536585364,
"alnum_prop": 0.6773967809657103,
"repo_name": "pior/moto",
"id": "808e120bc6a85b71b349337f355a487a3d7583eb",
"size": "1429",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "moto/dynamodb2/comparisons.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1455169"
}
],
"symlink_target": ""
}
|
from annoying.functions import get_object_or_None
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect
from django.template import RequestContext
from django.template.response import TemplateResponse
from pixelpuncher.game.utils.message import add_game_message
from pixelpuncher.item.forms import ContainerForm
from pixelpuncher.item.models import PlayerContainer, Item, Container
from pixelpuncher.item.utils import take_item_from_container, drop_item, assign_container
from pixelpuncher.player.decorators import player_required
@login_required
@player_required
def open_container(request, player, container_id):
request.session['container_id'] = container_id
player_container = get_object_or_None(PlayerContainer, container_id=container_id, player=player)
if player_container is None:
container = get_object_or_404(Container, id=container_id)
player_container = assign_container(player, container)
if request.method == "POST":
form = ContainerForm(request.POST, player_container=player_container)
result = form.save()
add_game_message(player, result)
else:
form = ContainerForm(player_container=player_container)
context = {
"user": player.user,
"player": player,
"player_container": player_container,
"form": form
}
return TemplateResponse(
request, "item/container/detail.html", RequestContext(request, context))
@login_required
@player_required
def take_item(request, player, item_id):
item = get_object_or_404(Item, pk=item_id)
player_container_id = item.container.pk
if item.container.player == player:
result = take_item_from_container(player, item)
add_game_message(player, result)
return redirect("container:open", player_container_id)
@login_required
@player_required
def discard_item(request, player, item_id):
item = get_object_or_404(Item, pk=item_id)
player_container_id = item.container.pk
if item.container.player == player:
result = drop_item(item.pk)
add_game_message(player, result)
return redirect("container:open", player_container_id)
|
{
"content_hash": "5d89fc8637e15c46fcd314c8d2b52976",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 100,
"avg_line_length": 34.13846153846154,
"alnum_prop": 0.7273546642631816,
"repo_name": "ej2/pixelpuncher",
"id": "389b08efe01c05aedbe1790d5399685b98c81441",
"size": "2219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixelpuncher/item/views/container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "155880"
},
{
"name": "HTML",
"bytes": "108414"
},
{
"name": "JavaScript",
"bytes": "29178"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "282954"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
}
|
"""The SleuthKit (TSK) format analyzer helper implementation."""
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class TSKAnalyzerHelper(analyzer_helper.AnalyzerHelper):
"""Class that implements the TSK analyzer helper."""
FORMAT_CATEGORIES = frozenset([
definitions.FORMAT_CATEGORY_FILE_SYSTEM])
TYPE_INDICATOR = definitions.TYPE_INDICATOR_TSK
def GetFormatSpecification(self):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification or None if the format cannot
be defined by a specification object.
"""
format_specification = specification.FormatSpecification(
self.type_indicator)
# FAT volume header signature.
format_specification.AddNewSignature(b'\x55\xaa', offset=510)
if definitions.PREFERRED_NTFS_BACK_END == self.TYPE_INDICATOR:
# NTFS file system signature.
format_specification.AddNewSignature(b'NTFS ', offset=3)
# HFS boot block signature.
format_specification.AddNewSignature(b'LK', offset=0)
# HFS master directory block signature.
format_specification.AddNewSignature(b'BD', offset=0)
# HFS+ file system signature.
format_specification.AddNewSignature(b'H+', offset=1024)
# HFSX file system signature.
format_specification.AddNewSignature(b'HX', offset=1024)
# Ext file system signature.
format_specification.AddNewSignature(b'\x53\xef', offset=1080)
# ISO9660 file system signature.
format_specification.AddNewSignature(b'CD001', offset=32769)
# YAFFS file system signature.
return format_specification
analyzer.Analyzer.RegisterHelper(TSKAnalyzerHelper())
|
{
"content_hash": "0392236da6d25f7e0fe4e7bcd9240b68",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 31.105263157894736,
"alnum_prop": 0.7422447828539199,
"repo_name": "dc3-plaso/dfvfs",
"id": "b9a49928d61dfff59516092e05cb7df6a8f723fa",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfvfs/analyzer/tsk_analyzer_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "1397977"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
}
|
from time import time
import datetime
import os
import shutil
import webbrowser
import argparse
import ttk
from Tkinter import *
import Tkinter as tk
from tkFileDialog import askopenfilename
import tkMessageBox
import train_classifier
import predict_folders
from bookmark_db import database
from ml_add import add
from bmedit import edit
from bmsearch import *
def load_file(custom_path):
if not os.path.exists("data"):
os.makedirs("data")
dest = "data/"
filepath = os.path.expanduser("~/.config/google-chrome/Default/Bookmarks")
if not os.path.exists("data/Bookmarks"):
if custom_path == 1:
openfilewin = Tk()
openfilewin.withdraw()
bmfile = askopenfilename(parent = openfilewin,filetypes = (("Chrome bookmark file", "Bookmarks"),))
shutil.copy(os.path.abspath(bmfile),os.path.abspath(dest))
openfilewin.destroy()
else:
shutil.copy(filepath,os.path.abspath(dest))
class mainui(object):
def __init__(self):
self.dpath = "./data/test.db"
self.fpath = "./data/Bookmarks"
self.currurl = ""
self.db = database(self.dpath,self.fpath)
self.t0 = time()
self.db.get_features()
self.clf,self.vectorizer,self.Training_error = train_classifier.train()
self.bmbar = {}
self.read()
def selectedurl(self,a):
curItem = self.tree.focus()
if self.tree.item(curItem)['tags'][0] == "url":
id = self.tree.item(curItem)['tags'][1]
self.openinbrowser()
def selecteditem(self,a):
curItem = self.tree.focus()
if self.tree.item(curItem)['tags'][0] == "url":
self.current = (1,self.tree.item(curItem)['tags'][1])
sql = "select * from bookmarks where id = ?"
bookm = bookmark(self.db.sqlselect(sql,(self.current[1],))[0])
self.textd.show(bookm.url)
self.currurl = bookm.url
else:
self.current = (0,self.tree.item(curItem)['text'])
#print self.current
def add(self):
a = add(self.db,self.clf,self.vectorizer,self)
self.update_tree()
def edit(self):
e = edit(self.current,self.db)
self.update_tree()
def search(self):
s = search(self.db)
def delete(self):
if self.current[0]==0:
#folder
sql = "delete from bookmarks where folder = ?"
self.db.sql(sql,(self.current[1],))
else:
#url
sql = "select * from bookmarks where id = ?"
bookm = bookmark(self.db.sqlselect(sql,(self.current[1],))[0])
sql = "delete from bookmarks where id = ?"
self.db.sql(sql,(self.current[1],))
self.update_tree()
def update_tree(self):
self.tree.destroy()
self.read()
self.build_tktree()
self.tree.pack(side = LEFT)
self.S.configure(command=self.tree.yview)
self.tree.configure(yscroll=self.S.set,height = 20)
def read(self):
self.bmbar = {}
a = self.db.sqlselect("select * from bookmarks order by folder asc")
for bm in a:
bookm = bookmark(bm)
if bookm.folder not in self.bmbar.keys():
self.bmbar[bookm.folder] = []
self.bmbar[bookm.folder].append(bookm)
def build_tktree(self):
self.tree = ttk.Treeview(self.ft,height = 20)
self.tree.column("#0",minwidth=500,width=1000,stretch = True)
self.tree.heading('#0', text="Bookmarks", anchor='w')
self.tree.bind('<Double-Button-1>', self.selectedurl)
self.tree.bind('<ButtonRelease-1>', self.selecteditem)
for key in sorted(self.bmbar.keys()):
id = self.tree.insert("","end",text = key,tags = "folder")
for bookm in self.bmbar[key]:
self.tree.insert(id,"end",text = "".join([ch for ch in bookm.name if ord(ch)<= 128]),tags = ["url",bookm.id])
def create_layout(self):
self.root = Tk()
self.root.title("Smart Bookmarks")
self.root.geometry("1020x550+50+50")
self.ft = Frame(self.root)
self.tt = Frame(self.root)
self.textd = display_url(self.tt)
bt = Frame(self.root)
b1 = Button(bt,text="Copy Url",command = self.toclipboard)
b2 = Button(bt,text="Browse",command = self.openinbrowser)
menubar = Menu(self.root)
menubar.add_command(label = "Add",command = self.add)
menubar.add_command(label = "Edit",command = self.edit)
menubar.add_command(label = "Delete",command = self.delete)
menubar.add_command(label = "Search",command = self.search)
self.root.config(menu = menubar)
#b1.configure(command = self.copyurl)
#b2.configure(command = self.submit)
b1.pack(side = LEFT,padx = 10,pady =5)
b2.pack(side = LEFT,padx = 10,pady =5)
#b3.pack(side = LEFT,padx = 10,pady =5)
self.build_tktree()
self.S = Scrollbar(self.ft, command=self.tree.yview)
self.tree.configure(yscroll=self.S.set,height = 20)
self.tree.pack(side = LEFT)
self.S.pack(side = RIGHT,fill = BOTH)
self.ft.pack()
self.tt.pack()
bt.pack()
def toclipboard(self):
root = Tk()
root.withdraw()
root.clipboard_clear()
root.clipboard_append(self.currurl)
root.destroy()
def openinbrowser(self):
try:
webbrowser.open_new_tab(self.currurl)
except:
print "error opening default browser"
def start(self):
self.create_layout()
self.root.mainloop()
parser = argparse.ArgumentParser(description='Smart Bookmarks.')
parser.add_argument('--custom-file', dest='custom', action='store_const',const=1, default=0,help='chose a custom chrome bookmarks file')
#args = sys.argsgv[1:]
args = parser.parse_args()
load_file(args.custom)
app = mainui()
app.start()
app.db.con.commit()
|
{
"content_hash": "1a31cdb2e535782db7406e843b070972",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 136,
"avg_line_length": 28.01063829787234,
"alnum_prop": 0.6779339156855299,
"repo_name": "alphawing/SmartBookmarks",
"id": "e4fd61fb0ff752d4e79d0c3a1dac4480797b0e39",
"size": "5266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SmartBookmarks/SmartBookmarks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22584"
}
],
"symlink_target": ""
}
|
"""
tipfyext.wtforms
~~~~~~~~~~~~~~~~~
Enhanced WTForms form library support for tipfy.
:copyright: 2011 WTForms authors.
:copyright: 2011 tipfy.org.
:copyright: 2009 Plurk Inc.
:license: BSD, see LICENSE.txt for more details.
"""
from tipfy import REQUIRED_VALUE
from tipfyext.wtforms import validators, widgets
from tipfyext.wtforms.fields import *
from tipfyext.wtforms.form import Form
from tipfyext.wtforms.validators import ValidationError
#: Default configuration values for this module. Keys are:
#:
#: - ``recaptcha_options``: A dictionary of options to customize the look of
#: the reCAPTCHA widget. See a description of the available options in
#: the `API docs <http://recaptcha.net/apidocs/captcha/client.html>`_.
#:
#: - ``recaptcha_use_ssl``: ``True`` to use SSL for ReCaptcha requests,
#: ``False`` otherwise.
#:
#: - ``recaptcha_public_key``: Public key for ReCaptcha.
#:
#: - ``recaptcha_private_key``: Private key for ReCaptcha.
#:
#: - ``csrf_tokens``: Maximum number of CSRF protection tokens to store in
#: session.
default_config = {
'recaptcha_options': None,
'recaptcha_use_ssl': False,
'recaptcha_public_key': REQUIRED_VALUE,
'recaptcha_private_key': REQUIRED_VALUE,
'csrf_tokens': 5,
}
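#: Example only (illustrative, not part of the package): overriding the two
#: required ReCaptcha keys, assuming the usual tipfy convention of keying
#: per-module settings by the module's import path in the application config.
_example_config = {
    'tipfyext.wtforms': {
        'recaptcha_public_key': 'my-public-key',
        'recaptcha_private_key': 'my-private-key',
    },
}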
|
{
"content_hash": "df403090377a3dd1eb27a23ece03755b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 32.3,
"alnum_prop": 0.68343653250774,
"repo_name": "stanlee321/pysolper",
"id": "bd2be4481f8a4a10254cbf73d70f6b61dcdbcded",
"size": "1316",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "permit/lib/dist/tipfyext/wtforms/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15802"
},
{
"name": "HTML",
"bytes": "60816"
},
{
"name": "JavaScript",
"bytes": "12262"
},
{
"name": "Python",
"bytes": "4268083"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from irma.common.utils import sql
import config.parser as config
engine = create_engine(config.sqldb.url, echo=config.sql_debug_enabled(),
connect_args={"sslmode": config.sqldb.sslmode,
"sslrootcert": config.sqldb.sslrootcert,
"sslcert": config.sqldb.sslcert,
"sslkey": config.sqldb.sslkey})
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False,
bind=engine))
def session_transaction():
return sql.transaction(db_session)
def session_query():
return sql.query(db_session)
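# Example only (not part of the IRMA frontend API): a sketch of the intended
# usage, assuming sql.transaction() and sql.query() return context managers
# that yield a session, with the transaction variant committing on success
# and rolling back on error. The helper name and `model` argument are
# hypothetical.
def _example_count_rows(model):
    with session_query() as session:
        return session.query(model).count()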
|
{
"content_hash": "8c1d7154381a2834efae5f8a547a83e4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 35.31818181818182,
"alnum_prop": 0.6036036036036037,
"repo_name": "quarkslab/irma",
"id": "9f2dece37e7b440c1e86a6e235be4a6a1d824218",
"size": "1302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/api/common/sessions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "CSS",
"bytes": "86535"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "2366"
},
{
"name": "HTML",
"bytes": "26577"
},
{
"name": "JavaScript",
"bytes": "1774854"
},
{
"name": "Jinja",
"bytes": "2672"
},
{
"name": "Less",
"bytes": "13774"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "15660"
},
{
"name": "Python",
"bytes": "797592"
},
{
"name": "Shell",
"bytes": "61907"
}
],
"symlink_target": ""
}
|
from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render
def search(request):
return render(request, "search/search.html")
|
{
"content_hash": "81b7bbec76a95bd18f7889680dbb0ca2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.8033707865168539,
"repo_name": "burk/helgapp",
"id": "1f0abfd0144c0b0f9c88b107eda11518df897189",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37"
},
{
"name": "JavaScript",
"bytes": "5248"
},
{
"name": "Python",
"bytes": "10508"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from datetime import timedelta
import # broken
import a
import b
import foo # broken
import z
from
from a import C1
from alphabet import *
from alphabet import B, A
from alphabet import C
from alphabet import D
from b import func
from . import m1
from . import m4, m5
from .pkg import m3
from .. import m2
print(z, b, a, C1, func, sys, abc, foo, timedelta, A, B, C, D, m1, m2, m3, m4, m5)
|
{
"content_hash": "dc83026751418577093050f31c7d8c47",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 82,
"avg_line_length": 19.28,
"alnum_prop": 0.7302904564315352,
"repo_name": "hurricup/intellij-community",
"id": "b06b7ad267a8a2d0964fadd5bf38e24767530a74",
"size": "482",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/testData/optimizeImports/alphabeticalOrder.after.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "59458"
},
{
"name": "C",
"bytes": "215610"
},
{
"name": "C#",
"bytes": "1538"
},
{
"name": "C++",
"bytes": "196925"
},
{
"name": "CSS",
"bytes": "197224"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Cucumber",
"bytes": "14382"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groff",
"bytes": "35232"
},
{
"name": "Groovy",
"bytes": "2831828"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1809290"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "156277117"
},
{
"name": "JavaScript",
"bytes": "563135"
},
{
"name": "Jupyter Notebook",
"bytes": "92629"
},
{
"name": "Kotlin",
"bytes": "1888388"
},
{
"name": "Lex",
"bytes": "179397"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "52097"
},
{
"name": "Objective-C",
"bytes": "28750"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6607"
},
{
"name": "Python",
"bytes": "23832829"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "61583"
},
{
"name": "Smalltalk",
"bytes": "64"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
}
|
from openpyxl.compat import zip
from openpyxl.workbook import Workbook
from openpyxl.worksheet import Worksheet
from openpyxl.writer.comments import CommentWriter
from openpyxl.comments import Comment
from openpyxl.tests.helper import compare_xml
from openpyxl.xml.functions import fromstring, get_document_content
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.writer.comments import vmlns, excelns
def _create_ws():
wb = Workbook()
ws = Worksheet(wb)
comment1 = Comment("text", "author")
comment2 = Comment("text2", "author2")
comment3 = Comment("text3", "author3")
ws.cell(coordinate="B2").comment = comment1
ws.cell(coordinate="C7").comment = comment2
ws.cell(coordinate="D9").comment = comment3
return ws, comment1, comment2, comment3
def test_comment_writer_init():
ws, comment1, comment2, comment3 = _create_ws()
cw = CommentWriter(ws)
assert set(cw.authors) == set(["author", "author2", "author3"])
assert set(cw.comments) == set([comment1, comment2, comment3])
def test_write_comments(datadir):
datadir.chdir()
ws = _create_ws()[0]
cw = CommentWriter(ws)
content = cw.write_comments()
with open('comments1.xml') as expected:
correct = fromstring(expected.read())
check = fromstring(content)
# check top-level elements have the same name
for i, j in zip(correct.getchildren(), check.getchildren()):
assert i.tag == j.tag
correct_comments = correct.find('{%s}commentList' % SHEET_MAIN_NS).getchildren()
check_comments = check.find('{%s}commentList' % SHEET_MAIN_NS).getchildren()
correct_authors = correct.find('{%s}authors' % SHEET_MAIN_NS).getchildren()
check_authors = check.find('{%s}authors' % SHEET_MAIN_NS).getchildren()
# replace author ids with author names
for i in correct_comments:
i.attrib["authorId"] = correct_authors[int(i.attrib["authorId"])].text
for i in check_comments:
i.attrib["authorId"] = check_authors[int(i.attrib["authorId"])].text
# sort the comment list
correct_comments.sort(key=lambda tag: tag.attrib["ref"])
check_comments.sort(key=lambda tag: tag.attrib["ref"])
correct.find('{%s}commentList' % SHEET_MAIN_NS)[:] = correct_comments
check.find('{%s}commentList' % SHEET_MAIN_NS)[:] = check_comments
# sort the author list
correct_authors.sort(key=lambda tag: tag.text)
    check_authors.sort(key=lambda tag: tag.text)
correct.find('{%s}authors' % SHEET_MAIN_NS)[:] = correct_authors
check.find('{%s}authors' % SHEET_MAIN_NS)[:] = check_authors
diff = compare_xml(get_document_content(correct), get_document_content(check))
assert diff is None, diff
def test_write_comments_vml(datadir):
datadir.chdir()
ws = _create_ws()[0]
cw = CommentWriter(ws)
content = cw.write_comments_vml()
with open('commentsDrawing1.vml') as expected:
correct = fromstring(expected.read())
check = fromstring(content)
correct_ids = []
correct_coords = []
check_ids = []
check_coords = []
for i in correct.findall("{%s}shape" % vmlns):
correct_ids.append(i.attrib["id"])
row = i.find("{%s}ClientData" % excelns).find("{%s}Row" % excelns).text
col = i.find("{%s}ClientData" % excelns).find("{%s}Column" % excelns).text
correct_coords.append((row,col))
# blank the data we are checking separately
i.attrib["id"] = "0"
i.find("{%s}ClientData" % excelns).find("{%s}Row" % excelns).text="0"
i.find("{%s}ClientData" % excelns).find("{%s}Column" % excelns).text="0"
for i in check.findall("{%s}shape" % vmlns):
check_ids.append(i.attrib["id"])
row = i.find("{%s}ClientData" % excelns).find("{%s}Row" % excelns).text
col = i.find("{%s}ClientData" % excelns).find("{%s}Column" % excelns).text
check_coords.append((row,col))
# blank the data we are checking separately
i.attrib["id"] = "0"
i.find("{%s}ClientData" % excelns).find("{%s}Row" % excelns).text="0"
i.find("{%s}ClientData" % excelns).find("{%s}Column" % excelns).text="0"
assert set(correct_coords) == set(check_coords)
assert set(correct_ids) == set(check_ids)
diff = compare_xml(get_document_content(correct), get_document_content(check))
assert diff is None, diff
|
{
"content_hash": "2b3d5aa9db8d83432a707b38d8b88a40",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 88,
"avg_line_length": 44.65686274509804,
"alnum_prop": 0.6265642151481888,
"repo_name": "Hitachi-Data-Systems/org-chart-builder",
"id": "cb1b1ea611c38a4c099cb033d988cacb2cede2ff",
"size": "5741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openpyxl/comments/tests/test_comment_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1358145"
}
],
"symlink_target": ""
}
|
import math
def oblicalc(target_angle, angle_of_fall, inclination, rotation=90):
"""Return the oblicuity angle of a shell hitting a ship's armour plate.
All measurement conventions have been kept from Nathan Okun's OBLICALC.EXE program. Pitch and roll
are not considered, as they don't apply in the Naval War College's wargame.
Arguments:
- target_angle (float): the angle in degrees between the line of fire and the target's keel,
measured clockwise from the target's bow.
- angle_of_fall (float): the angle in degrees between the shell's trajectory and the horizontal plane,
measured up from the sea surface.
- inclination (float): the inclination in degrees of the ship's armour plate, with an input of 0
meaning the armour plate is vertical. A negative input means the plate slopes away from the firer,
and faces up. A positive input means the top overhangs the bottom and the plate faces down.
- rotation (float): the rotation of the plate on the deck plane, measured clockwise from the target's
bow. The default input of 90 degrees means the plate is parallel to the keel.
"""
# Convert inputs to radians.
target_angle = math.radians(target_angle)
angle_of_fall = math.radians(angle_of_fall)
inclination = math.radians(inclination)
rotation = math.radians(rotation)
# Aggregate target angle and plate rotation.
target_angle = target_angle + rotation - math.radians(90)
# Calculate the obliquity from the plate's normal.
obliquity = math.acos(math.cos(inclination) * math.sin(target_angle) * math.cos(angle_of_fall)
- math.sin(inclination) * math.sin(angle_of_fall))
# Convert back to degrees
obliquity = math.degrees(obliquity)
return obliquity
print(oblicalc(83, 12, -60))
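# Illustrative sanity checks (not from the original source): a flat-trajectory
# shot striking a vertical plate square-on should give 0 degrees obliquity,
# and with everything else square the obliquity should equal the angle of fall.
if __name__ == "__main__":
    assert abs(oblicalc(90, 0, 0)) < 1e-6
    assert abs(oblicalc(90, 30, 0) - 30) < 1e-6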
|
{
"content_hash": "dda1d9488a07c23c1109572bd51d56ca",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 104,
"avg_line_length": 44.92307692307692,
"alnum_prop": 0.7442922374429224,
"repo_name": "doolanshire/Combat-Models",
"id": "cc9376d76f305005f6343794b63cdc9031a811aa",
"size": "1771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "okun/oblicalc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "14642"
},
{
"name": "Python",
"bytes": "73092"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os
import random
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training.tracking import base as trackable_base
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if not context.executing_eagerly():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if not context.executing_eagerly():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
# TODO(xpan): Why _mutable_hash_table_v2 doesn't create empty
# table as it claims in eager mode?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
def testResourceColocation(self):
# train.Saver is V1 only API.
with ops_lib.Graph().as_default():
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
with ops_lib.device("/job:ps/device:GPU:0"):
v = variable_scope.get_variable(
"v0", shape=[10, 2], partitioner=partitioner, use_resource=True)
saver_module.Saver({"v0": v}).build()
save_op = None
for op in ops_lib.get_default_graph().get_operations():
if op.type == "SaveV2":
save_op = op
break
assert save_op is not None
for save_inp in save_op.inputs[3:]:
# Input to SaveV2 op is placed on CPU of the same device as
# the Variable.
self.assertEqual("/job:ps/device:CPU:0", save_inp.device)
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
self.evaluate(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
self.evaluate(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3, 3.0)
self.assertAllEqual(w4, 4.0)
@test_util.run_in_graph_and_eager_modes
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.executing_eagerly():
sess = None
else:
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
self.assertEqual(self.evaluate(v), [1])
def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
with ops_lib.Graph().as_default() as g:
v = resource_variable_ops.ResourceVariable(1.0, name="v")
with ops_lib.name_scope("saver1"):
saver_module.Saver()
with ops_lib.name_scope("saver2"):
saver_module.Saver({"name": v})
ops_in_saver1_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver1/") and
not op.name.startswith("saver1/save/"))]
self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
ops_in_saver2_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver2/") and
not op.name.startswith("saver2/save/"))]
self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# train.Saver is V1 only API.
with ops_lib.Graph().as_default():
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable
},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.cached_session() as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
def testFilenameTensor(self):
# train.Saver is V1 only API.
with ops_lib.Graph().as_default():
v0 = variables.VariableV1(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.cached_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(self.evaluate(tensor), filename)
def testInvalidPath(self):
v0 = variables.VariableV1(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.cached_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegex(
ValueError, "The passed save_path is not a valid checkpoint:"):
save.restore(sess, "invalid path")
@test_util.run_v1_only("train.Saver is V1 only API.")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.cached_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = variables.VariableV1(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
self.evaluate(variables.global_variables_initializer())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.cached_session() as sess:
v = variables.VariableV1(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
self.evaluate(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), self.evaluate(v))
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegex(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegex(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegex(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegex(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
@test_util.run_v1_only("train.Saver and VariableV1 are V1 only APIs.")
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
self.evaluate(variables.global_variables_initializer())
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
self.evaluate(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.VariableV1(1000.0, name="v0")
v1_2 = variables.VariableV1(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if not context.executing_eagerly():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
# train.Saver is V1 only API.
with ops_lib.Graph().as_default(), self.cached_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with ops_lib.Graph().as_default(), self.cached_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1})
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2})
self.evaluate(variables.global_variables_initializer())
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
twos = variables.VariableV1([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
      # A Saver with no arg defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(3.0, self.evaluate(v2.values()))
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.VariableV1(1.0)
with self.assertRaisesRegex(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.VariableV1(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegex(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
save = saver_module.Saver(defer_build=True)
      # If build is not deferred, the saver cannot save `twos`.
twos = variables.VariableV1([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
      # A Saver with no arg defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
@test_util.run_v1_only("train.Saver is V1 only API.")
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testSaveWithGlobalStep(self, pad_step_number=False):
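    # Saving with global_step given as either an int or a tensor appends the
    # step number to the checkpoint path, zero-padded to 8 digits when
    # pad_step_number=True.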
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.executing_eagerly():
sess = None
else:
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
      # In the case where the parent directory doesn't exist, whether the save
      # succeeds or fails is implementation dependent. Therefore we allow both
      # cases.
try:
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the graph.
save.save(sess, save_path)
with self.cached_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
save.save(sess, save_path)
def testSaveRestoreAndValidateVariableDtype(self):
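    # For both Variable and ResourceVariable, restoring into a variable with a
    # mismatched dtype should fail with an InvalidArgumentError that mentions
    # the original dtype.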
for variable_op in [
variables.Variable, resource_variable_ops.ResourceVariable
]:
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
# Build the first session.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(10.0, name="v0", dtype=dtypes.float32)
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer()])
save = saver_module.Saver({"v0": v0})
save.save(sess, save_path)
# Start a second session.
with self.session(graph=ops_lib.Graph()) as sess:
v0_wrong_dtype = variable_op(1, name="v0", dtype=dtypes.int32)
# Restore the saved value with different dtype
# in the parameter nodes.
save = saver_module.Saver({"v0": v0_wrong_dtype})
with self.assertRaisesRegex(errors.InvalidArgumentError,
"original dtype"):
save.restore(sess, save_path)
# Test restoring large tensors (triggers a thread pool)
def testRestoreLargeTensors(self):
save_dir = self.get_temp_dir()
def _model():
small_v = [variable_scope.get_variable(
"small%d" % i, shape=[10, 2], use_resource=True) for i in range(5)]
large_v = [variable_scope.get_variable(
"large%d" % i, shape=[32000, 1000], use_resource=True)
for i in range(3)]
return small_v + large_v
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
orig_vars = _model()
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver(max_to_keep=1)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_dir)
orig_vals = self.evaluate(orig_vars)
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
restored_vars = _model()
save = saver_module.Saver(max_to_keep=1)
save.restore(sess, save_dir)
restored_vals = self.evaluate(restored_vars)
for orig, restored in zip(orig_vals, restored_vals):
self.assertAllEqual(orig, restored)
def test_metrics_save_restore(self):
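    # Verifies that Saver.save() and Saver.restore() update the checkpoint
    # write/read duration histograms and the training time saved metric.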
api_label = saver_module._SAVER_LABEL
def _get_write_histogram_proto():
proto_bytes = metrics.GetCheckpointWriteDurations(api_label=api_label)
histogram_proto = summary_pb2.HistogramProto()
histogram_proto.ParseFromString(proto_bytes)
return histogram_proto
def _get_read_histogram_proto():
proto_bytes = metrics.GetCheckpointReadDurations(api_label=api_label)
histogram_proto = summary_pb2.HistogramProto()
histogram_proto.ParseFromString(proto_bytes)
return histogram_proto
save_path = os.path.join(self.get_temp_dir(), "metrics_save_restore")
# Values at beginning of unit test.
time_start = metrics.GetTrainingTimeSaved(api_label=api_label)
num_writes_start = _get_write_histogram_proto().num
num_reads_start = _get_read_histogram_proto().num
with self.session(graph=ops_lib.Graph()) as sess:
v0 = resource_variable_ops.ResourceVariable(10.0, name="v0")
v1 = resource_variable_ops.ResourceVariable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Initialize all variables
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer()])
save = saver_module.Saver({
"v0": v0,
"v1": v1,
"v2": v2.saveable
},
restore_sequentially=True)
save.save(sess, save_path)
self.assertEqual(_get_write_histogram_proto().num, num_writes_start + 1)
time_after_one_save = metrics.GetTrainingTimeSaved(api_label=api_label)
self.assertGreater(time_after_one_save, time_start)
with self.session(graph=ops_lib.Graph()) as sess:
v0 = resource_variable_ops.ResourceVariable(-1.0, name="v0")
v1 = resource_variable_ops.ResourceVariable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
self.assertEqual(_get_write_histogram_proto().num, num_writes_start + 1)
self.assertEqual(_get_read_histogram_proto().num, num_reads_start + 1)
# Check that training time saved has not increased.
self.assertEqual(
metrics.GetTrainingTimeSaved(api_label=api_label),
time_after_one_save)
save.save(sess, save_path)
self.assertEqual(_get_write_histogram_proto().num, num_writes_start + 2)
self.assertEqual(_get_read_histogram_proto().num, num_reads_start + 1)
# Check that training time saved has increased.
self.assertGreater(
metrics.GetTrainingTimeSaved(api_label=api_label),
time_after_one_save)
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = checkpoint_management.meta_graph_filename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.VariableV1(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t1.insert("k22", 44.0).run()
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
    # train.Saver is a V1-only API.
with ops_lib.Graph().as_default(), self.cached_session():
v0 = variables.VariableV1(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
    # Allows the save/restore mechanism to work with different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(partitioner=None):
      # train.Saver is a V1-only API.
with ops_lib.Graph().as_default(), self.session() as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.VariableV1(rnd, name=var_name)]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({var_name: vs[0]})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(partitioner=None):
      # train.Saver is a V1-only API.
with ops_lib.Graph().as_default(), self.session() as sess:
if partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.VariableV1(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: new_vs[0]
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
else:
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into the same number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores PartitionedVariable.
saved_full = _save()
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
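  # Re-runs the sharded save/restore tests using the V2 checkpoint format.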
_WRITE_VERSION = saver_pb2.SaverDef.V2
def testIterators(self):
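    # Saves iterator state via _IteratorSaveable with a sharded saver (one
    # shard per device) and restores it so iteration resumes where it left off.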
save_path = os.path.join(self.get_temp_dir(), "sharded_iterators")
# Build a graph with 2 parameter nodes on different devices and save.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
ds0 = dataset_ops.Dataset.range(10)
it0 = dataset_ops.make_initializable_iterator(ds0)
get_next0 = it0.get_next()
saveable0 = iterator_ops._IteratorSaveable(
it0._iterator_resource, name="saveable_it0")
with sess.graph.device("/cpu:1"):
ds1 = dataset_ops.Dataset.range(20)
it1 = dataset_ops.make_initializable_iterator(ds1)
get_next1 = it1.get_next()
saveable1 = iterator_ops._IteratorSaveable(
it1._iterator_resource, name="saveable_it1")
saver = saver_module.Saver({
"it0": saveable0,
"it1": saveable1
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(it0.initializer)
self.evaluate(it1.initializer)
self.assertEqual(0, self.evaluate(get_next0))
self.assertEqual(1, self.evaluate(get_next0))
self.assertEqual(0, self.evaluate(get_next1))
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
data_files = glob.glob(save_path + ".data*")
self.assertEqual(2, len(data_files))
# Restore
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
ds0 = dataset_ops.Dataset.range(10)
it0 = dataset_ops.make_initializable_iterator(ds0)
get_next0 = it0.get_next()
saveable0 = iterator_ops._IteratorSaveable(
it0._iterator_resource, name="saveable_it0")
with sess.graph.device("/cpu:1"):
ds1 = dataset_ops.Dataset.range(20)
it1 = dataset_ops.make_initializable_iterator(ds1)
get_next1 = it1.get_next()
saveable1 = iterator_ops._IteratorSaveable(
it1._iterator_resource, name="saveable_it1")
saver = saver_module.Saver({
"it0": saveable0,
"it1": saveable1
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(it0.initializer)
self.evaluate(it1.initializer)
saver.restore(sess, save_path)
self.assertEqual(2, self.evaluate(get_next0))
self.assertEqual(1, self.evaluate(get_next1))
def testIteratorsUnshardedRestore(self):
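    # A checkpoint written by a sharded saver should also be restorable by a
    # saver built with sharded=False.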
save_path = os.path.join(self.get_temp_dir(), "restore_unsharded_iterators")
# Build a graph with 2 parameter nodes on different devices and save.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
ds0 = dataset_ops.Dataset.range(10)
it0 = dataset_ops.make_initializable_iterator(ds0)
get_next0 = it0.get_next()
saveable0 = iterator_ops._IteratorSaveable(
it0._iterator_resource, name="saveable_it0")
with sess.graph.device("/cpu:1"):
ds1 = dataset_ops.Dataset.range(20)
it1 = dataset_ops.make_initializable_iterator(ds1)
get_next1 = it1.get_next()
saveable1 = iterator_ops._IteratorSaveable(
it1._iterator_resource, name="saveable_it1")
saver = saver_module.Saver({
"it0": saveable0,
"it1": saveable1
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(it0.initializer)
self.evaluate(it1.initializer)
self.assertEqual(0, self.evaluate(get_next0))
self.assertEqual(1, self.evaluate(get_next0))
self.assertEqual(0, self.evaluate(get_next1))
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
data_files = glob.glob(save_path + ".data*")
self.assertEqual(2, len(data_files))
# Restore
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
ds0 = dataset_ops.Dataset.range(10)
it0 = dataset_ops.make_initializable_iterator(ds0)
get_next0 = it0.get_next()
saveable0 = iterator_ops._IteratorSaveable(
it0._iterator_resource, name="saveable_it0")
with sess.graph.device("/cpu:1"):
ds1 = dataset_ops.Dataset.range(20)
it1 = dataset_ops.make_initializable_iterator(ds1)
get_next1 = it1.get_next()
saveable1 = iterator_ops._IteratorSaveable(
it1._iterator_resource, name="saveable_it1")
saver = saver_module.Saver({
"it0": saveable0,
"it1": saveable1
},
write_version=self._WRITE_VERSION,
sharded=False)
self.evaluate(it0.initializer)
self.evaluate(it1.initializer)
saver.restore(sess, save_path)
self.assertEqual(2, self.evaluate(get_next0))
self.assertEqual(1, self.evaluate(get_next1))
class MaxToKeepTest(test.TestCase):
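  # Tests the max_to_keep checkpoint retention behavior of Saver.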
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testMaxToKeepEager(self):
with context.eager_mode():
save_dir = self._get_test_dir("max_to_keep_eager")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(None, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver({"v": v}, max_to_keep=2)
save2.set_last_checkpoints(save.last_checkpoints)
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
s2 = save2.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
    # train.Saver is a V1-only API.
with ops_lib.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
self.evaluate(variables.global_variables_initializer())
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
def testNoMetaGraph(self):
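    # Saving with write_meta_graph=False produces checkpoint data but no .meta
    # file.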
save_dir = self._get_test_dir("no_meta_graph")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v})
self.evaluate(variables.global_variables_initializer())
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
class RecoverLastCheckpointsTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def test_recover_last_checkpoints(self):
with context.eager_mode():
save_dir = self._get_test_dir("recover_last_checkpoints")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=10)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "ckpt-1"))
s2 = save.save(None, os.path.join(save_dir, "ckpt-2"))
s3 = save.save(None, os.path.join(save_dir, "ckpt-3"))
self.assertEqual([s1, s2, s3], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s1, s2, s3],
save_dir=save_dir)
# Create another saver and recover last checkpoints.
save2 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save2.last_checkpoints)
save2.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s1, s2, s3], save2.last_checkpoints)
# Remove a checkpoint and check that last checkpoints are
# restored correctly.
for fname in gfile.Glob("{}*".format(s1)):
gfile.Remove(fname)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
# Create another saver and recover last checkpoints. The removed
# checkpoint would be correctly omitted.
save3 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save3.last_checkpoints)
save3.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s2, s3], save3.last_checkpoints)
s4 = save3.save(None, os.path.join(save_dir, "ckpt-4"))
self.assertCheckpointState(
model_checkpoint_path=s4,
all_model_checkpoint_paths=[s2, s3, s4],
save_dir=save_dir)
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.cached_session() as sess:
v = variable_scope.variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
self.evaluate(variables.global_variables_initializer())
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait until 1 second has elapsed so s1 will be old enough to keep.
      # sleep may return early; don't trust it.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s, so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking now, to reduce time
      # variance in the test.
      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save() will delete s2, because max_to_keep is 2 and because
      # we already kept the old s1. s2 is very close in time to s1, so it gets
      # deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s4))
class SaveRestoreWithVariableNameMap(test.TestCase):
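  # Tests saving and restoring when the checkpoint keys are remapped names
  # rather than the variables' own names.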
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if not context.executing_eagerly():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_v1_only(
"Queue-based input pipelines have been replaced by `tf.data` "
"and not supported in V2.")
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.VariableV1(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(v0))
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, self.evaluate(v1))
@test_util.run_v1_only(
"Exporting/importing meta graphs is only supported in V1.")
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
@test_util.run_v1_only(
"Exporting/importing meta graphs is only supported in V1.")
def testClearExtraneousSavers(self):
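    # export_meta_graph(clear_extraneous_savers=True) should keep only the
    # exporting saver in the SAVERS collection and drop the other saver's
    # nodes from the exported graph_def.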
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0 graph nodes are omitted from the saver1 export
self.assertEqual(33, len(meta_graph_def0.graph_def.node))
self.assertEqual(21, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
    # train.Saver is a V1-only API.
with ops_lib.Graph().as_default(), self.session():
# Creates a graph.
variables.VariableV1(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with ops_lib.Graph().as_default(), self.session():
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with ops_lib.Graph().as_default(), self.session():
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with ops_lib.Graph().as_default(), self.session():
# Import should fail.
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "Cannot parse file"):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "does not exist"):
saver_module.import_meta_graph(filename)
@test_util.run_v1_only(
"Exporting/importing meta graphs is only supported in V1.")
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
self.evaluate(variables.global_variables_initializer())
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
      # The use of control_flow_ops.cond here is purely for adding test coverage
      # of the save and restore of control flow context (which doesn't make any
      # sense here from a machine learning perspective). A typical `biases` is
      # a simple Variable without the condition.
biases = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
      # The use of control_flow_ops.while_loop here is purely for adding test
      # coverage of the save and restore of control flow context (which doesn't
      # make any sense here from a machine learning perspective). A typical
      # `biases` is a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0),
variables.VariableV1(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initializes all the variables.
self.evaluate(init_all_op)
# Runs to logit.
self.evaluate(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      # Creates train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
self.evaluate(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
self.evaluate(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
# train.Saver and train.import_meta_graph are V1 only APIs.
with ops_lib.Graph().as_default():
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def _testGradientSerDes(self, graph_fn):
"""Tests that gradients can be computed after exporting and importing.
Builds a graph, exports it, and verifies that it can be imported and the
gradient can be built and run correctly.
Args:
graph_fn: takes a single float Tensor argument as input, outputs a single
Tensor
"""
test_dir = self._get_test_dir("nested_control_flow")
filename = os.path.join(test_dir, "metafile")
saver_ckpt = os.path.join(test_dir, "saver.ckpt")
    # Build the graph to export using `graph_fn`.
with ops_lib.Graph().as_default():
var = variables.VariableV1(0.0)
var_name = var.name
output = graph_fn(var)
output_name = output.name
init_op = variables.global_variables_initializer()
# Generate a MetaGraphDef containing the while loop.
with session.Session() as sess:
self.evaluate(init_op)
self.evaluate(output)
saver = saver_module.Saver()
saver.save(sess, saver_ckpt)
saver.export_meta_graph(filename)
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with an imported MetaGraphDef.
grad = gradients_impl.gradients([output], [var])
# Turn off constant folding to avoid breaking testNestedControlFlowSerDes.
# It appears that a missing control dependency in the gradient graph
# causes the fetch node to not be triggered.
no_constfold_config = config_pb2.ConfigProto()
no_constfold_config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
expected_grad_value = self.evaluate(grad)
# Restore the MetaGraphDef into a new Graph.
with ops_lib.Graph().as_default():
with session.Session() as sess:
saver = saver_module.import_meta_graph(filename)
saver.restore(sess, saver_ckpt)
# Make sure we can still build gradients and get the same result.
var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
# pylint: disable=g-long-lambda
return self._testGradientSerDes(
lambda x: control_flow_ops.while_loop(
lambda i, y: i < 5, outer_body_fn, [0, x])[1])
# pylint: enable=g-long-lambda
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
def testNestedControlFlowSerDes(self):
# Test while loop in a cond in a while loop.
# pylint: disable=g-long-lambda
def body(i, x):
cond_result = control_flow_ops.cond(
i > 0,
lambda: control_flow_ops.while_loop(
lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])[1],
lambda: x)
return i + 1, cond_result
# pylint: enable=g-long-lambda
self._testWhileLoopAndGradientSerDes(body)
def testNestedCondsSerDes(self):
# Test conds in a cond.
# pylint: disable=g-long-lambda
self._testGradientSerDes(lambda x: control_flow_ops.cond(
x > 0,
lambda: control_flow_ops.cond(x > 3,
lambda: array_ops.identity(x),
lambda: math_ops.multiply(x, 2.0)),
lambda: control_flow_ops.cond(x < -3,
lambda: constant_op.constant(1.0),
lambda: math_ops.multiply(x, -1.0))))
# pylint: enable=g-long-lambda
@test_util.run_v1_only("This exercises Tensor.op which is meaningless in V2.")
def testStrippedOpListDef(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(0.0)
var = variables.VariableV1(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"AddV2", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveSlices", "Sub",
"VariableV2"
])
else:
self.assertEqual(ops, [
"AddV2", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveV2", "Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testStripDefaultValuedAttrs(self):
"""Verifies that default valued attrs are stripped, unless disabled."""
# With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must be removed.
# train.Saver and train.export_meta_graph are V1 only APIs.
with ops_lib.Graph().as_default(), self.cached_session():
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must *not* be removed, even if they map
# to their defaults.
with ops_lib.Graph().as_default(), self.session():
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
def testImportIntoNamescope(self):
# Test that we can import a meta graph into a namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
# train.Saver is V1 only API.
with ops_lib.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(
math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=label, logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testImportIntoNamescopeWithoutVariables(self):
# Save a simple graph that contains no variables into a checkpoint.
test_dir = self._get_test_dir("no_vars_graph")
filename = os.path.join(test_dir, "ckpt")
graph_1 = ops_lib.Graph()
with session.Session(graph=graph_1) as sess:
constant_op.constant([1, 2, 3], name="x")
constant_op.constant([1, 2, 3], name="y")
saver = saver_module.Saver(allow_empty=True)
saver.save(sess, filename)
# Create a fresh graph.
graph_2 = ops_lib.Graph()
with session.Session(graph=graph_2) as sess:
# Restore the above checkpoint under scope "subgraph_1".
new_saver_1 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_1")
# There are no variables to restore, so import_meta_graph should not
# return a Saver.
self.assertIsNone(new_saver_1)
# Create a variable in graph_2 under scope "my_scope".
variables.VariableV1(array_ops.zeros([10]), name="my_scope/my_var")
self.evaluate(variables.global_variables_initializer())
# Restore the checkpoint into a different scope "subgraph_2".
new_saver_2 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_2")
# Because the variable does not live in scope "subgraph_2",
# import_meta_graph should not attempt to restore the variable. So,
# import_meta_graph still won't return a Saver instance.
self.assertIsNone(new_saver_2)
# However, if we restore the checkpoint under scope "my_scope",
# import_meta_graph will detect the variable and return a Saver for
# restoring it. This should happen even when the variable does not
# originate from graph_1.
new_saver_3 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="my_scope")
self.assertIsInstance(new_saver_3, saver_module.Saver)
def testImportIntoImplicitNamescope(self):
# Test that we can import a meta graph into an implicit namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
# train.Saver is V1 only API.
with ops_lib.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(
math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=label, logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
with ops_lib.name_scope("new_model"):
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph)
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we import a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we export a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def testDebugString(self):
# Builds a graph.
v0 = variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.VariableV1(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.cached_session() as sess:
self.evaluate(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = py_checkpoint_reader.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0, v0_tensor)
self.assertAllEqual(v1, v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegex(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegex(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
py_checkpoint_reader.NewCheckpointReader("non-existent")
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage for the save and restore of control flow contexts (which
        # doesn't make any sense here from a machine learning perspective).
        # Typically, biases would be a simple Variable without the condition.
biases1 = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding
        # test coverage for the save and restore of control flow contexts
        # (which doesn't make any sense here from a machine learning
        # perspective). Typically, biases would be a simple Variable without
        # the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with graph.as_default(), self.session() as sess:
self.evaluate(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding
        # test coverage for the save and restore of control flow contexts
        # (which doesn't make any sense here from a machine learning
        # perspective). Typically, biases would be a simple Variable without
        # the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.variables_initializer(rest_variables)
with graph.as_default(), self.session() as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
self.evaluate([weights1, biases1])
# Initialize the rest of the variables and run logits.
self.evaluate(init_rest_op)
self.evaluate(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
  # Verifies that we can copy the subgraph under "hidden1" to a different
  # name scope in the same graph or to a different graph.
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with graph1.as_default(), self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with graph1.as_default(), self.session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
# Verifies copy to different graph.
graph2 = ops_lib.Graph()
with graph2.as_default():
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session() as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
with graph2.as_default():
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.VariableV1([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.VariableV1([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.session(graph=graph) as sess:
self.evaluate(variables.global_variables_initializer())
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
with graph1.as_default():
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, self.evaluate(var_dict1["variable1:0"]))
graph2 = ops_lib.Graph()
with graph2.as_default():
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, self.evaluate(var_dict2["variable2:0"]))
class _OwnsAVariableSimple(trackable_base.Trackable):
"""A Trackable object which can be saved using a tf.train.Saver."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
def _gather_saveables_for_checkpoint(self):
return {trackable_base.VARIABLE_VALUE_KEY: self.non_dep_variable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class _MirroringSaveable(
saver_module.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(trackable_base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {trackable_base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class TrackableCompatibilityTests(test.TestCase):
# TODO(allenl): Track down python3 reference cycles in these tests.
@test_util.run_in_graph_and_eager_modes
def testNotSaveableButIsTrackable(self):
v = _OwnsAVariableSimple()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
def testSingleTensorEvaluation(self):
class _CountingSaveable(saver_module.BaseSaverBuilder.SaveableObject):
def __init__(self, name):
self.eval_count = 0
def _tensor():
self.eval_count += 1
return constant_op.constant([1.])
dummy_op = constant_op.constant([2.])
super(_CountingSaveable, self).__init__(
dummy_op,
[saver_module.BaseSaverBuilder.SaveSpec(
_tensor, "", name, dtype=dummy_op.dtype,
device=dummy_op.device)],
name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
pass
with context.eager_mode():
v = _CountingSaveable("foo")
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.assertEqual(1, v.eval_count)
saver.restore(sess, save_path)
self.assertEqual(1, v.eval_count)
def testVariableNotFoundErrorRaised(self):
# Restore does some tricky exception handling to figure out if it should
# load an object-based checkpoint. Tests that the exception handling isn't
# too broad.
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
a = resource_variable_ops.ResourceVariable(1., name="a")
b = resource_variable_ops.ResourceVariable(1., name="b")
a_saver = saver_module.Saver([a])
b_saver = saver_module.Saver([b])
with self.cached_session() as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with self.assertRaisesRegex(errors.NotFoundError,
"Key b not found in checkpoint"):
b_saver.restore(sess=sess, save_path=save_path)
with self.assertRaises(errors.NotFoundError) as cs:
b_saver.restore(sess=sess, save_path=save_path)
# Make sure we don't have a confusing "During handling of the above
# exception" block in Python 3.
self.assertNotIn("NewCheckpointReader", cs.exception.message)
@test_util.run_v1_only("train.Saver is V1 only API.")
def testGraphChangedForRestoreErrorRaised(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1(1., name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1([1.], name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"a mismatch between the current graph and the graph"):
a_saver.restore(sess=sess, save_path=save_path)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "738cd8d5753cf0cb316bb2889f1a2af0",
"timestamp": "",
"source": "github",
"line_count": 3198,
"max_line_length": 80,
"avg_line_length": 42.53908692933083,
"alnum_prop": 0.6442663922375772,
"repo_name": "frreiss/tensorflow-fred",
"id": "fcd0a99734e7ac98c572a45e4c187c69831ed5ca",
"size": "136728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:5384")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
{
"content_hash": "aba5cad529e9262b26bae947fc5dcfb2",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.5,
"alnum_prop": 0.759493670886076,
"repo_name": "CryptiumCoin/Cryptium",
"id": "19c3e81f9a791159c0b68bcdae49a573442b50f9",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32790"
},
{
"name": "C++",
"bytes": "2605583"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18284"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "13384"
},
{
"name": "NSIS",
"bytes": "5944"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69714"
},
{
"name": "QMake",
"bytes": "14714"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
}
|
"""
:class:`DisplayData`, its classes, interfaces and methods.
The classes in this module allow users and transform developers to define
static display data to be displayed when a pipeline runs.
:class:`~apache_beam.transforms.ptransform.PTransform` s,
:class:`~apache_beam.transforms.core.DoFn` s
and other pipeline components are subclasses of the :class:`HasDisplayData`
mixin. To add static display data to a component, you can override the
:meth:`HasDisplayData.display_data()` method.
Available classes:
* :class:`HasDisplayData` - Components that inherit from this class can have
static display data shown in the UI.
* :class:`DisplayDataItem` - This class represents static display data
elements.
* :class:`DisplayData` - Internal class that is used to create display data
and communicate it to the API.
"""
from __future__ import absolute_import
import calendar
import inspect
import json
from builtins import object
from datetime import datetime
from datetime import timedelta
from past.builtins import unicode
__all__ = ['HasDisplayData', 'DisplayDataItem', 'DisplayData']
class HasDisplayData(object):
""" Basic mixin for elements that contain display data.
It implements only the display_data method and a _namespace method.
"""
def display_data(self):
""" Returns the display data associated to a pipeline component.
It should be reimplemented in pipeline components that wish to have
static display data.
Returns:
Dict[str, Any]: A dictionary containing ``key:value`` pairs.
The value might be an integer, float or string value; a
:class:`DisplayDataItem` for values that have more data
(e.g. short value, label, url); or a :class:`HasDisplayData` instance
that has more display data that should be picked up. For example::
{
'key1': 'string_value',
'key2': 1234,
'key3': 3.14159265,
'key4': DisplayDataItem('apache.org', url='http://apache.org'),
'key5': subComponent
}
"""
return {}
def _namespace(self):
return '{}.{}'.format(self.__module__, self.__class__.__name__)
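# Illustrative sketch (not part of the original module): a pipeline component
# can expose static display data simply by mixing in HasDisplayData and
# overriding display_data(). The class name and keys below are hypothetical.
class _ExampleComponent(HasDisplayData):
  def display_data(self):
    return {
        'file_pattern': 'gs://my-bucket/*.txt',
        'homepage': DisplayDataItem(
            'apache.org', url='http://apache.org', label='Project homepage'),
    }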
class DisplayData(object):
""" Static display data associated with a pipeline component.
"""
def __init__(self, namespace, display_data_dict):
self.namespace = namespace
self.items = []
self._populate_items(display_data_dict)
def _populate_items(self, display_data_dict):
""" Populates the list of display data items.
"""
for key, element in display_data_dict.items():
if isinstance(element, HasDisplayData):
subcomponent_display_data = DisplayData(element._namespace(),
element.display_data())
self.items += subcomponent_display_data.items
continue
if isinstance(element, DisplayDataItem):
if element.should_drop():
continue
element.key = key
element.namespace = self.namespace
self.items.append(element)
continue
# If it's not a HasDisplayData element,
# nor a dictionary, then it's a simple value
self.items.append(
DisplayDataItem(element,
namespace=self.namespace,
key=key))
@classmethod
def create_from_options(cls, pipeline_options):
""" Creates :class:`DisplayData` from a
:class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.
When creating :class:`DisplayData`, this method will convert the value of
any item of a non-supported type to its string representation.
The normal :meth:`.create_from()` method rejects those items.
Returns:
DisplayData: A :class:`DisplayData` instance with populated items.
Raises:
      ~exceptions.ValueError: If the **pipeline_options** argument is
        not an instance of
        :class:`~apache_beam.options.pipeline_options.PipelineOptions`.
"""
from apache_beam.options.pipeline_options import PipelineOptions
if not isinstance(pipeline_options, PipelineOptions):
raise ValueError(
'Element of class {}.{} does not subclass PipelineOptions'
.format(pipeline_options.__module__,
pipeline_options.__class__.__name__))
items = {k: (v if DisplayDataItem._get_value_type(v) is not None
else str(v))
for k, v in pipeline_options.display_data().items()}
return cls(pipeline_options._namespace(), items)
@classmethod
def create_from(cls, has_display_data):
""" Creates :class:`DisplayData` from a :class:`HasDisplayData` instance.
Returns:
DisplayData: A :class:`DisplayData` instance with populated items.
Raises:
~exceptions.ValueError: If the **has_display_data** argument is
not an instance of :class:`HasDisplayData`.
"""
if not isinstance(has_display_data, HasDisplayData):
raise ValueError('Element of class {}.{} does not subclass HasDisplayData'
.format(has_display_data.__module__,
has_display_data.__class__.__name__))
return cls(has_display_data._namespace(), has_display_data.display_data())
class DisplayDataItem(object):
""" A DisplayDataItem represents a unit of static display data.
Each item is identified by a key and the namespace of the component the
display item belongs to.
"""
typeDict = {str:'STRING',
unicode:'STRING',
int:'INTEGER',
float:'FLOAT',
bool: 'BOOLEAN',
timedelta:'DURATION',
datetime:'TIMESTAMP'}
def __init__(self, value, url=None, label=None,
namespace=None, key=None, shortValue=None):
self.namespace = namespace
self.key = key
self.type = self._get_value_type(value)
self.shortValue = (shortValue if shortValue is not None else
self._get_short_value(value, self.type))
self.value = value
self.url = url
self.label = label
self._drop_if_none = False
self._drop_if_default = False
def drop_if_none(self):
""" The item should be dropped if its value is None.
Returns:
Returns self.
"""
self._drop_if_none = True
return self
def drop_if_default(self, default):
""" The item should be dropped if its value is equal to its default.
Returns:
Returns self.
"""
self._default = default
self._drop_if_default = True
return self
def should_drop(self):
""" Return True if the item should be dropped, or False if it should not
    be dropped. This depends on the drop_if_none and drop_if_default calls.
Returns:
True or False; depending on whether the item should be dropped or kept.
"""
if self._drop_if_none and self.value is None:
return True
if self._drop_if_default and self.value == self._default:
return True
return False
def is_valid(self):
""" Checks that all the necessary fields of the :class:`DisplayDataItem`
are filled in. It checks that neither key, namespace, value or type are
:data:`None`.
Raises:
~exceptions.ValueError: If the item does not have a key, namespace,
value or type.
"""
if self.key is None:
raise ValueError('Invalid DisplayDataItem. Key must not be None')
if self.namespace is None:
raise ValueError('Invalid DisplayDataItem. Namespace must not be None')
if self.value is None:
raise ValueError('Invalid DisplayDataItem. Value must not be None')
if self.type is None:
raise ValueError(
'Invalid DisplayDataItem. Value {} is of an unsupported type.'
.format(self.value))
def _get_dict(self):
res = {'key': self.key,
'namespace': self.namespace,
'type': self.type if self.type != 'CLASS' else 'STRING'}
# TODO: Python Class types should not be special-cased once
# the Fn API is in.
if self.url is not None:
res['url'] = self.url
if self.shortValue is not None:
res['shortValue'] = self.shortValue
if self.label is not None:
res['label'] = self.label
res['value'] = self._format_value(self.value, self.type)
return res
def get_dict(self):
""" Returns the internal-API dictionary representing the
:class:`DisplayDataItem`.
Returns:
Dict[str, Any]: A dictionary. The internal-API dictionary representing
the :class:`DisplayDataItem`.
Raises:
~exceptions.ValueError: if the item is not valid.
"""
self.is_valid()
return self._get_dict()
def __repr__(self):
return 'DisplayDataItem({})'.format(json.dumps(self._get_dict()))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._get_dict() == other._get_dict()
return False
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash(tuple(sorted(self._get_dict().items())))
@classmethod
def _format_value(cls, value, type_):
""" Returns the API representation of a value given its type.
Args:
value: The value of the item that needs to be shortened.
type_(string): The type of the value.
Returns:
A formatted value in the form of a float, int, or string.
"""
res = value
if type_ == 'CLASS':
res = '{}.{}'.format(value.__module__, value.__name__)
elif type_ == 'DURATION':
res = value.total_seconds()*1000
elif type_ == 'TIMESTAMP':
res = calendar.timegm(value.timetuple())*1000 + value.microsecond//1000
return res
@classmethod
def _get_short_value(cls, value, type_):
""" Calculates the short value for an item.
Args:
value: The value of the item that needs to be shortened.
type_(string): The type of the value.
Returns:
The unqualified name of a class if type_ is 'CLASS'. None otherwise.
"""
if type_ == 'CLASS':
return value.__name__
return None
@classmethod
def _get_value_type(cls, value):
""" Infers the type of a given value.
Args:
value: The value whose type needs to be inferred. For 'DURATION' and
'TIMESTAMP', the corresponding Python type is datetime.timedelta and
datetime.datetime respectively. For Python classes, the API type is
just 'STRING' at the moment.
Returns:
One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or
'TIMESTAMP', depending on the type of the value.
"""
    # TODO: Fix the Args documentation once the handling of Python classes
    # has changed.
type_ = cls.typeDict.get(type(value))
if type_ is None:
type_ = 'CLASS' if inspect.isclass(value) else None
if type_ is None and value is None:
type_ = 'STRING'
return type_
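# Illustrative usage sketch (not part of the original module): build
# DisplayData from a HasDisplayData component and inspect the internal-API
# dictionaries of its items. The component and its keys are hypothetical.
if __name__ == '__main__':
  class _DemoComponent(HasDisplayData):
    def display_data(self):
      return {'max_retries': 3,
              'docs': DisplayDataItem('apache.org', url='http://apache.org')}

  for item in DisplayData.create_from(_DemoComponent()).items:
    print(item.get_dict())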
|
{
"content_hash": "48ff293070bd32a0dbbeaffa440983c8",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 80,
"avg_line_length": 32.993883792048926,
"alnum_prop": 0.646955232180925,
"repo_name": "charlesccychen/beam",
"id": "614c9a97e291f7c92689ffc16b1787b301e96f8c",
"size": "11574",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/transforms/display.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40964"
},
{
"name": "Dockerfile",
"bytes": "23025"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2385151"
},
{
"name": "Groovy",
"bytes": "276161"
},
{
"name": "HTML",
"bytes": "52535"
},
{
"name": "Java",
"bytes": "23989867"
},
{
"name": "JavaScript",
"bytes": "16472"
},
{
"name": "Jupyter Notebook",
"bytes": "54182"
},
{
"name": "Python",
"bytes": "4274226"
},
{
"name": "Ruby",
"bytes": "4227"
},
{
"name": "Shell",
"bytes": "172322"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Whether to use threads for concordance searches or not.
# (This is an experimental feature.)
from django.conf import settings
if settings.IM_IN_TEST_MODE:
THREADED_SEARCH = False
else:
THREADED_SEARCH = True
import datetime
import re
import time
import copy
from itertools import islice
from logos.constants import PUNCTUATION, STOP_WORDS
from logos.utils import replace_spc_error_handler
from bot.logos_decorators import irc_room_permission_required, \
irc_network_permission_required
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from django.db.models import Min, Max
from bot.pluginDespatch import Plugin, CommandException
from logos.roomlib import get_room_option, set_room_option, set_global_option, \
get_global_option, get_user_option
from logos.pluginlib import CommandDecodeException
from .models import BibleTranslations, BibleBooks, BibleVerses, \
BibleConcordance, BibleDict, XRefs
from .models import BibleColours
from bibleapp.management.commands._booktbl import book_table
import logging
from logos.settings import LOGGING
from django.db.models import Count
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
# from http://stackoverflow.com/questions/3313590/check-for-presence-of-a-sublist-in-python
def contains_sublist(lst, sublst):
n = len(sublst)
return any((sublst == lst[i:i+n]) for i in xrange(len(lst)-n+1))
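# For example (illustrative): contains_sublist([1, 2, 3, 4], [2, 3]) is True,
# while contains_sublist([1, 2, 3, 4], [3, 2]) is False.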
# Strip out unneeded punctuation (and other fluff) from a list of words
def strip_fluff_to_list(text):
words = []
for w in re.split('\s+', text):
w1 = re.sub("[^a-zA-Z0-9'*]", "", w)
if not re.match("[a-zA-Z0-9']+\*?", w1):
continue
else:
words.append(w1)
return words
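# For example (illustrative):
# strip_fluff_to_list("In the beginning, God created...") returns
# ['In', 'the', 'beginning', 'God', 'created'].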
def get_book(version, bookwork):
# remove possible spaces between books like "1 John" etc
book = re.sub("\s+", "", bookwork)
bl = len(book)
book_found = None
for smallbk, bigbk1 in book_table:
bigbk = re.sub("\s+", "", bigbk1).lower()
if smallbk == book:
book_found = smallbk
break
if bigbk[0:bl] == book:
book_found = smallbk
break
if not book_found:
trans = BibleTranslations.objects.get(name=version)
if BibleBooks.objects.filter(trans=trans, canonical = book).exists():
return book
else:
return None
return book_found
class BibleBot(Plugin):
    # Stop words are words that are so common that they are not indexed
    # (in the concordance). This makes for a much smaller concordance
    # database.
stop_words = STOP_WORDS
plugin = ("bible", "Bible Bot")
def __init__(self, *args, **kwargs):
super(BibleBot, self).__init__(*args, **kwargs)
self.commands = (\
(r'random$', self.random_verse, "bring up random verse"),
(r'random (?P<translation>[a-zA-Z]+)$', self.random_verse, "bring up random verse"),
(r'view$', self.view_xrefs, "View the next xref"),
(r'(next|n)\s*$', self.next, "read next verse"),
(r'(?:search|s)\s+((?:\w+\s+)?(?:\w+(?:-\w+)?\s+)?[^"]+)$', self.search, "perform a concordance search"),
(r'(?:search|s)\s+((?:\w+\s+)?(?:\w+(?:-\w+)?\s+)?)\"([^"]+)\"\s*$', self.phrase_search, "perform a phrase search"),
# (r'(?:search|s)\s+([^"]+)', self.search, False),
(r'(?:search|s)\s*$', self.next_search, "continue a search"),
(r'set\s+(?P<room>#\S+)\s+default\s+translation\s+([a-zA-Z]+)', self.set_default_trans,\
"set default translation for room"),
(r'set\s+(?:private|pvt)\s+translation\s+([a-zA-Z]+)', self.set_pvt_translation,
"set default translation for private chat window"),
(r'set\s+(?P<room>#\S+)\s+search\s+limit\s+(\d+)\s*$', self.set_search_limit,
"set limit on number of verses searched at once"),
(r'set\s+(?P<room>#\S+)\s+verse\s+limit\s+(\d+)\s*$', self.set_verse_limit,
"set limit on number of verses read at once"),
(r'(?:translations|versions)\s*$', self.versions,
"display available versions or translations"),
(r'dict\s+(\S+)', self.dict, "lookup strongs numbers"),
(r'(\w+\+?\s+)?\d?\s*[a-zA-Z]+\s+\d+\s*(:?\s*\d+\s*(-?\s*\d+)?)?$',
self.verse_lookup, "lookup bible verse", "LastResortMatch"), \
(r'books\s+(.*)',
self.book_names, "show book names for translation"),
(r'xref\s+(.*)',
self.xref, "display xref verses"),
)
        # pending_searches is used to remember where users are up to with
        # their searches, so that when they type !next the search follows
        # on logically.
self.pending_searches = {}
self.reading_progress = {}
self.xref_views = {}
def _get_translations(self):
trans = BibleTranslations.objects.all()
trans_list = []
for tr in trans:
trans_list.append(tr.name)
return trans_list
def _get_defaulttranslation(self, channel, nick):
user = self.get_auth().get_user_obj(nick)
res = get_user_option(user, "translation")
if not res:
if channel[0] == '#':
res = get_room_option(self.network, channel,'default_translation')
else:
res = get_global_option('pvt-translation')
if not res:
res = 'kjv' # default translation
return str(res)
def _get_verselimit(self,channel):
""" Get maximum number of verses that may be shown in room at one
bot command """
res = get_room_option(self.network, channel, "verselimit")
if not res:
res = 4 # default verselimit
return res
def _get_searchlimit(self, channel):
""" Get the maximum numbers of verses to be shown while
doing a search in this room """
res = get_room_option(self.network, channel, "searchlimit")
if not res:
res = 5 # default search limit setting
return int(res)
def _get_verses(self, chan, nick, user, passage_ref):
""" Retrieve the passage using the passage lookup supplied.
This will look something like "nasb john 3:3-5"
"""
# Get the maximum number of verses to display in one
# go for the current room
verselimit = int(self._get_verselimit(chan))
# Find default translation for the current room
def_trans = self._get_defaulttranslation(chan, nick)
passage_ref = passage_ref.lower().strip()
mch1 = re.match(r"([a-z\+]+)\s+([1-3]?\s*[a-z]+)\s+(.*)", passage_ref)
mch2 = re.match(r"([1-3]?\s*[a-z]+)\s+(.*)", passage_ref)
if mch1:
mch = mch1
elif mch2:
mch = mch2
else:
raise CommandException(nick, chan, \
"Could not decipher scripture lookup " +\
"reference. Format is " +\
"[translation] <book> <chapter>:<verse>[-<verse>]")
assert mch.lastindex in (2,3)
if mch.lastindex == 2: # No version/translation given
version = def_trans
bookwork = mch.group(1)
versework = mch.group(2)
if mch.lastindex == 3: # all three given version/book/chapter&verse
version = mch.group(1)
bookwork = mch.group(2)
versework = mch.group(3)
try:
trans = BibleTranslations.objects.get(name = version).pk
except BibleTranslations.DoesNotExist:
raise CommandException(nick, user, "Unknown translation %s" % (version,))
# remove possible spaces between books like "1 John" etc
book = get_book(version, bookwork)
if not book:
raise CommandException(user, chan, "Could not find book %s" % (bookwork,))
passage = re.sub(r"(\d+)\s*:\s*(\d+)",r"\1:\2",versework)
splitwork = re.split('(?::|-|\s+)',passage)
chapter = int(splitwork.pop(0))
if splitwork:
firstverse = int(splitwork.pop(0))
if splitwork:
lastverse = int(splitwork.pop(0))
else:
lastverse = firstverse
if lastverse - firstverse <= verselimit-1 and lastverse > firstverse:
versecount = lastverse - firstverse + 1
elif lastverse <= firstverse:
versecount = 1
else:
versecount = verselimit
else:
firstverse = 1
versecount = verselimit
book_db = BibleBooks.objects.get(trans = trans,
canonical = book)
book_id = book_db.pk
long_book_name = book_db.long_book_name
# get qualifying verses
qual_verses = BibleVerses.objects.filter(trans = trans,
book = book_id,
chapter = int(chapter),
verse__gte = firstverse,
verse__lt = firstverse+versecount)
if qual_verses:
for v in qual_verses:
pk = v.pk
qual_verses_next_pk = pk + 1
resp = []
for q in qual_verses:
resp.append((version.upper(),
long_book_name + " " + str(q.chapter) \
+ ":" + str(q.verse),
q.verse_text))
timestamp = datetime.datetime.now()
if nick.lower() not in self.reading_progress:
self.reading_progress[nick.lower()] = {}
self.reading_progress[nick.lower()][chan.lower()] = \
{'verses_pk':qual_verses_next_pk,
'timestamp': timestamp}
else:
raise CommandException(nick, user, \
"Verse {} {}:{} not found".format(long_book_name, chapter, firstverse))
return resp
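        # Illustrative return value (hedged example): a list of
        # (translation, reference, text) tuples such as
        # [('KJV', 'John 3:16', 'For God so loved the world...')]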
def _next_reading(self, chan, user):
if user.lower() not in self.reading_progress or \
chan.lower() not in self.reading_progress[user.lower()]:
self.say(chan, "No previous verse to read from")
return None
else:
verses_pk = self.reading_progress[user.lower()][chan.lower()]['verses_pk']
qual_verses = BibleVerses.objects.filter(pk__gte = verses_pk)
# Get the maximum number of verses to display in one
# go for the current room
verselimit = self._get_verselimit(chan)
resp = []
qual_verses_next_pk = None
for q in qual_verses[0:verselimit]:
resp.append((q.trans.name.upper(),
q.book.long_book_name + " " + str(q.chapter) \
+ ":" + str(q.verse),
q.verse_text))
qual_verses_next_pk = q.pk + 1
timestamp = datetime.datetime.now()
if qual_verses_next_pk:
if user.lower() not in self.reading_progress:
self.reading_progress[user.lower()] = {}
self.reading_progress[user.lower()][chan.lower()] = \
{'verses_pk':qual_verses_next_pk,
'timestamp': timestamp}
else:
if chan.lower() in self.reading_progress[user.lower()]:
del self.reading_progress[user.lower()][chan.lower()]
return resp
def _concordance_generator(self, chan, nick, trans, book_range, words, mode="simple"):
sri = 1 # This is the search result index
if book_range[0]:
bk = get_book(trans.name, book_range[0])
br0 = BibleBooks.objects.filter(trans = trans, canonical=bk)\
.first()
else:
br0 = None
if book_range[1]:
bk = get_book(trans.name, book_range[1])
br1 = BibleBooks.objects.filter(trans = trans, canonical=bk)\
.first()
else:
br1 = None
for w in words:
if w == "*":
raise CommandException(nick, chan, \
"Bare wildcards cannot be used in search")
# strip out punctuation from word list
word_list = strip_fluff_to_list(' '.join(words))
# remove commonly occurring stop words from word list
# (being careful not to iterate over list we are removing from)
# word_list2 will contain the original list
stop_words_found = []
word_list2 = copy.copy(word_list)
for wrd in word_list2:
if wrd in self.stop_words:
                # remove from original list
word_list.remove(wrd)
stop_words_found.append(wrd)
if stop_words_found:
self.say(chan, "Ignoring common words \"%s\"." % (", ".join(stop_words_found)))
# split word list into those that are wildcards
# and those that are not.
normal_words = []
wild_words = []
for w in word_list:
if re.match("([a-zA-Z']+)\*$",w):
wild_words.append(w)
else:
normal_words.append(w)
logger.debug("normal_words = " + str(normal_words))
logger.debug("wild_words = " + str(wild_words))
if len(word_list) == 0:
raise CommandException(nick, chan, \
"At least one non stop word needed in search")
elif len(word_list) > 1:
# To save ourselves some work find the word
# with the lowest number of occurences in concordance
lowest = None
# If there is a book range search only between those books
# otherwise search the entire translation within the
# concordance.
if br0 and br1:
q_results = BibleConcordance.objects.\
filter(trans = trans, book__gte = br0,
book__lte = br1)
else:
q_results = BibleConcordance.objects.filter(trans = trans)
if len(normal_words) > 0:
#s = "word in (" + ", ".join('\'{0}\''.format(re.sub('\'','\'\'', w)) for w in normal_words) +")"
s = "word in (" + ", ".join('\'{0}\''.format(w) for w in normal_words) +")"
logger.debug("where clause is \""+s +"\"")
q0_results = q_results.extra(where=[s])
q0_results = q0_results.values('word').annotate(Count('word'))
# Example format of q_results
# (Pdb) q_results
# [{'word__count': 988, 'word': u'jesus'}, {'word__count': 80, 'word': u'wept'}]
# Now do the same for the wildcards. This turns out to be hard
# to do because there doesn't seem to be a single query which can
# annotate all wildcard matches. So we have to do them one by one.
match_counts = []
for w in wild_words:
q2_results = q_results.all()
mch = re.match("([a-zA-Z0-9']+)", w)
w1 = mch.group(1)
where_str = "word like '%s%%'" % (w1,)
logger.debug("where_str = " + where_str)
q2_results = q2_results.extra(where = [where_str])
counted = q2_results.count()
match_counts.append({'word__count': counted, 'word': w })
logger.debug( "Count of %s = %d" % (w, counted))
# Append the two lists together
if normal_words:
for q in q0_results:
match_counts.append(q)
for q1 in match_counts:
if lowest == None or q1['word__count'] < lowest:
lowest = q1['word__count']
wrd = q1['word']
logger.debug("Lowest word count for %s is %d" % (wrd, lowest))
else: # only one word in word list
wrd = word_list[0]
# Find all occurrences of this word with lowest occurrence
# frequency
word_list.remove(wrd)
logger.debug("Query Concordance DB")
# If a book range is defined the find all
# concordance lookups in that range
mch = re.match("([a-zA-Z']+)\*$",wrd)
if br0 and br1:
# Is this word a wildcard word?
if mch:
w = mch.group(1)
conc_words = BibleConcordance.objects.\
filter(trans = trans, book__gte = br0, book__lte = br1)\
.extra(where=["word like '%s%%'" % w])\
.order_by('book', 'chapter', 'verse')
else:
conc_words = BibleConcordance.objects.\
filter(trans = trans, book__gte = br0, book__lte = br1,\
word = wrd).order_by('book', 'chapter', 'verse')
# Otherwise
else:
# Is this word a wildcard word?
if mch:
w = mch.group(1)
conc_words = BibleConcordance.objects.filter(trans = trans)\
.extra(where=["word like '%s%%'" % w])\
.order_by('book', 'chapter', 'verse')
else:
conc_words = BibleConcordance.objects.filter(trans = trans,
word = wrd).order_by('book', 'chapter', 'verse')
logger.debug("Number of concordance occurrences of word %s = %d" % (wrd, len(conc_words),))
last_book = None
last_chapter = None
last_verse = None
for wrd_rec in conc_words:
found = True
for wrd in normal_words:
if br0 and br1:
if not BibleConcordance.objects.filter(trans = trans,\
word = wrd,\
book = wrd_rec.book,\
chapter = wrd_rec.chapter,\
verse = wrd_rec.verse ).exists():
found = False
break
else:
if not BibleConcordance.objects.filter(trans = trans,\
word = wrd,\
book = wrd_rec.book,\
chapter = wrd_rec.chapter,\
verse = wrd_rec.verse ).exists():
found = False
break
            # If found, make sure we are not printing duplicate
            # verses when the words occur twice or more in
            # the same verse.
if found and (wrd_rec.book.id != last_book or \
wrd_rec.chapter != last_chapter or \
wrd_rec.verse != last_verse):
last_book = wrd_rec.book.id
last_chapter = wrd_rec.chapter
last_verse = wrd_rec.verse
if mode=="phrase":
verse_text = BibleVerses.objects.filter(trans = trans,
book=wrd_rec.book,
chapter = wrd_rec.chapter,
verse = wrd_rec.verse).first().verse_text
verse_words = strip_fluff_to_list(verse_text.lower())
logger.debug("phrase srch: verse_words = %s" % (str(verse_words),))
logger.debug("phrase srch: word_list = %s" % (str(word_list2),))
if contains_sublist(verse_words, word_list2):
bv = BibleVerses.objects.filter(trans = trans,
book=wrd_rec.book,
chapter=wrd_rec.chapter,
verse=wrd_rec.verse)
verse_text = bv.first().verse_text
logger.debug("In sublist")
yield {'index':sri, 'trans': trans.id, 'book': wrd_rec.book.id,
'chapter': wrd_rec.chapter, 'verse': wrd_rec.verse,
'verse_text':verse_text }
sri += 1
else: # mode == "simple"
found = True
if wild_words:
for wrd in wild_words:
mch = re.match("([a-zA-Z']+)\*$",wrd)
w = mch.group(1)
if not BibleConcordance.objects.filter(trans = trans,\
book = wrd_rec.book,\
chapter = wrd_rec.chapter,\
verse = wrd_rec.verse )\
.extra(where=["word like '%s%%'" % w])\
.exists():
logger.debug("Word %s not found in %s,%d:%d" % (wrd, wrd_rec.book.long_book_name, wrd_rec.chapter, wrd_rec.verse))
found = False
break
if found:
# Add some markup around the search words
bv = BibleVerses.objects.filter(trans = trans,
book=wrd_rec.book,
chapter=wrd_rec.chapter,
verse=wrd_rec.verse)
verse_text = bv.first().verse_text
for wrd in normal_words:
verse_text = re.sub(r"("+wrd+")",
r"<word-match>\1</word-match>",
verse_text,
flags = re.I)
for wrd in wild_words:
wrd = re.sub(r"\*", r"[a-zA-Z]*", wrd)
verse_text = re.sub(r"("+wrd+")",
r"<word-match>\1</word-match>",
verse_text,
flags = re.I)
yield {'index': sri, 'trans': trans.id, 'book': wrd_rec.book.id,
'chapter': wrd_rec.chapter, 'verse': wrd_rec.verse,
'verse_text':verse_text }
sri += 1
# def noticed(self, user, channel, message):
# """ Biblebot receives notice """
# logger.debug('NOTICE: '+ message)
def joined(self, channel):
""" BibleBot joins room """
# Add basic options to room setup
flds = ( ( 'verselimit', 4 ),
( 'searchlimit', 4 ),
( 'default_translation', 'kjv' ),)
network = self.network
for option, value in flds:
opt = get_room_option(network, channel, option)
if not opt:
set_room_option(network, channel, option, value)
def left(self, channel):
""" Called when bible bot leaves channel """
network = self.network
set_room_option(network, channel, 'active', 0)
def book_names(self, regex, chan, nick, **kwargs):
version = regex.group(1)
trans = BibleTranslations.objects.get(name=version)
book_names = []
for bb in BibleBooks.objects.filter(trans = trans):
book_names.append((str(bb.canonical), str(bb.long_book_name)))
if len(book_names) >= 10:
self.notice(nick, str(book_names))
book_names = []
self.notice(nick, str(book_names))
def versions(self, regex, chan, nick, **kwargs):
translations = self._get_translations()
tr_str = ",".join(translations)
self.msg(chan, "Supported translations are %s " % (tr_str,))
def view_xrefs(self, regex, chan, nick, **kwargs):
if nick.lower() not in self.xref_views:
self.say(chan, "*** You need to find xrefs first ***")
return
refs = self.xref_views[nick.lower()][0:3]
del self.xref_views[nick.lower()][0:3]
trans_name = self._get_defaulttranslation(chan, nick)
trans = BibleTranslations.objects.get(name = trans_name)
if not refs:
self.say(chan, "*** No more refs to view ***")
return
for ref in refs:
book_name, refwork = ref.split(' ')
ref1 = refwork.split('-')[0]
chap, vs = ref1.split(':')
book = BibleBooks.objects.get(trans = trans, canonical = book_name)
verse = BibleVerses.objects.get(trans = trans,
book = book,
chapter = chap,
verse = vs)
msg = u"{} {}:{} {}".format(book.long_book_name,
chap,
vs,
verse.verse_text)
self.say(chan, msg)
@irc_network_permission_required('set_pvt_version')
def set_pvt_translation(self, regex, chan, nick, **kwargs):
trans = regex.group(1)
translations = self._get_translations()
if trans not in translations:
self.msg(chan, "Could not locate translation %s " % (def_trans,))
return True
else:
set_global_option('pvt-translation', trans)
self.msg(chan, "Private translation set to %s " % (trans,))
@irc_room_permission_required('set_default_translation')
def set_default_trans(self, regex, chan, nick, **kwargs):
room = regex.group('room')
def_trans = regex.group(2)
translations = self._get_translations()
if def_trans not in translations:
self.msg(chan, "Could not locate translation %s " % (def_trans,))
return
else:
set_room_option(self.factory.network, room, \
'default_translation', def_trans)
self.msg(chan, "Default translation for %s set to %s " % (room,def_trans))
@irc_room_permission_required('set_verse_limits')
def set_search_limit(self, regex, chan, nick, **kwargs):
room = regex.group('room')
searchlmt = int(regex.group(2))
# Get the channel the user is authorised to access
if searchlmt > 20:
self.msg(chan, "Search limit cannot be set higher than 20")
else:
set_room_option(self.factory.network, room, \
'searchlimit', searchlmt)
self.msg(chan, "Search limit for %s set to %s " % (room, searchlmt))
@irc_room_permission_required('set_verse_limits')
def set_verse_limit(self, regex, chan, nick, **kwargs):
room = regex.group('room')
verselmt = int(regex.group(2))
if verselmt > 20:
self.msg(chan, "Verse limit cannot be set higher than 20")
else:
set_room_option(self.factory.network, room, \
'verselimit', verselmt)
self.msg(chan, "Verse limit for %s set to %s " % (room,verselmt))
def random_verse(self, regex, chan, nick, **kwargs):
        try:
            translation = regex.group('translation')
        except IndexError:
            translation = None
        if not translation:
            # fall back to the room/user default translation
            translation = self._get_defaulttranslation(chan, nick)
        #translation = get_room_option(self.factory.network, chan, "default_translation")
try:
trans = BibleTranslations.objects.get(name = translation)
except BibleTranslations.DoesNotExist:
self.say(chan, "Translation {} not known".format(translation))
return
# Find first index of Genesis and last index of Revelation
verse_range_data = BibleVerses.objects.filter(trans = trans).aggregate(Min('id'), Max('id'))
v1 = verse_range_data['id__min']
v2 = verse_range_data['id__max']
        random_scripture = BibleVerses.objects.filter(id__gte = v1, id__lte = v2).order_by("?").first()
trans_name = random_scripture.trans.name
book_name = random_scripture.book.long_book_name
vs_text = random_scripture.verse_text
random_vs = "{} {} {}:{} {}".format(trans_name.upper(),
book_name,
random_scripture.chapter,
random_scripture.verse,
vs_text)
self.say(chan, random_vs)
def dict(self, regex, chan, nick, **kwargs):
lookup = regex.group(1)
lookup = lookup.upper()
try:
dict_obj = BibleDict.objects.get(strongs=lookup)
description = dict_obj.description
self.say(chan, description)
signal_data = {'chan': chan, 'nick': nick, 'strongs':lookup, 'dict':description }
self.signal("dict_lookup", signal_data)
except BibleDict.DoesNotExist:
self.say(chan, "Sorry %s not found" % lookup)
def search(self, regex, chan, nick, **kwargs):
searchlimit = self._get_searchlimit(chan)
words = [x.lower() for x in regex.group(1).strip().split(' ')]
def_trans = self._get_defaulttranslation(chan, nick)
parse_res = self._parse_trans_book_range(def_trans, words)
if len(words) == 0:
self.msg(chan, "Must have at least one word for search")
# self.usage(chan, 'search')
else:
trans = parse_res['translation']
book_range = ( parse_res['book_start'],
parse_res['book_end'] )
book_range_s = self._stringify_book_range(trans, book_range)
self.msg(chan, "searching for \"" + ", ".join(words) +"\"" + \
" in " + trans.upper() + " " + book_range_s + " ....")
trans = BibleTranslations.objects.get(name=trans)
gen = self._concordance_generator(chan, nick, trans, book_range,
words, mode="simple")
if chan.lower() not in self.pending_searches:
self.pending_searches[chan.lower()] = {nick.lower():{}}
if nick.lower() not in self.pending_searches[chan.lower()]:
self.pending_searches[chan.lower()][nick.lower()] = {}
self.pending_searches[chan.lower()][nick.lower()]['gen'] = gen
if chan == '%shell%':
self._search_long_time(chan, nick)
else:
delayed = self.reactor.callLater(3.5, self._search_long_time, chan, nick)
self.pending_searches[chan.lower()][nick.lower()]['delayed'] = delayed
finished = self._format_search_results(chan, nick.lower())
if finished:
del self.pending_searches[chan.lower()][nick.lower()]
def next_search(self, regex, chan, nick, **kwargs):
        if chan.lower() in self.pending_searches and \
                nick.lower() in self.pending_searches[chan.lower()]:
gen = self.pending_searches[chan.lower()][nick.lower()]['gen']
delayed = self.reactor.callLater(3.5, self._search_long_time, chan, nick)
self.pending_searches[chan.lower()][nick.lower()]['delayed'] = delayed
finished = self._format_search_results(chan, nick.lower())
if finished:
del self.pending_searches[chan.lower()][nick.lower()]
else:
self.say(chan, "*** There is no currently active search***")
def phrase_search(self, regex, chan, nick, **kwargs):
phrase = regex.group(2)
ref = regex.group(1)
searchlimit = self._get_searchlimit(chan)
words = [x.lower() for x in phrase.strip().split(' ')]
ref_words = [x.lower() for x in ref.strip().split(' ')]
def_trans = self._get_defaulttranslation(chan, nick)
parse_res = self._parse_trans_book_range(def_trans, ref_words)
if len(words) == 0:
self.msg(chan, "Error: Must have at least one word for "+act+"search")
else:
selected_trans = parse_res['translation']
book_range = ( parse_res['book_start'],
parse_res['book_end'] )
book_range_s = self._stringify_book_range(selected_trans, book_range)
self.say(chan, "searching for phrase...\"%s\" in %s %s" % (phrase,selected_trans.upper(),book_range_s))
trans = BibleTranslations.objects.get(name=selected_trans)
gen = self._concordance_generator(chan, nick, trans,
book_range, words, mode="phrase")
            if chan.lower() not in self.pending_searches:
                self.pending_searches[chan.lower()] = {nick.lower():{}}
            if nick.lower() not in self.pending_searches[chan.lower()]:
                self.pending_searches[chan.lower()][nick.lower()] = {}
            self.pending_searches[chan.lower()][nick.lower()]['gen'] = gen
delayed = self.reactor.callLater(3.5, self._search_long_time, chan, nick)
self.pending_searches[chan.lower()][nick.lower()]['delayed'] = delayed
self._format_search_results(chan, nick.lower())
def _get_colour(self, chan, elmt):
try:
clr = BibleColours.objects.get(network=self.network, room=chan,
element=elmt)
return clr.mirc_colour
except BibleColours.DoesNotExist:
return None
def xref(self, regex, chan, nick, **kwargs):
passage_ref = regex.group(1).lower().strip()
# mch1 = re.match(r"([a-z\+]+)\s+([1-3]?\s*[a-z]+)\s+(.*)", passage_ref)
mch = re.match(r"([1-3]?\s*[a-z]+)\s+(.*)", passage_ref)
if not mch:
raise CommandException(nick, chan, \
"Could not decipher scripture xref " +\
"reference. Format is " +\
"<book> <chapter>:<verse>")
assert mch.lastindex == 2
bookwork = mch.group(1)
versework = mch.group(2)
# remove possible spaces between books like "1 John" etc
book = get_book(None, bookwork)
if not book:
            raise CommandException(nick, chan, "Could not find book %s" % (bookwork,))
# remove embedded spaces
passage = re.sub(r"(\d+)\s*:\s*(\d+)",r"\1:\2",versework)
splitwork = re.split('(?::|-|\s+)',passage)
chapter = int(splitwork.pop(0))
if splitwork:
verse = int(splitwork.pop(0))
else:
            raise CommandException(nick, chan, \
                        "Missing verse reference. Format is <book> <chapter>:<verse>")
xrefs = XRefs.objects.filter(primary_book = book,
primary_chapter = chapter,
primary_verse = verse ).order_by('-votes')
xref_list = []
for xref in islice(xrefs, 20):
if xref.xref_book2:
s = "{} {}:{}-{}:{}".format(xref.xref_book1, xref.xref_chapter1, xref.xref_verse1,
xref.xref_chapter2, xref.xref_verse2)
else:
s = "{} {}:{}".format(xref.xref_book1, xref.xref_chapter1, xref.xref_verse1)
xref_list.append(s)
self.xref_views[nick.lower()] = xref_list
xref_resp = ", ".join(xref_list)
self.say(chan, xref_resp)
def verse_lookup(self, regex, chan, nick, **kwargs):
user = kwargs['user']
msg = kwargs['clean_line']
normal_colours = []
normal_colours.append(self._get_colour(chan, "normal-translation"))
normal_colours.append(self._get_colour(chan, "normal-verse-ref"))
normal_colours.append(self._get_colour(chan, "normal-verse-text"))
try:
result = self._get_verses(chan, nick, user, msg)
except BibleBooks.DoesNotExist:
self.say(chan, "Book does not exist in this translation")
return
signal_data = {'nick':nick, 'chan':chan, 'verses':result}
self.signal("verse_lookup", signal_data)
for resp in result:
clr_reply = []
normal_colours1 = copy.copy(normal_colours)
for elmt in resp:
clr = normal_colours1.pop(0)
if clr == None:
clr_reply.append(elmt)
else:
fg,bg = clr.split(",")
clr_reply.append("\x03{},{} ".format(fg,bg)+elmt+" \x03")
reply = ' '.join(clr_reply)
logger.debug(repr(reply))
self.say(chan, reply.encode("utf-8", "replace_spc"))
def next(self, regex, chan, nick, **kwargs):
result = self._next_reading(chan, nick)
signal_data = {'nick':nick, 'chan':chan, 'verses':result}
self.signal("verse_lookup", signal_data)
if result:
for resp in result:
reply = ' '.join(resp)
self.say(chan, reply)
else:
self.say(chan, "No more verses to read")
def _parse_trans_book_range(self, def_trans, words):
results = {}
results['book_start'] = None
results['book_end'] = None
translations = self._get_translations()
trans1 = words[0]
if trans1 in translations:
results['translation'] = trans1
words.pop(0)
else:
results['translation'] = def_trans
if len(words) == 0:
return results
mch = re.match('([1-3]?[a-z]+)$', words[0], re.I)
mch2 = re.match('([1-3]?[a-z]+)-([1-3]?[a-z]+)$', words[0], re.I)
if mch2:
bk_s = mch2.group(1)
bk_e = mch2.group(2)
if get_book(results['translation'], bk_s) and \
get_book(results['translation'], bk_e):
results['book_start'] = bk_s
results['book_end'] = bk_e
words.pop(0)
elif mch:
bk = mch.group(0).lower()
if get_book(results['translation'], bk):
results['book_start'] = words.pop(0)
results['book_end'] = results['book_start']
elif bk == 'nt':
results['book_start'] = 'mat'
results['book_end'] = 'rev'
words.pop(0)
elif bk == 'ot':
results['book_start'] = 'genesis'
results['book_end'] = 'malachi'
words.pop(0)
return results
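        # Illustrative results (hedged examples): with def_trans='kjv',
        # words=['nasb', 'john', 'love'] this returns
        # {'translation': 'nasb', 'book_start': 'john', 'book_end': 'john'}
        # and leaves words as ['love']; words=['ot', 'mercy'] yields the
        # genesis-malachi range (assuming 'ot' is not itself a book name).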
def _search_long_time(self, chan, nick):
logger.info("_search_long_time called with %s" % str((chan, nick)))
def _search_results(self, chan, nick, results, finished):
if chan != '%shell%':
delayed = self.pending_searches[chan.lower()][nick.lower()]['delayed']
try:
delayed.cancel()
except (AlreadyCalled, AlreadyCancelled) as e:
pass
start_time = self.pending_searches[chan.lower()][nick.lower()]['timestamp']
elapsed = time.clock() - start_time
signal_data = {'chan': chan, 'nick': nick, 'verses':results }
self.signal("verse_search", signal_data)
for result in results:
            # strip all <word-match> tags from the result stream
            # (markup rendering is not implemented yet, so drop the tags)
result = re.sub(r"<[^>]*>", "", result)
self.say(chan, result)
if finished:
self.say(chan, "*** No more search results")
self.say(chan, "Query took %6.3f seconds " % (elapsed,))
def _threaded_search_results(self, chan, nick, gen):
results = []
finished = False
srch_limit = self._get_searchlimit(chan)
for ii in range(0,srch_limit):
try:
res = gen.next()
trans = BibleTranslations.objects.get(pk=res['trans'])
book = BibleBooks.objects.get(pk=res['book'])
idx = res['index']
chptr = res['chapter']
vrse = res['verse']
verse_txt = res['verse_text']
verse_ref = "{} {}:{}".format(book.long_book_name, chptr, vrse)
clr = self._get_colour(chan, "search-translation")
if clr:
fg,bg = clr.split(",")
trans_name = "\x03{},{} ".format(fg,bg)+trans.name.upper()+" \x03"
else:
trans_name = trans.name.upper()
clr = self._get_colour(chan, "search-verse-ref")
if clr:
fg,bg = clr.split(",")
verse_ref = "\x03{},{} ".format(fg,bg)+verse_ref+" \x03"
clr = self._get_colour(chan, "search-verse-text")
clr_words = self._get_colour(chan,"search-words")
if clr:
fg,bg = clr.split(",")
prefix_clr = "\x03{},{}".format(fg,bg)
if clr_words:
fgw,bgw = clr_words.split(",")
                    prefix_clw = "\x03{},{}".format(fgw,bgw)
pieces2 = re.findall(r"<word-match>([^<]+?)</word-match>",verse_txt)
if len(pieces2) > 0:
pieces1 = re.split(r"<word-match>[^<]+?</word-match>",verse_txt)
pieces2.append("")
# temporary hack until phrase search has markup
if 'pieces1' in locals():
assert (len(pieces1) == len(pieces2))
# Here we rely on re.split returning the first list element
# as an empty string if the word match occurs at the beginning
# of the string which seems to be the case
txt = ""
for piece1, piece2 in zip(pieces1, pieces2):
if clr and piece1 != '':
txt += "\x03{},{}{}\x03".format(fg,bg,piece1)
elif not clr:
txt += piece1
if clr_words and piece2 != '':
txt += "\x03{},{}{}\x03".format(fgw,bgw,piece2)
elif not clr_words:
txt += piece2
else:
txt = verse_txt
resp = "[%d] %s %s %s" % (idx, trans_name, verse_ref, txt)
logger.debug( repr(resp))
results.append(resp)
except StopIteration:
# self.say(chan, "*** No more search results")
finished = True
if chan != '%shell%' and THREADED_SEARCH:
self.reactor.callFromThread(self._search_results, chan, nick, results, finished)
else:
self._search_results(chan, nick, results, finished)
def _format_search_results(self, chan, nick):
start_time = time.clock()
self.pending_searches[chan.lower()][nick.lower()]['timestamp'] = start_time
gen = self.pending_searches[chan.lower()][nick.lower()]['gen']
if chan != '%shell%' and THREADED_SEARCH:
self.reactor.callInThread(self._threaded_search_results, chan, nick, gen)
else:
results = self._threaded_search_results(chan, nick, gen)
def _stringify_book_range(self, version, book_range):
if book_range[0] == None:
return "Gen-Rev"
elif book_range[1] == None:
bk = get_book(version, book_range[0])
return bk
else:
bk1 = get_book(version, book_range[0])
bk2 = get_book(version, book_range[1])
if bk1 == bk2:
return bk1
else:
return bk1 + "-" + bk2
|
{
"content_hash": "dd85a81468850bc519587c11699886ab",
"timestamp": "",
"source": "github",
"line_count": 1093,
"max_line_length": 147,
"avg_line_length": 41.68161024702653,
"alnum_prop": 0.4877518767285658,
"repo_name": "kiwiheretic/logos-v2",
"id": "55c9780d325c10f89134bea09b0f560b5b38e53d",
"size": "45570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bibleapp/bot_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "39336"
},
{
"name": "HTML",
"bytes": "90623"
},
{
"name": "JavaScript",
"bytes": "2169514"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Python",
"bytes": "610582"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "5552"
}
],
"symlink_target": ""
}
|
__version__ = '0.2.1'
from .events import Events, EventsException
__all__ = [
Events.__name__,
EventsException.__name__,
]
|
{
"content_hash": "52bc1540d9aaa1a81c338093dfbcf6fd",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 16.625,
"alnum_prop": 0.5939849624060151,
"repo_name": "Graghav/surabi",
"id": "1b095b230db6a9e72ebfb89a1d532927c1ef07d9",
"size": "158",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ADMIN/venv/lib/python2.7/site-packages/events/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39177"
},
{
"name": "CSS",
"bytes": "1313697"
},
{
"name": "HTML",
"bytes": "59143"
},
{
"name": "JavaScript",
"bytes": "3508430"
},
{
"name": "Python",
"bytes": "5679903"
},
{
"name": "Shell",
"bytes": "3262"
}
],
"symlink_target": ""
}
|
from time import sleep
from typing import Optional
from sqlalchemy import Column, Index, Integer, String
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import backref, foreign, relationship
from sqlalchemy.orm.session import make_transient
from airflow.compat.functools import cached_property
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.models import DagRun
from airflow.models.base import ID_LEN, Base
from airflow.models.taskinstance import TaskInstance
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(
String(ID_LEN),
)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {'polymorphic_on': job_type, 'polymorphic_identity': 'BaseJob'}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
Index('idx_job_state_heartbeat', state, latest_heartbeat),
)
task_instances_enqueued = relationship(
TaskInstance,
primaryjoin=id == foreign(TaskInstance.queued_by_job_id),
backref=backref('queued_by_job', uselist=False),
)
dag_runs = relationship(
DagRun,
primaryjoin=id == foreign(DagRun.creating_job_id),
backref=backref('creating_job'),
)
"""
TaskInstances which have been enqueued by this Job.
Only makes sense for SchedulerJob and BackfillJob instances.
"""
heartrate = conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC')
def __init__(self, executor=None, heartrate=None, *args, **kwargs):
self.hostname = get_hostname()
if executor:
self.executor = executor
self.executor_class = executor.__class__.__name__
else:
self.executor_class = conf.get('core', 'EXECUTOR')
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
if heartrate is not None:
self.heartrate = heartrate
self.unixname = getuser()
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
super().__init__(*args, **kwargs)
@cached_property
def executor(self):
return ExecutorLoader.get_default_executor()
@classmethod
@provide_session
def most_recent_job(cls, session=None) -> Optional['BaseJob']:
"""
Return the most recent job of this type, if any, based on last
heartbeat received.
This method should be called on a subclass (i.e. on SchedulerJob) to
return jobs of that type.
:param session: Database session
:rtype: BaseJob or None
"""
return session.query(cls).order_by(cls.latest_heartbeat.desc()).limit(1).first()
def is_alive(self, grace_multiplier=2.1):
"""
Is this job currently alive.
We define alive as in a state of RUNNING, and having sent a heartbeat
within a multiple of the heartrate (default of 2.1)
:param grace_multiplier: multiplier of heartrate to require heart beat
within
:type grace_multiplier: number
:rtype: boolean
"""
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds()
< self.heartrate * grace_multiplier
)
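        # Illustrative check (hedged example): with heartrate=5 and the
        # default grace_multiplier of 2.1, a RUNNING job counts as alive only
        # if its latest heartbeat is less than 10.5 seconds old.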
@provide_session
def kill(self, session=None):
"""Handles on_kill callback and updates state in database."""
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error('on_kill() method failed: %s', str(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""Will be called when an external kill command is received"""
def heartbeat_callback(self, session=None):
"""Callback that is called during heartbeat. This method should be overwritten."""
def heartbeat(self, only_if_necessary: bool = False):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param only_if_necessary: If the heartbeat is not yet due then do
nothing (don't update column, don't call ``heartbeat_callback``)
:type only_if_necessary: boolean
"""
seconds_remaining = 0
if self.latest_heartbeat:
seconds_remaining = self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if seconds_remaining > 0 and only_if_necessary:
return
previous_heartbeat = self.latest_heartbeat
try:
with create_session() as session:
# This will cause it to load from the db
session.merge(self)
previous_heartbeat = self.latest_heartbeat
if self.state in State.terminating_states:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if self.latest_heartbeat:
seconds_remaining = (
self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
)
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
# Make the session aware of this object
session.merge(self)
self.latest_heartbeat = timezone.utcnow()
session.commit()
# At this point, the DB has updated.
previous_heartbeat = self.latest_heartbeat
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError:
Stats.incr(convert_camel_to_snake(self.__class__.__name__) + '_heartbeat_failure', 1, 1)
self.log.exception("%s heartbeat got an exception", self.__class__.__name__)
# We didn't manage to heartbeat, so make sure that the timestamp isn't updated
self.latest_heartbeat = previous_heartbeat
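        # Illustrative timing (hedged example): with heartrate=60 and 10s
        # elapsed since latest_heartbeat, seconds_remaining is 50 and the call
        # sleeps 50s before recording the new heartbeat; with 70s elapsed it
        # does not sleep at all.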
def run(self):
"""Starts the job."""
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
make_transient(self)
try:
self._execute()
# In case of max runs or max duration
self.state = State.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
self.state = State.SUCCESS
except Exception:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
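# Minimal subclass sketch (hedged example, not part of this module): the
# contract described in the class docstring is that subclasses implement only
# _execute(); run() handles the RUNNING/SUCCESS/FAILED transitions, the
# start/end timestamps and the Stats counters.
#
#     class NoopJob(BaseJob):
#         __mapper_args__ = {'polymorphic_identity': 'NoopJob'}
#
#         def _execute(self):
#             self.log.info("doing nothing, heartbeating once")
#             self.heartbeat(only_if_necessary=True)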
|
{
"content_hash": "ca46bb96d04d0456cf78fecf52329cc4",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 108,
"avg_line_length": 36.86008230452675,
"alnum_prop": 0.625544267053701,
"repo_name": "dhuang/incubator-airflow",
"id": "745f248fc4da02a7fcc934c6494a72024cba2cc4",
"size": "9747",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/jobs/base_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import, unicode_literals
from sphinx_explorer.util.commander import Commander
import platform
def test_python_mode():
commander = Commander(system="Linux", py2=False)
c = commander(["python", "/test path"])
if platform.system() == "Linux":
assert c == "/bin/bash -c \"python '/test path'\""
if __name__ == "__main__":
import pytest
pytest.main()
|
{
"content_hash": "d0e0afd33d643f4e24357a54aac411e2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.6514806378132119,
"repo_name": "pashango2/sphinx-explorer",
"id": "35c437a9f86ce4078ffea4308e7f28401665397a",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_commander.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15218"
},
{
"name": "Python",
"bytes": "292789"
},
{
"name": "QMake",
"bytes": "253"
}
],
"symlink_target": ""
}
|
"""This example downloads a criteria performance report with AWQL. To get report
fields, run get_report_fields.py.
Tags: ReportDefinitionService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
# Specify where to download the file here.
path = '/tmp/report_download.csv'
def main(client, path):
# Initialize appropriate service.
report_downloader = client.GetReportDownloader(version='v201306')
# Create report query.
report_query = ('SELECT CampaignId, AdGroupId, Id, Criteria, CriteriaType, '
'Impressions, Clicks, Cost '
'FROM CRITERIA_PERFORMANCE_REPORT '
'WHERE Status IN [ACTIVE, PAUSED] '
'DURING LAST_7_DAYS')
file_path = report_downloader.DownloadReportWithAwql(report_query,
'CSV',
file_path=path)
print 'Report was downloaded to \'%s\'.' % file_path
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, path)
|
{
"content_hash": "24ffd4f182c2f4f693c923604db729e7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6024096385542169,
"repo_name": "donspaulding/adspygoogle",
"id": "b3db42e1e503cb673444f9673d2c265a7ba4ec40",
"size": "1946",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/adwords/v201306/reporting/download_criteria_report_with_awql.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
"""Recipe module to ensure a checkout is consistent on a bot."""
from recipe_engine import recipe_api
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
class BotUpdateApi(recipe_api.RecipeApi):
def __init__(self, properties, deps_revision_overrides, fail_patch, *args,
**kwargs):
self._deps_revision_overrides = deps_revision_overrides
self._fail_patch = fail_patch
self._last_returned_properties = {}
super(BotUpdateApi, self).__init__(*args, **kwargs)
def initialize(self):
assert len(self.m.buildbucket.build.input.gerrit_changes) <= 1, (
'bot_update does not support more than one '
'buildbucket.build.input.gerrit_changes')
def __call__(self, name, cmd, **kwargs):
"""Wrapper for easy calling of bot_update."""
assert isinstance(cmd, (list, tuple))
bot_update_path = self.resource('bot_update.py')
kwargs.setdefault('infra_step', True)
# If a Git HTTP request is constantly below GIT_HTTP_LOW_SPEED_LIMIT
# bytes/second for GIT_HTTP_LOW_SPEED_TIME seconds then such request will be
# aborted. Otherwise, it would wait for global timeout to be reached.
env = {
'GIT_HTTP_LOW_SPEED_LIMIT': '102400', # in bytes
'GIT_HTTP_LOW_SPEED_TIME': 300, # in seconds
}
with self.m.context(env=env):
with self.m.depot_tools.on_path():
return self.m.python(name, bot_update_path, cmd, **kwargs)
@property
def last_returned_properties(self):
return self._last_returned_properties
def _get_commit_repo_path(self, commit, gclient_config):
"""Returns local path to the repo that the commit is associated with.
The commit must be a self.m.buildbucket.common_pb2.GitilesCommit.
If commit does not specify any repo, returns name of the first solution.
Raises an InfraFailure if the commit specifies a repo unexpected by gclient.
"""
assert gclient_config.solutions, 'gclient_config.solutions is empty'
# if repo is not specified, choose the first solution.
if not (commit.host and commit.project):
return gclient_config.solutions[0].name
assert commit.host and commit.project
repo_url = self.m.gitiles.unparse_repo_url(commit.host, commit.project)
repo_path = self.m.gclient.get_repo_path(
repo_url, gclient_config=gclient_config)
if not repo_path:
raise self.m.step.InfraFailure(
'invalid (host, project) pair in '
'buildbucket.build.input.gitiles_commit: '
'(%r, %r) does not match any of configured gclient solutions '
'and not present in gclient_config.repo_path_map' % (
commit.host, commit.project))
return repo_path
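    # Illustrative mapping (hedged example, depends on the gclient config):
    # a commit with host 'chromium.googlesource.com' and project
    # 'chromium/src' typically resolves to the solution path 'src'.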
def ensure_checkout(self,
gclient_config=None,
suffix=None,
patch=True,
update_presentation=True,
patch_root=None,
with_branch_heads=False,
with_tags=False,
no_fetch_tags=False,
refs=None,
patch_oauth2=None,
oauth2_json=None,
use_site_config_creds=None,
clobber=False,
root_solution_revision=None,
rietveld=None,
issue=None,
patchset=None,
gerrit_no_reset=False,
gerrit_no_rebase_patch_ref=False,
disable_syntax_validation=False,
manifest_name=None,
patch_refs=None,
ignore_input_commit=False,
set_output_commit=False,
step_test_data=None,
**kwargs):
"""
Args:
gclient_config: The gclient configuration to use when running bot_update.
If omitted, the current gclient configuration is used.
no_fetch_tags: When true, the root git repo being checked out will not
fetch any tags referenced from the references being fetched. When a repo
has many references, it can become a performance bottleneck, so avoid
tags if the checkout will not need them present.
disable_syntax_validation: (legacy) Disables syntax validation for DEPS.
Needed as migration paths for recipes dealing with older revisions,
such as bisect.
manifest_name: The name of the manifest to upload to LogDog. This must
be unique for the whole build.
ignore_input_commit: if True, ignore api.buildbucket.gitiles_commit.
Exists for historical reasons. Please do not use.
set_output_commit: if True, mark the checked out commit as the
primary output commit of this build, i.e. call
api.buildbucket.set_output_gitiles_commit.
In case of multiple repos, the repo is the one specified in
api.buildbucket.gitiles_commit or the first configured solution.
When sorting builds by commit position, this commit will be used.
Requires falsy ignore_input_commit.
step_test_data: a null function that returns test bot_update.py output.
Use test_api.output_json to generate test data.
"""
assert use_site_config_creds is None, "use_site_config_creds is deprecated"
assert rietveld is None, "rietveld is deprecated"
assert issue is None, "issue is deprecated"
assert patchset is None, "patchset is deprecated"
assert patch_oauth2 is None, "patch_oauth2 is deprecated"
assert oauth2_json is None, "oauth2_json is deprecated"
assert not (ignore_input_commit and set_output_commit)
refs = refs or []
# We can re-use the gclient spec from the gclient module, since all the
# data bot_update needs is already configured into the gclient spec.
cfg = gclient_config or self.m.gclient.c
assert cfg is not None, (
'missing gclient_config or forgot api.gclient.set_config(...) before?')
# Construct our bot_update command. This basically be inclusive of
# everything required for bot_update to know:
patch_root = patch_root or self.m.gclient.get_gerrit_patch_root(
gclient_config=cfg)
# Allow patched project's revision if necessary.
# This is important for projects which are checked out as DEPS of the
# gclient solution.
self.m.gclient.set_patch_repo_revision(cfg)
reverse_rev_map = self.m.gclient.got_revision_reverse_mapping(cfg)
flags = [
# What do we want to check out (spec/root/rev/reverse_rev_map).
['--spec-path', self.m.raw_io.input(
self.m.gclient.config_to_pythonish(cfg))],
['--patch_root', patch_root],
['--revision_mapping_file', self.m.json.input(reverse_rev_map)],
['--git-cache-dir', cfg.cache_dir],
['--cleanup-dir', self.m.path['cleanup'].join('bot_update')],
# Hookups to JSON output back into recipes.
['--output_json', self.m.json.output()],
]
# How to find the patch, if any
if patch:
repo_url = self.m.tryserver.gerrit_change_repo_url
fetch_ref = self.m.tryserver.gerrit_change_fetch_ref
target_ref = self.m.tryserver.gerrit_change_target_ref
if repo_url and fetch_ref:
flags.append([
'--patch_ref',
'%s@%s:%s' % (repo_url, target_ref, fetch_ref),
])
if patch_refs:
flags.extend(
['--patch_ref', patch_ref]
for patch_ref in patch_refs)
# Compute requested revisions.
revisions = {}
for solution in cfg.solutions:
if solution.revision:
revisions[solution.name] = solution.revision
# HACK: ensure_checkout API must be redesigned so that we don't pass such
    # parameters. Existing semantics are too opinionated.
in_commit = self.m.buildbucket.gitiles_commit
in_commit_rev = in_commit.id or in_commit.ref
if not ignore_input_commit and in_commit_rev:
# Note: this is not entirely correct. build.input.gitiles_commit
# definition says "The Gitiles commit to run against.".
# However, here we ignore it if the config specified a revision.
# This is necessary because existing builders rely on this behavior,
# e.g. they want to force refs/heads/master at the config level.
in_commit_repo_path = self._get_commit_repo_path(in_commit, cfg)
revisions[in_commit_repo_path] = (
revisions.get(in_commit_repo_path) or in_commit_rev)
parsed_solution_urls = set(
self.m.gitiles.parse_repo_url(s.url) for s in cfg.solutions)
if (in_commit.id and in_commit.ref
and (in_commit.host, in_commit.project) in parsed_solution_urls):
refs = [in_commit.ref] + refs
# Guarantee that first solution has a revision.
# TODO(machenbach): We should explicitly pass HEAD for ALL solutions
# that don't specify anything else.
first_sol = cfg.solutions[0].name
revisions[first_sol] = revisions.get(first_sol) or 'HEAD'
if cfg.revisions:
# Only update with non-empty values. Some recipe might otherwise
# overwrite the HEAD default with an empty string.
revisions.update(
(k, v) for k, v in cfg.revisions.items() if v)
if cfg.solutions and root_solution_revision:
revisions[first_sol] = root_solution_revision
# Allow for overrides required to bisect into rolls.
revisions.update(self._deps_revision_overrides)
# Compute command-line parameters for requested revisions.
# Also collect all fixed revisions to simulate them in the json output.
# Fixed revision are the explicit input revisions of bot_update.py, i.e.
# every command line parameter "--revision name@value".
fixed_revisions = {}
for name, revision in sorted(revisions.items()):
fixed_revision = self.m.gclient.resolve_revision(revision)
if fixed_revision:
fixed_revisions[name] = fixed_revision
if fixed_revision.upper() == 'HEAD':
# Sync to correct destination ref if HEAD was specified.
fixed_revision = self._destination_ref(cfg, name)
# If we're syncing to a ref, we want to make sure it exists before
# trying to check it out.
if (fixed_revision.startswith('refs/') and
# TODO(crbug.com/874501): fetching additional refs is currently
# only supported for the root solution. We should investigate
# supporting it for other dependencies.
cfg.solutions and
cfg.solutions[0].name == name):
# Handle the "ref:revision" syntax, e.g.
# refs/branch-heads/4.2:deadbeef
refs.append(fixed_revision.split(':')[0])
flags.append(['--revision', '%s@%s' % (name, fixed_revision)])
for ref in refs:
assert not ref.startswith('refs/remotes/'), (
'The "refs/remotes/*" syntax is not supported.\n'
'The "remotes" syntax is dependent on the way the local repo is '
'configured, and while there are defaults that can often be '
'assumed, there is no guarantee the mapping will always be done in '
'a particular way.')
# Add extra fetch refspecs.
for ref in refs:
flags.append(['--refs', ref])
# Filter out flags that are None.
cmd = [item for flag_set in flags
for item in flag_set if flag_set[1] is not None]
if clobber:
cmd.append('--clobber')
if with_branch_heads or cfg.with_branch_heads:
cmd.append('--with_branch_heads')
if with_tags or cfg.with_tags:
cmd.append('--with_tags')
if gerrit_no_reset:
cmd.append('--gerrit_no_reset')
if no_fetch_tags:
cmd.append('--no_fetch_tags')
if gerrit_no_rebase_patch_ref:
cmd.append('--gerrit_no_rebase_patch_ref')
if disable_syntax_validation or cfg.disable_syntax_validation:
cmd.append('--disable-syntax-validation')
# Inject Json output for testing.
first_sln = cfg.solutions[0].name
step_test_data = step_test_data or (lambda: self.test_api.output_json(
patch_root, first_sln, reverse_rev_map, self._fail_patch,
fixed_revisions=fixed_revisions))
name = 'bot_update'
if not patch:
name += ' (without patch)'
if suffix:
name += ' - %s' % suffix
# Ah hah! Now that everything is in place, lets run bot_update!
step_result = None
try:
# 87 and 88 are the 'patch failure' codes for patch download and patch
# apply, respectively. We don't actually use the error codes, and instead
# rely on emitted json to determine cause of failure.
step_result = self(
name, cmd, step_test_data=step_test_data,
ok_ret=(0, 87, 88), **kwargs)
except self.m.step.StepFailure as f:
step_result = f.result
raise
finally:
if step_result and step_result.json.output:
result = step_result.json.output
self._last_returned_properties = result.get('properties', {})
if update_presentation:
# Set properties such as got_revision.
for prop_name, prop_value in (
self.last_returned_properties.items()):
step_result.presentation.properties[prop_name] = prop_value
# Add helpful step description in the step UI.
if 'step_text' in result:
step_text = result['step_text']
step_result.presentation.step_text = step_text
# Export the step results as a Source Manifest to LogDog.
source_manifest = result.get('source_manifest', {})
if manifest_name:
if not patch:
# The param "patched" is purely cosmetic to mean "if false, this
# bot_update run exists purely to unpatch an existing patch".
manifest_name += '_unpatched'
self.m.source_manifest.set_json_manifest(
manifest_name, source_manifest)
# Set output commit of the build.
if set_output_commit:
# As of April 2019, got_revision describes the output commit,
# the same commit that Build.output.gitiles_commit describes.
# In particular, users tend to set got_revision to make Milo display
# it. Derive output commit from got_revision.
out_commit = common_pb2.GitilesCommit(
id=self._last_returned_properties['got_revision'],
)
out_solution = reverse_rev_map['got_revision']
out_manifest = result['manifest'][out_solution]
assert out_manifest['revision'] == out_commit.id, (
out_manifest, out_commit.id)
out_commit.host, out_commit.project = (
self.m.gitiles.parse_repo_url(out_manifest['repository'])
)
# Determine the output ref.
got_revision_cp = self._last_returned_properties.get('got_revision_cp')
in_rev = revisions.get(out_solution)
if got_revision_cp:
# If commit position string is available, read the ref from there.
out_commit.ref, out_commit.position = (
self.m.commit_position.parse(got_revision_cp))
elif in_rev.startswith('refs/'):
# If we were asked to check out a specific ref, use it as output
# ref.
out_commit.ref = in_rev
elif in_rev == 'HEAD':
# bot_update.py interprets HEAD as refs/heads/master
out_commit.ref = 'refs/heads/master'
elif out_commit.id == in_commit.id and in_commit.ref:
# Derive output ref from the input ref.
out_commit.ref = in_commit.ref
else: # pragma: no cover
assert False, (
                'Unsupported case. '
'Call buildbucket.set_output_gitiles_commit directly.'
)
self.m.buildbucket.set_output_gitiles_commit(out_commit)
# Set the "checkout" path for the main solution.
# This is used by the Chromium module to figure out where to look for
# the checkout.
# If there is a patch failure, emit another step that said things
# failed.
if result.get('patch_failure'):
return_code = result.get('patch_apply_return_code')
patch_body = result.get('failed_patch_body')
try:
if return_code == 3:
# This is download failure, hence an infra failure.
with self.m.context(infra_steps=True):
self.m.python.failing_step(
'Patch failure', 'Git reported a download failure')
else:
# This is actual patch failure.
self.m.tryserver.set_patch_failure_tryjob_result()
self.m.cq.set_do_not_retry_build()
self.m.python.failing_step(
'Patch failure', 'See attached log. Try rebasing?')
except self.m.step.StepFailure as e:
if patch_body:
e.result.presentation.logs['patch error'] = (
patch_body.splitlines())
raise e
# bot_update actually just sets root to be the folder name of the
# first solution.
if (result.get('did_run')
and 'checkout' not in self.m.path
and 'root' in result):
co_root = result['root']
cwd = self.m.context.cwd or self.m.path['start_dir']
self.m.path['checkout'] = cwd.join(*co_root.split(self.m.path.sep))
return step_result
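  # Minimal usage sketch (hedged example; the recipe-side calls below are
  # assumptions, not part of this module):
  #
  #     api.gclient.set_config('depot_tools')
  #     step = api.bot_update.ensure_checkout()
  #     got_revision = step.json.output['properties']['got_revision']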
def _destination_ref(self, cfg, path):
"""Returns the ref branch of a CL for the matching project if available or
HEAD otherwise.
If there's no Gerrit CL associated with the run, returns 'HEAD'.
Otherwise this queries Gerrit for the correct destination ref, which
might differ from refs/heads/master.
Args:
cfg: The used gclient config.
path: The DEPS path of the project this prefix is for. E.g. 'src' or
'src/v8'. The query will only be made for the project that matches
the CL's project.
Returns:
A destination ref as understood by bot_update.py if available
and if different from refs/heads/master, returns 'HEAD' otherwise.
"""
# Ignore project paths other than the one belonging to the current CL.
patch_path = self.m.gclient.get_gerrit_patch_root(gclient_config=cfg)
if patch_path:
patch_path = patch_path.replace(self.m.path.sep, '/')
if not patch_path or path != patch_path:
return 'HEAD'
target_ref = self.m.tryserver.gerrit_change_target_ref
if target_ref == 'refs/heads/master':
return 'HEAD'
return target_ref
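  # Illustrative behaviour (hedged example): for a CL whose destination branch
  # is refs/branch-heads/4.2 and which patches the path 'src', a call with
  # path='src' returns 'refs/branch-heads/4.2'; any other path, or a CL
  # targeting refs/heads/master, yields 'HEAD'.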
def resolve_fixed_revision(self, bot_update_json, name):
"""Set a fixed revision for a single dependency using project revision
properties.
"""
rev_properties = self.get_project_revision_properties(name)
self.m.gclient.c.revisions = {
name: bot_update_json['properties'][rev_properties[0]]
}
def _resolve_fixed_revisions(self, bot_update_json):
"""Set all fixed revisions from the first sync to their respective
got_X_revision values.
If on the first sync, a revision was requested to be HEAD, this avoids
using HEAD potentially resolving to a different revision on the second
sync. Instead, we sync explicitly to whatever was checked out the first
time.
Example (chromium trybot used with v8 patch):
First sync was called with
bot_update.py --revision src@abc --revision src/v8@HEAD
Fixed revisions are: src, src/v8
Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
got_revision = abc, got_v8_revision = deadbeef
Second sync will be called with
bot_update.py --revision src@abc --revision src/v8@deadbeef
Example (chromium trybot used with chromium DEPS change, changing v8 from
"v8_before" to "v8_after"):
First sync was called with
bot_update.py --revision src@abc
Fixed revisions are: src
Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
got_revision = abc, got_v8_revision = v8_after
Second sync will be called with
bot_update.py --revision src@abc
When deapplying the patch, v8 will be synced to v8_before.
"""
for name in bot_update_json.get('fixed_revisions', {}):
rev_properties = self.get_project_revision_properties(name)
if (rev_properties and
bot_update_json['properties'].get(rev_properties[0])):
self.m.gclient.c.revisions[name] = str(
bot_update_json['properties'][rev_properties[0]])
# TODO(machenbach): Replace usages of this method eventually by direct calls
# to the manifest output.
def get_project_revision_properties(self, project_name, gclient_config=None):
"""Returns all property names used for storing the checked-out revision of
a given project.
Args:
project_name (str): The name of a checked-out project as deps path, e.g.
src or src/v8.
gclient_config: The gclient configuration to use. If omitted, the current
gclient configuration is used.
Returns (list of str): All properties that'll hold the checked-out revision
of the given project. An empty list if no such properties exist.
"""
cfg = gclient_config or self.m.gclient.c
# Sort for determinism. We might have several properties for the same
# project, e.g. got_revision and got_webrtc_revision.
rev_reverse_map = self.m.gclient.got_revision_reverse_mapping(cfg)
return sorted(
prop
for prop, project in rev_reverse_map.items()
if project == project_name
)
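  # Illustrative return values (hedged examples; they depend on the configured
  # got_revision_mapping): 'src' -> ['got_revision'],
  # 'src/v8' -> ['got_v8_revision'].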
def deapply_patch(self, bot_update_step):
"""Deapplies a patch, taking care of DEPS and solution revisions properly.
"""
bot_update_json = bot_update_step.json.output
# We only override first solution here to make sure that we correctly revert
# changes to DEPS file, which is particularly important for auto-rolls. It
    # is also important that we do not assume that the corresponding revision is
# stored in the 'got_revision' as some gclient configs change the default
# mapping for their own purposes.
first_solution_name = self.m.gclient.c.solutions[0].name
rev_property = self.get_project_revision_properties(first_solution_name)[0]
self.m.gclient.c.revisions[first_solution_name] = str(
bot_update_json['properties'][rev_property])
self._resolve_fixed_revisions(bot_update_json)
self.ensure_checkout(
patch=False, no_fetch_tags=True, update_presentation=False)
|
{
"content_hash": "1a8bfd622298c76c6c24d742c6024fda",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 81,
"avg_line_length": 42.99047619047619,
"alnum_prop": 0.6392556490917146,
"repo_name": "endlessm/chromium-browser",
"id": "670081584ebaa961db53532a50271b68c85ba75f",
"size": "22733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/depot_tools/recipes/recipe_modules/bot_update/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Unit tests for the NLTK modules. These tests are intended to ensure
that source code changes don't accidentally introduce bugs.
For instructions, please see:
../../web/dev/local_testing.rst
https://github.com/nltk/nltk/blob/develop/web/dev/local_testing.rst
"""
|
{
"content_hash": "0feeedd4b3a5ab7c758a9b571f1dc7e7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7183098591549296,
"repo_name": "MyRookie/SentimentAnalyse",
"id": "0ea0d4c26aed77f452007db8fa2aab6d473dee96",
"size": "482",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/nltk/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "316238"
},
{
"name": "C++",
"bytes": "5171"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Prolog",
"bytes": "60188"
},
{
"name": "Python",
"bytes": "13690978"
},
{
"name": "Shell",
"bytes": "8340"
},
{
"name": "TeX",
"bytes": "212"
}
],
"symlink_target": ""
}
|
from asynctnt import Response
from asynctnt.exceptions import TarantoolSchemaError
from tests import BaseTarantoolTestCase
class UpsertTestCase(BaseTarantoolTestCase):
async def _fill_data(self):
data = [
[0, 'a', 1],
[1, 'b', 0],
]
for t in data:
await self.conn.insert(self.TESTER_SPACE_ID, t)
return data
async def test__upsert_empty_one_assign(self):
data = [0, 'hello2', 1, 4, 'what is up']
res = await self.conn.upsert(self.TESTER_SPACE_ID,
data, [['=', 2, 2]])
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
self.assertResponseEqual(res, [data], 'Body ok')
async def test__upsert_update_one_assign(self):
data = [0, 'hello2', 1, 4, 'what is up']
await self.conn.insert(self.TESTER_SPACE_ID, data)
res = await self.conn.upsert(self.TESTER_SPACE_ID,
data, [['=', 2, 2]])
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
data[2] = 2
self.assertResponseEqual(res, [data], 'Body ok')
async def test__upsert_by_name(self):
data = [0, 'hello2', 1, 4, 'what is up']
await self.conn.upsert(self.TESTER_SPACE_NAME,
data, [['=', 2, 2]])
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [data], 'Body ok')
async def test__upsert_by_name_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
with self.assertRaises(TarantoolSchemaError):
await self.conn.upsert(self.TESTER_SPACE_NAME,
[0, 'hello', 1], [['=', 2, 2]])
async def test__upsert_dict_key(self):
data = {
'f1': 0,
'f2': 'hello',
'f3': 1,
'f4': 2,
'f5': 100,
}
res = await self.conn.upsert(self.TESTER_SPACE_ID,
data, [['=', 2, 2]])
self.assertResponseEqual(res, [], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
self.assertResponseEqual(res,
[[0, 'hello', 1, 2, 100]],
'Body ok')
    async def test__upsert_dict_resp_no_effect(self):
data = {
'f1': 0,
'f2': 'hello',
'f3': 1,
'f4': 10,
'f5': 1000,
}
res = await self.conn.upsert(self.TESTER_SPACE_ID, data, [['=', 2, 2]])
self.assertResponseEqual(res, [], 'Body ok')
|
{
"content_hash": "3d197ecdbbc31f4129d91927d3b3b8fe",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 35.19565217391305,
"alnum_prop": 0.5231624459542927,
"repo_name": "igorcoding/asynctnt",
"id": "e22641f62ea55f0b5a40ddd9be560f4bae74e3f3",
"size": "3238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_op_upsert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26705"
},
{
"name": "Cython",
"bytes": "150315"
},
{
"name": "Lua",
"bytes": "9311"
},
{
"name": "Makefile",
"bytes": "1508"
},
{
"name": "Python",
"bytes": "236013"
},
{
"name": "Shell",
"bytes": "114"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'org/jetbrains'
_MODULE_NAME = 'annotations'
_FILE_EXT = 'jar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
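# Editor's note (illustrative only, not from the original file): if the fetched
# metadata contained e.g. "<latest>23.0.0</latest>", the regex above would
# extract "23.0.0" and do_latest() would print "23.0.0.cr1"; the version number
# here is made up for the example.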
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
|
{
"content_hash": "681d413a27d5c6eb3387b92736316832",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.4,
"alnum_prop": 0.5937215650591446,
"repo_name": "chromium/chromium",
"id": "0ee9845162d458b1efa34e14e265dc8911e68370",
"size": "2465",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/org_jetbrains_annotations/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
from neutronclient.neutron.v2_0.lb import member
from neutronclient.tests.unit import test_cli20
class CLITestV20LbMemberJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20LbMemberJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_member(self):
"""lb-member-create with mandatory params only."""
resource = 'member'
cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)
address = '10.0.0.1'
port = '8080'
tenant_id = 'my-tenant'
my_id = 'my-id'
pool_id = 'pool-id'
args = ['--address', address, '--protocol-port', port,
'--tenant-id', tenant_id, pool_id]
position_names = ['address', 'protocol_port', 'tenant_id', 'pool_id',
'admin_state_up']
position_values = [address, port, tenant_id, pool_id, True]
self._test_create_resource(resource, cmd, None, my_id, args,
position_names, position_values,
admin_state_up=None)
def test_create_member_all_params(self):
"""lb-member-create with all available params."""
resource = 'member'
cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)
address = '10.0.0.1'
admin_state_up = False
port = '8080'
weight = '1'
tenant_id = 'my-tenant'
my_id = 'my-id'
pool_id = 'pool-id'
args = ['--address', address, '--admin-state-down',
'--protocol-port', port, '--weight', weight,
'--tenant-id', tenant_id, pool_id]
position_names = [
'address', 'admin_state_up', 'protocol_port', 'weight',
'tenant_id', 'pool_id'
]
position_values = [address, admin_state_up, port, weight,
tenant_id, pool_id]
self._test_create_resource(resource, cmd, None, my_id, args,
position_names, position_values,
admin_state_up=None)
def test_list_members(self):
"""lb-member-list."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_members_pagination(self):
"""lb-member-list."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_members_sort(self):
"""lb-member-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_members_limit(self):
"""lb-member-list -P."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_member_id(self):
"""lb-member-show test_id."""
resource = 'member'
cmd = member.ShowMember(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_member_id_name(self):
"""lb-member-show."""
resource = 'member'
cmd = member.ShowMember(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_member(self):
"""lb-member-update myid --name myname --tags a b."""
resource = 'member'
cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], })
def test_delete_member(self):
"""lb-member-delete my-id."""
resource = 'member'
cmd = member.DeleteMember(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20LbMemberXML(CLITestV20LbMemberJSON):
format = 'xml'
|
{
"content_hash": "da006f7bf65c2af1c03dbc40aecd6ee8",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 40.47826086956522,
"alnum_prop": 0.5417830290010741,
"repo_name": "ralphwort/chef-repo",
"id": "b3f4cc87bb951370d1cb3f46794bc7f61ff72270",
"size": "5326",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "build/python-neutronclient/neutronclient/tests/unit/lb/test_cli20_member.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1640"
},
{
"name": "CSS",
"bytes": "36959"
},
{
"name": "CoffeeScript",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "20110"
},
{
"name": "Pascal",
"bytes": "82867"
},
{
"name": "Perl",
"bytes": "884"
},
{
"name": "Python",
"bytes": "3746105"
},
{
"name": "Ruby",
"bytes": "1133327"
},
{
"name": "Shell",
"bytes": "24142"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, IntervalIndex, Interval
import pandas.util.testing as tm
class TestIntervalIndex(object):
def setup_method(self, method):
self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_loc_with_scalar(self):
s = self.s
expected = s.iloc[:3]
tm.assert_series_equal(expected, s.loc[:3])
tm.assert_series_equal(expected, s.loc[:2.5])
tm.assert_series_equal(expected, s.loc[0.1:2.5])
tm.assert_series_equal(expected, s.loc[-1:3])
expected = s.iloc[1:4]
tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, s.loc[[2, 3, 4]])
tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]])
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s.loc[s >= 2])
# TODO: check this behavior is consistent with test_interval_new.py
def test_getitem_with_scalar(self):
s = self.s
expected = s.iloc[:3]
tm.assert_series_equal(expected, s[:3])
tm.assert_series_equal(expected, s[:2.5])
tm.assert_series_equal(expected, s[0.1:2.5])
tm.assert_series_equal(expected, s[-1:3])
expected = s.iloc[1:4]
tm.assert_series_equal(expected, s[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, s[[2, 3, 4]])
tm.assert_series_equal(expected, s[[1.5, 3, 4]])
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s[s >= 2])
# TODO: check this behavior is consistent with test_interval_new.py
@pytest.mark.parametrize('direction', ['increasing', 'decreasing'])
def test_nonoverlapping_monotonic(self, direction, closed):
tpls = [(0, 1), (2, 3), (4, 5)]
if direction == 'decreasing':
tpls = tpls[::-1]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
s = Series(list('abc'), idx)
for key, expected in zip(idx.left, s):
if idx.closed_left:
assert s[key] == expected
assert s.loc[key] == expected
else:
with pytest.raises(KeyError):
s[key]
with pytest.raises(KeyError):
s.loc[key]
for key, expected in zip(idx.right, s):
if idx.closed_right:
assert s[key] == expected
assert s.loc[key] == expected
else:
with pytest.raises(KeyError):
s[key]
with pytest.raises(KeyError):
s.loc[key]
for key, expected in zip(idx.mid, s):
assert s[key] == expected
assert s.loc[key] == expected
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_with_interval(self):
s = self.s
expected = 0
result = s.loc[Interval(0, 1)]
assert result == expected
result = s[Interval(0, 1)]
assert result == expected
expected = s.iloc[3:5]
result = s.loc[Interval(3, 6)]
tm.assert_series_equal(expected, result)
expected = s.iloc[3:5]
result = s.loc[[Interval(3, 6)]]
tm.assert_series_equal(expected, result)
expected = s.iloc[3:5]
result = s.loc[[Interval(3, 5)]]
tm.assert_series_equal(expected, result)
# missing
with pytest.raises(KeyError):
s.loc[Interval(-2, 0)]
with pytest.raises(KeyError):
s[Interval(-2, 0)]
with pytest.raises(KeyError):
s.loc[Interval(5, 6)]
with pytest.raises(KeyError):
s[Interval(5, 6)]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_with_slices(self):
s = self.s
# slice of interval
with pytest.raises(NotImplementedError):
s.loc[Interval(3, 6):]
with pytest.raises(NotImplementedError):
s[Interval(3, 6):]
expected = s.iloc[3:5]
result = s[[Interval(3, 6)]]
tm.assert_series_equal(expected, result)
# slice of scalar with step != 1
with pytest.raises(ValueError):
s[0:4:2]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_with_overlaps(self):
s = self.s
expected = s.iloc[[3, 4, 3, 4]]
result = s.loc[[Interval(3, 6), Interval(3, 6)]]
tm.assert_series_equal(expected, result)
idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
s = Series(range(len(idx)), index=idx)
result = s[4]
expected = s
tm.assert_series_equal(expected, result)
result = s[[4]]
expected = s
tm.assert_series_equal(expected, result)
result = s.loc[[4]]
expected = s
tm.assert_series_equal(expected, result)
result = s[Interval(3, 5)]
expected = s
tm.assert_series_equal(expected, result)
result = s.loc[Interval(3, 5)]
expected = s
tm.assert_series_equal(expected, result)
# doesn't intersect unique set of intervals
with pytest.raises(KeyError):
s[[Interval(3, 5)]]
with pytest.raises(KeyError):
s.loc[[Interval(3, 5)]]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_non_unique(self):
idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
s = Series(range(len(idx)), index=idx)
result = s.loc[Interval(1, 3)]
assert result == 0
result = s.loc[[Interval(1, 3)]]
expected = s.iloc[0:1]
tm.assert_series_equal(expected, result)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_non_unique_moar(self):
idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
s = Series(range(len(idx)), index=idx)
result = s.loc[Interval(1, 3)]
expected = s.iloc[[0, 1]]
tm.assert_series_equal(expected, result)
# non-unique index and slices not allowed
with pytest.raises(ValueError):
s.loc[Interval(1, 3):]
with pytest.raises(ValueError):
s[Interval(1, 3):]
# non-unique
with pytest.raises(ValueError):
s[[Interval(1, 3)]]
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_matching(self):
s = self.s
# this is a departure from our current
        # indexing scheme, but simpler
with pytest.raises(KeyError):
s.loc[[-1, 3, 4, 5]]
with pytest.raises(KeyError):
s.loc[[-1, 3]]
def test_large_series(self):
s = Series(np.arange(1000000),
index=IntervalIndex.from_breaks(np.arange(1000001)))
result1 = s.loc[:80000]
result2 = s.loc[0:80000]
result3 = s.loc[0:80000:1]
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
def test_loc_getitem_frame(self):
df = DataFrame({'A': range(10)})
s = pd.cut(df.A, 5)
df['B'] = s
df = df.set_index('B')
result = df.loc[4]
expected = df.iloc[4:6]
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError):
df.loc[10]
# single list-like
result = df.loc[[4]]
expected = df.iloc[4:6]
tm.assert_frame_equal(result, expected)
# non-unique
result = df.loc[[4, 5]]
expected = df.take([4, 5, 4, 5])
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError):
df.loc[[10]]
# partial missing
with pytest.raises(KeyError):
df.loc[[10, 4]]
|
{
"content_hash": "fe4b0b2a16d9e10c43ab1c893ee04d0c",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 78,
"avg_line_length": 29.767790262172284,
"alnum_prop": 0.5546049320583795,
"repo_name": "pratapvardhan/pandas",
"id": "f2f59159032a26cd9da6629a771173eb4a47f109",
"size": "7948",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/tests/indexing/interval/test_interval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13598412"
},
{
"name": "Shell",
"bytes": "25368"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
import json
import os
import psycopg2
import logging
def get_config(conf_filename="../config.json"):
logging.info("Getting config from %s" % conf_filename)
dir = os.path.dirname(__file__)
if not dir:
dir = "."
config_file = dir + os.sep + conf_filename
with open(config_file) as data_file:
config = json.load(data_file)
return config
def get_connection():
connection = psycopg2.connect(**get_config())
logging.info("Got database connection")
return connection
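
# Usage sketch (editor's illustration, not part of the original module); it
# assumes config.json holds valid psycopg2 connection parameters such as
# dbname, user, password and host.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    connection = get_connection()
    with connection.cursor() as cursor:
        cursor.execute("SELECT 1")
        print(cursor.fetchone())
    connection.close()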
|
{
"content_hash": "2909a0ce40e1b8297955bdf146dcc423",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.6608527131782945,
"repo_name": "Samreay/TaipanDB",
"id": "a2196af94aa2f8053cc26528ae23622930e3c13b",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25332"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
import multiprocessing
assert multiprocessing
import re
from setuptools import setup, find_packages
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'dynamic_db_router/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
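# Editor's note: the regex above expects version.py to contain a line of the
# form __version__ = '1.2.3' (the concrete number is only a placeholder here).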
def get_requirements(path):
with open(path, 'r') as requirements_file:
return requirements_file.read().split('\n')
setup(
name='django-dynamic-db-router',
version=get_version(),
description='Simply route complex django queries to multiple databases.',
long_description=open('README.rst').read(),
url='https://github.com/ambitioninc/django-dynamic-db-router',
author='Erik Swanson',
author_email='opensource@ambition.com',
keywords='',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
license='MIT',
install_requires=get_requirements('requirements/setup.txt'),
tests_require=get_requirements('requirements/test.txt'),
include_package_data=True,
test_suite='run_tests.run_tests',
zip_safe=False,
)
|
{
"content_hash": "b455ee9bf7962be42b8b550b21b93976",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 32.0625,
"alnum_prop": 0.6465237166991553,
"repo_name": "ambitioninc/django-dynamic-db-router",
"id": "32ef86f014621b8ec82320b2ecd94af93af6315f",
"size": "1628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14429"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from webapp import app, db, util
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def import_regions_to_sql():
util.import_regions_to_sql()
@manager.command
def import_regions_to_es():
util.import_regions_to_es()
@manager.command
def sync_sources():
util.sync_sources()
@manager.command
def sync_source(name):
util.sync_source(name)
@manager.command
def import_vehicles_to_es():
util.import_vehicles_to_es()
if __name__ == "__main__":
manager.run()
|
{
"content_hash": "734436ef4cb2f4d800e2f9fc9bfd8b95",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 755,
"avg_line_length": 50.07142857142857,
"alnum_prop": 0.7883975273418925,
"repo_name": "ruhrmobil-E/mobilitaet-finden",
"id": "83359652a48661bc1581c1ffd340161947359789",
"size": "2122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "HTML",
"bytes": "21972"
},
{
"name": "JavaScript",
"bytes": "23732"
},
{
"name": "Python",
"bytes": "70993"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
}
|
"""
Views for managing Nova instances.
"""
import logging
from django import http
from django import shortcuts
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext as _
from horizon import api
from horizon import exceptions
from horizon import forms
from horizon import views
from .forms import UpdateInstance
LOG = logging.getLogger(__name__)
def console(request, instance_id):
try:
# TODO(jakedahn): clean this up once the api supports tailing.
length = request.GET.get('length', None)
console = api.server_console_output(request,
instance_id,
tail_length=length)
response = http.HttpResponse(mimetype='text/plain')
response.write(console)
response.flush()
return response
except:
msg = _('Unable to get log for instance "%s".') % instance_id
redirect = reverse('horizon:nova:instances_and_volumes:index')
exceptions.handle(request, msg, redirect=redirect)
def vnc(request, instance_id):
try:
console = api.server_vnc_console(request, instance_id)
instance = api.server_get(request, instance_id)
return shortcuts.redirect(console.url +
("&title=%s(%s)" % (instance.name, instance_id)))
except:
redirect = reverse("horizon:nova:instances_and_volumes:index")
msg = _('Unable to get VNC console for instance "%s".') % instance_id
exceptions.handle(request, msg, redirect=redirect)
class UpdateView(forms.ModalFormView):
form_class = UpdateInstance
template_name = 'nova/instances_and_volumes/instances/update.html'
context_object_name = 'instance'
def get_object(self, *args, **kwargs):
if not hasattr(self, "object"):
instance_id = self.kwargs['instance_id']
try:
self.object = api.server_get(self.request, instance_id)
except:
redirect = reverse("horizon:nova:instances_and_volumes:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self.object
def get_initial(self):
return {'instance': self.kwargs['instance_id'],
'tenant_id': self.request.user.tenant_id,
'name': getattr(self.object, 'name', '')}
class DetailView(views.APIView):
template_name = 'nova/instances_and_volumes/instances/detail.html'
def get_data(self, request, context, *args, **kwargs):
instance_id = kwargs['instance_id']
if "show" in request.GET:
show_tab = request.GET["show"]
else:
show_tab = "overview"
try:
instance = api.server_get(request, instance_id)
volumes = api.volume_instance_list(request, instance_id)
# Gather our flavors and images and correlate our instances to
# them. Exception handling happens in the parent class.
flavors = api.flavor_list(request)
full_flavors = SortedDict([(str(flavor.id), flavor) for \
flavor in flavors])
instance.full_flavor = full_flavors[instance.flavor["id"]]
context.update({'instance': instance, 'volumes': volumes})
except:
redirect = reverse('horizon:nova:instances_and_volumes:index')
exceptions.handle(request,
_('Unable to retrieve details for '
'instance "%s".') % instance_id,
redirect=redirect)
if show_tab == "vnc":
try:
console = api.server_vnc_console(request, instance_id)
vnc_url = "%s&title=%s(%s)" % (console.url,
getattr(instance, "name", ""),
instance_id)
context.update({'vnc_url': vnc_url})
except:
exceptions.handle(request,
_('Unable to get vnc console for '
'instance "%s".') % instance_id)
context.update({'show_tab': show_tab})
return context
|
{
"content_hash": "13e58cfd64348045fe78aecb60e394bc",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 37.85344827586207,
"alnum_prop": 0.5743566385789114,
"repo_name": "rcbops/horizon-buildpackage",
"id": "9562ec2306f9b23d594a975c1307b3ef5d471319",
"size": "5200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "horizon/dashboards/nova/instances_and_volumes/instances/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "618253"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import tkMessageBox
import re
def validate():
userInput = entryBox.get()
numCount = 0
operandCount = 0
entryBracketCount = 0
exitBracketCount = 0
charCount = 0
endOfLine = len(userInput) - 1
for i in range(len(userInput)):
if (re.search('[\s*a-z\s*A-Z]+', userInput[i])):
charCount = charCount + 1
elif (re.search('[\s*0-9]+', userInput[i])):
numCount = numCount + 1
elif (re.search('[\s*\+|\s*\-|\s*\/]+', userInput[i])):
operandCount = operandCount + 1
if(re.search('[\s*\+|\s*\-|\s*\/]+', userInput[endOfLine])):
tkMessageBox.showinfo("Hey pal", "invalid expression")
else:
if((re.search('[\s*a-zA-Z]+', userInput[i - 1])) or (re.search('[\s*\d]+', userInput[i - 1]))):
continue
else:
tkMessageBox.showinfo("Hey pal", "invalid expression")
if(re.search('[\s*\d]+', userInput[i - 1])):
continue
else:
tkMessageBox.showinfo("Hey pal", "invalid expression")
if(re.search('[\s*a-zA-Z]+', userInput[i + 1])):
continue
elif(re.search('[\s*\d]+', userInput[i + 1])):
continue
elif (re.search('[\(]+', userInput[i + 1])):
continue
elif (re.search('[\)]+', userInput[i + 1])):
continue
else:
tkMessageBox.showinfo("Hey pal", "invalid expression")
elif(re.search('[\(]+', userInput[i])):
entryBracketCount = entryBracketCount + 1
elif(re.search('[\)]+', userInput[i])):
exitBracketCount = exitBracketCount + 1
if(re.search('[\)]+', userInput[endOfLine])):
continue
else:
if(re.search('[\(]+', userInput[i + 1])):
					tkMessageBox.showinfo("Hey pal", "invalid expression")
if (entryBracketCount != exitBracketCount):
tkMessageBox.showinfo("Hey pal", "invalid expression")
elif operandCount == 0:
tkMessageBox.showinfo("Hey pal", "invalid expression")
elif ((numCount == 0) and (charCount == 0)):
tkMessageBox.showinfo("Hey pal", "invalid expression")
else:
tkMessageBox.showinfo("Hey pal", "valid expression ")
root = Tk()
entryBox = Entry(root, width=80)
entryBox.pack()
bDisplayMessage = Button(root, text ="validate", command = validate)
bDisplayMessage.pack()
root.mainloop()
|
{
"content_hash": "6fce72fa5cce845cddb42dcd5fa5412b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 99,
"avg_line_length": 29.944444444444443,
"alnum_prop": 0.6266233766233766,
"repo_name": "dominickhera/PosaRepo",
"id": "baacd73b2a7393b06043f6308fca166682c18583",
"size": "2175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cis3250labs/guiTest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "8043"
},
{
"name": "C",
"bytes": "1727439"
},
{
"name": "C++",
"bytes": "75654"
},
{
"name": "COBOL",
"bytes": "4854"
},
{
"name": "CSS",
"bytes": "259311"
},
{
"name": "Fortran",
"bytes": "16927"
},
{
"name": "HTML",
"bytes": "931603"
},
{
"name": "JavaScript",
"bytes": "322808"
},
{
"name": "Makefile",
"bytes": "21000"
},
{
"name": "Perl",
"bytes": "22818"
},
{
"name": "Python",
"bytes": "55993"
},
{
"name": "R",
"bytes": "10512"
},
{
"name": "Shell",
"bytes": "2231"
}
],
"symlink_target": ""
}
|
config = {
# File system base paths, inventory base paths, reports base paths.
# A file system base path is the root of a section of the file system that
# will be inventoried.
# Each file system base path will have its own inventory and reports.
# The inventory filepath contains the inventory records for the file system.
# The reports filepath contains the reports for the file system.
'file_systems': [('/Users/justinlittman/tmp/inventory/fs',
'/Users/justinlittman/tmp/inventory/inventory',
'/Users/justinlittman/tmp/inventory/reports')],
# Location of report index database.
'report_index_db': '/Users/justinlittman/tmp/inventory/reports.db',
# Email configuration
'email': {
'username': 'someone@email.gwu.edu',
'password': 'password',
'port': 587,
'host': 'smtp.gmail.com',
'send_to': ['someone@gwu.edu', 'someone_else@gwu.edu']
},
# Number of threads to use for fixity checking.
'fixity_threads': 3
}
|
{
"content_hash": "08b8b7bf33017ec2c057477f842683cf",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 49.30434782608695,
"alnum_prop": 0.5978835978835979,
"repo_name": "gwu-libraries/inventory",
"id": "c1a534b5c8661e1d5662568d3563c2806ca28401",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44967"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from unittest import mock
from sqlalchemy.engine import reflection
from corehq.apps.userreports.models import (
DataSourceActionLog,
DataSourceConfiguration,
)
from corehq.apps.userreports.tests.utils import (
get_sample_data_source,
skip_domain_filter_patch,
)
from corehq.apps.userreports.util import get_indicator_adapter, get_table_name
from corehq.pillows.case import get_case_pillow
from corehq.util.test_utils import softer_assert
def setup_module():
skip_domain_filter_patch.start()
def teardown_module():
skip_domain_filter_patch.stop()
class RebuildTableTest(TestCase):
"""This test is pretty fragile because in UCRs we have a global metadata
object that sqlalchemy uses to keep track of tables and indexes. I've attempted
to work around it here, but it feels a little nasty
"""
def tearDown(self):
self.adapter.drop_table()
self.config.delete()
def _get_config(self, extra_id):
config = get_sample_data_source()
config.table_id = config.table_id + extra_id
return config
def _setup_data_source(self, extra_id):
self.config = self._get_config(extra_id)
self.config.save()
get_case_pillow(ucr_configs=[self.config])
self.adapter = get_indicator_adapter(self.config)
self.engine = self.adapter.engine
def test_add_index(self):
# build the table without an index
self._setup_data_source('add_index')
insp = reflection.Inspector.from_engine(self.engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
self.assertEqual(len(insp.get_indexes(table_name)), 0)
# add the index to the config
config = self._get_config('add_index')
self.addCleanup(config.delete)
config.configured_indicators[0]['create_index'] = True
config.save()
adapter = get_indicator_adapter(config)
with mock.patch('corehq.apps.userreports.pillow_utils.rebuild_table') as rebuild_table, \
mock.patch('corehq.apps.userreports.pillow_utils.migrate_tables_with_logging') as migrate_table:
get_case_pillow(ucr_configs=[config])
self.assertFalse(rebuild_table.called)
self.assertTrue(migrate_table.called)
engine = adapter.engine
insp = reflection.Inspector.from_engine(engine)
# note the index is not yet created
self.assertEqual(len(insp.get_indexes(table_name)), 0)
def test_add_non_nullable_column(self):
self._setup_data_source('add_non_nullable_col')
# assert new date isn't in the config
insp = reflection.Inspector.from_engine(self.engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 0
)
# add the column to the config
config = self._get_config('add_non_nullable_col')
self.addCleanup(config.delete)
config.configured_indicators.append({
"column_id": "new_date",
"type": "raw",
"display_name": "new_date opened",
"datatype": "datetime",
"property_name": "other_opened_on",
"is_nullable": False
})
config.save()
adapter = get_indicator_adapter(config)
engine = adapter.engine
# mock rebuild table to ensure the table is rebuilt
with mock.patch('corehq.apps.userreports.pillow_utils.rebuild_table') as rebuild_table:
get_case_pillow(ucr_configs=[config])
self.assertTrue(rebuild_table.called)
# column doesn't exist because rebuild table was mocked
insp = reflection.Inspector.from_engine(engine)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 0
)
# Another time without the mock to ensure the column is there
get_case_pillow(ucr_configs=[config])
insp = reflection.Inspector.from_engine(engine)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 1
)
def test_add_nullable_column(self):
self._setup_data_source('add_nullable_col')
# assert new date isn't in the config
insp = reflection.Inspector.from_engine(self.engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 0
)
# add the column to the config
config = self._get_config('add_nullable_col')
self.addCleanup(config.delete)
config.configured_indicators.append({
"column_id": "new_date",
"type": "raw",
"display_name": "new_date opened",
"datatype": "datetime",
"property_name": "other_opened_on",
"is_nullable": True
})
config.save()
adapter = get_indicator_adapter(config)
engine = adapter.engine
# mock rebuild table to ensure the column is added without rebuild table
with mock.patch('corehq.apps.userreports.pillow_utils.rebuild_table') as rebuild_table:
get_case_pillow(ucr_configs=[config])
self.assertFalse(rebuild_table.called)
insp = reflection.Inspector.from_engine(engine)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 1
)
@softer_assert()
def test_skip_destructive_rebuild(self):
self.config = self._get_config('add_non_nullable_col')
self.config.disable_destructive_rebuild = True
self.config.save()
get_case_pillow(ucr_configs=[self.config])
self.adapter = get_indicator_adapter(self.config)
self.engine = self.adapter.engine
# assert new date isn't in the config
insp = reflection.Inspector.from_engine(self.engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 0
)
# re-fetch from DB to bust object caches
self.config = DataSourceConfiguration.get(self.config.data_source_id)
# add the column to the config
self.config.configured_indicators.append({
"column_id": "new_date",
"type": "raw",
"display_name": "new_date opened",
"datatype": "datetime",
"property_name": "other_opened_on",
"is_nullable": False
})
self.config.save()
# re-fetch from DB to bust object caches
self.config = DataSourceConfiguration.get(self.config.data_source_id)
# bootstrap to trigger rebuild
get_case_pillow(ucr_configs=[self.config])
logs = DataSourceActionLog.objects.filter(
indicator_config_id=self.config.data_source_id,
skip_destructive=True
)
self.assertEqual(1, len(logs))
self.assertEqual(logs[0].migration_diffs, [{'type': 'add_column', 'item_name': 'new_date'}])
# make the column allow nulls and check that it gets applied (since is non-destructive)
self.config.configured_indicators[-1]['is_nullable'] = True
self.config.save()
# re-fetch from DB to bust object caches
self.config = DataSourceConfiguration.get(self.config.data_source_id)
# make sure change made it
self.assertEqual(True, self.config.configured_indicators[-1]['is_nullable'])
# bootstrap to trigger rebuild
get_case_pillow(ucr_configs=[self.config])
# make sure we didn't add any more logs
self.assertEqual(
DataSourceActionLog.objects.filter(
indicator_config_id=self.config.data_source_id,
skip_destructive=True
).count(),
1,
)
# confirm the column was added to the table
insp = reflection.Inspector.from_engine(self.engine)
self.assertEqual(
len([c for c in insp.get_columns(table_name) if c['name'] == 'new_date']), 1
)
def test_implicit_pk(self):
self._setup_data_source('implicit_pk')
insp = reflection.Inspector.from_engine(self.engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
pk = insp.get_pk_constraint(table_name)
expected_pk = ['doc_id']
self.assertEqual(expected_pk, pk['constrained_columns'])
def test_ordered_pk(self):
self._setup_data_source('ordered_pk')
config = self._get_config('ordered_pk')
config.configured_indicators.append({
"column_id": "pk_key",
"type": "raw",
"datatype": "string",
"property_name": "owner_id",
"is_primary_key": True
})
config.sql_settings.primary_key = ['pk_key', 'doc_id']
config.save()
get_case_pillow(ucr_configs=[config])
adapter = get_indicator_adapter(config)
engine = adapter.engine
insp = reflection.Inspector.from_engine(engine)
table_name = get_table_name(self.config.domain, self.config.table_id)
pk = insp.get_pk_constraint(table_name)
expected_pk = ['pk_key', 'doc_id']
self.assertEqual(expected_pk, pk['constrained_columns'])
|
{
"content_hash": "7437148650c8ae0aff16b34d62921df1",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 108,
"avg_line_length": 38.46586345381526,
"alnum_prop": 0.6221549384005012,
"repo_name": "dimagi/commcare-hq",
"id": "8781f66f21fd1332bee9ffd5d3de22b88d4d0a62",
"size": "9578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/userreports/tests/test_rebuild_migrate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
__author__ = 'kotaimen'
__date__ = '6/12/14'
"""
georest.geo.operations
~~~~~~~~~~~~~~~~~~~~~~
Wraps every geometry operation in its own functor class, thus being very
naive and stupid.
All operator parameters are set in the CTOR and only accept geometries
in the `__call__` operator. Basic type conversion and validation is
also done in the CTOR, so we can simply pass `request.args` into the
operator.
Geometry parameters are never modified but may returned as result if
no computation is required.
Design note:
    We can implement this using pythonic methods like dynamic bound method
lookup, but chose not to since this is more friendly to unit testing.
However, operation parameter handling is still implemented in a clever,
twisted, cruel, wild, pythonic metaprogrammingish way to save typing :)
"""
import six
import shapely.geometry.base
import shapely.ops
from .geometry import Geometry
from .exceptions import GeoException, OperationError, InvalidParameter
from .spatialref import CoordinateTransform, SpatialReference
__all__ = ['BaseOperation', 'UnaryOperation', 'Attribute', 'UnaryPredicate',
'UnaryConstructor', 'BinaryOperation', 'BinaryPredicate',
'BinarySetTheoreticMethod', 'MultiGeometryOperation',
'Area', 'Length', 'IsSimple', 'Buffer',
'ConvexHull', 'Envelope', 'ParallelOffset',
'Simplify', 'Boundary', 'PointOnSurface', 'Centroid',
'Distance',
'Equals', 'Contains', 'Crosses', 'Disjoint', 'Intersects',
'Touches', 'Within',
'Intersection', 'SymmetricDifference', 'Difference', 'Union',
'CascadeUnion', ]
#
# Base classes
#
class BaseOperation(object):
"""Base geometry operation, being a multi geometry operation"""
# Whether to raise OperationalError on failure
DEBUG = False
# Operation result type
RESULT_TYPE = Geometry
def __init__(self, **kwargs):
"""Create the operator, if `srid` is provided, geometry will be
transformed to given CRS before the actual operation is called.
Note if given `srid` is identical the geometry, `CoordinateTransform`
will not return a new copied geometry object to save time.
"""
try:
self._srid = kwargs.pop('srid')
except KeyError:
self._srid = None
# any left args is assumed to be used by actual operator
self._kwargs = kwargs
def __call__(self, *geometries):
assert all(Geometry.is_geometry(g) for g in geometries)
if self._srid:
# need do crs transform before performing operation
geometries = tuple(self._transform_crs(geometries))
result_crs = SpatialReference(self._srid)
else:
result_crs = self._check_crs(geometries)
# call implementation, catch all exception and raise as 500 error
try:
result = self._impl(*geometries)
except GeoException as e:
raise
except Exception as e:
if self.DEBUG:
raise
else:
raise OperationError(e=e)
if isinstance(result, shapely.geometry.base.BaseGeometry):
assert self.RESULT_TYPE == Geometry
# update spatial reference
result = Geometry.build_geometry(result,
srid=result_crs.srid,
empty_check=False)
return result
def _transform_crs(self, geometries):
for geometry in geometries:
geom_crs = geometry.crs
if not geom_crs:
raise InvalidParameter(
'Requires all geometries have CRS defined')
result_crs = SpatialReference(self._srid)
transform = CoordinateTransform.build_transform(geom_crs,
result_crs)
yield transform(geometry)
def _check_crs(self, geometries):
if not any(bool(g.crs) for g in geometries):
# if all geometries have undefined CRS or unassigned CRS
# we assume you know what you are doing
return SpatialReference(srid=0)
elif len(set(g.crs.srid for g in geometries)) > 1:
# but in any case you can't mix different CRS
raise InvalidParameter('Cannot operate on mixed CRS')
else:
return SpatialReference(geometries[0].crs.srid)
class UnaryOperation(BaseOperation):
"""Base unary geometry operation"""
def __call__(self, this):
"""Accepts a single geometry, and call underlying implement"""
return BaseOperation.__call__(self, this)
def _impl(self, this):
"""By default, be a good coordinate transformer"""
return this
class Attribute(UnaryOperation):
"""Geometry attributes returns a float/int/string"""
pass
class UnaryPredicate(UnaryOperation):
"""Accepts a geometry and returns bool"""
RESULT_TYPE = bool
class UnaryConstructor(UnaryOperation):
"""Accepts a geometry and optional parameters,
returns a new geometry"""
pass
class UnarySetTheoreticMethod(UnaryOperation):
"""Accepts a geometry and optional parameters,
returns a new geometry"""
pass
class AffineTransform(UnaryOperation):
"""Accepts a geometry and optional parameters,
returns a new geometry"""
pass
class BinaryOperation(BaseOperation):
"""Base binary geometry operation"""
def __call__(self, this, other):
"""Accepts two geometries, and call underlying implement """
return BaseOperation.__call__(self, this, other)
def _impl(self, this, other):
raise NotImplementedError
class BinaryPredicate(BinaryOperation):
"""Accepts two geometries and returns bool"""
RESULT_TYPE = bool
class BinarySetTheoreticMethod(BinaryOperation):
"""Binary set-theoretic methods"""
pass
class LineReference(BinaryOperation):
pass
class MultiGeometryOperation(BaseOperation):
pass
class ParameterHelper(object):
"""Very simple parameter validator"""
def __init__(self, args):
self._args = args
def update_kwargs(self, kwargs, local):
"""Merge kwargs from given locals using valid args"""
kwargs = kwargs.copy()
for name in self._args:
kwargs[name] = local[name]
return kwargs
def extract_kwargs(self, kwargs):
"""Extract valid args from kwargs, so we can ignore any extra
arguments before call actual geometry operators"""
args = dict()
for name in self._args:
args[name] = kwargs[name]
return args
def check_float(self, **kwargs):
assert len(kwargs) == 1
name, value = kwargs.popitem()
if isinstance(value, six.string_types):
try:
return float(value)
except ValueError as e:
raise InvalidParameter('%s is not a float: "%r"' % \
(name, value), e=e)
elif isinstance(value, float):
return value
else:
raise InvalidParameter('%s must be a float, got "%r"' % \
(name, value))
def check_integer(self, **kwargs):
assert len(kwargs) == 1
name, value = kwargs.popitem()
if isinstance(value, six.string_types):
try:
return int(value)
except ValueError as e:
raise InvalidParameter(
'%s is not an integer: "%r"' % (name, value), e=e)
elif isinstance(value, int):
return value
else:
raise InvalidParameter(
'%s must be an integer, got "%r"' % (name, value))
def check_boolean(self, **kwargs):
assert len(kwargs) == 1
name, value = kwargs.popitem()
if isinstance(value, six.string_types):
if value.lower() in ['1', 'true']:
return True
elif value.lower() in ['0', 'false']:
return False
else:
raise InvalidParameter(
'%s must be a boolean value, got "%s"' % (name, value))
elif isinstance(value, bool):
return value
else:
raise InvalidParameter(
'%s must be a True or False, got "%s"' % (name, value))
def check_range(self, low=0., high=65535., **kwargs):
assert len(kwargs) == 1
name, value = kwargs.popitem()
if value < low or value > high:
            raise InvalidParameter('%s must be between %r, %r' % \
(name, low, high))
def check_open_range(self, low=0., high=65535., **kwargs):
assert len(kwargs) == 1
name, value = kwargs.popitem()
if value <= low or value >= high:
            raise InvalidParameter('%s must be between %r, %r' % \
(name, low, high))
def check_choices(self, choices=None, **kwargs):
assert len(kwargs) == 1
assert choices is not None
name, value = kwargs.popitem()
if value not in choices:
raise InvalidParameter('%s must be one of %r, got "%r"' % \
(name, choices, value))
def check_geometry_type_is(self, geometry, *choices):
if geometry.geom_type not in choices:
raise InvalidParameter('Expecting one of %r instead of a %r' % (
choices, geometry.geom_type))
def check_geometry_type_is_not(self, geometry, *choices):
if geometry.geom_type in choices:
raise InvalidParameter('None of %r is allowed but got a %r' % (
choices, geometry.geom_type))
class Area(Attribute):
RESULT_TYPE = float
__doc__ = shapely.geometry.base.BaseGeometry.area.__doc__
def _impl(self, this):
return this.area
class Length(Attribute):
RESULT_TYPE = float
__doc__ = shapely.geometry.base.BaseGeometry.length.__doc__
def _impl(self, this):
return this.length
class IsSimple(UnaryPredicate):
RESULT_TYPE = bool
__doc__ = shapely.geometry.base.BaseGeometry.is_simple.__doc__
def _impl(self, this):
return this.is_simple
class Buffer(UnaryConstructor, ParameterHelper):
__doc__ = shapely.geometry.base.BaseGeometry.buffer.__doc__
def __init__(self, distance=0.01, resolution=16,
cap_style='round', join_style='round',
mitre_limit=1.0, **kwargs):
ParameterHelper.__init__(self, ['distance', 'resolution', 'cap_style',
'join_style', 'mitre_limit'])
distance = self.check_float(distance=distance)
resolution = self.check_integer(resolution=resolution)
mitre_limit = self.check_float(mitre_limit=mitre_limit)
self.check_range(resolution=resolution, low=1, high=100)
self.check_range(mitre_limit=mitre_limit, low=0., high=100.0)
self.check_choices(cap_style=cap_style,
choices=['round', 'flat', 'square'])
self.check_choices(join_style=join_style,
choices=['round', 'mitre', 'bevel'])
cap_style = getattr(shapely.geometry.CAP_STYLE, cap_style)
join_style = getattr(shapely.geometry.JOIN_STYLE, join_style)
kwargs = self.update_kwargs(kwargs, locals())
super(Buffer, self).__init__(**kwargs)
def _impl(self, this):
kwargs = self.extract_kwargs(self._kwargs)
return this.buffer(**kwargs)
class ConvexHull(UnaryConstructor):
__doc__ = shapely.geometry.base.BaseGeometry.convex_hull.__doc__
def _impl(self, this):
return this.convex_hull
class Envelope(UnaryConstructor):
__doc__ = shapely.geometry.base.BaseGeometry.envelope.__doc__
def _impl(self, this):
return this.envelope
class ParallelOffset(UnaryConstructor, ParameterHelper):
__doc__ = shapely.geometry.LineString.parallel_offset.__doc__
def __init__(self, distance=0.01, side='left', resolution=16,
join_style='round',
mitre_limit=1.0, **kwargs):
ParameterHelper.__init__(self, ['distance', 'side', 'resolution',
'join_style', 'mitre_limit'])
distance = self.check_float(distance=distance)
resolution = self.check_integer(resolution=resolution)
mitre_limit = self.check_float(mitre_limit=mitre_limit)
self.check_range(resolution=resolution, low=1, high=100)
self.check_range(mitre_limit=mitre_limit, low=0., high=100.0)
self.check_choices(side=side,
choices=['left', 'right'])
self.check_choices(join_style=join_style,
choices=['round', 'mitre', 'bevel'])
join_style = getattr(shapely.geometry.JOIN_STYLE, join_style)
kwargs = self.update_kwargs(kwargs, locals())
super(ParallelOffset, self).__init__(**kwargs)
def _impl(self, this):
self.check_geometry_type_is(this, 'LineString', 'MultiLineString')
kwargs = self.extract_kwargs(self._kwargs)
return this.parallel_offset(**kwargs)
class Simplify(UnaryConstructor, ParameterHelper):
__doc__ = shapely.geometry.base.BaseGeometry.simplify.__doc__
def __init__(self, tolerance=0.01, preserve_topology=False, **kwargs):
ParameterHelper.__init__(self, ['tolerance', 'preserve_topology'])
tolerance = self.check_float(tolerance=tolerance)
self.check_open_range(tolerance=tolerance, low=0., high=1.)
preserve_topology = self.check_boolean(
preserve_topology=preserve_topology)
kwargs = self.update_kwargs(kwargs, locals())
super(Simplify, self).__init__(**kwargs)
def _impl(self, this):
kwargs = self.extract_kwargs(self._kwargs)
return this.simplify(**kwargs)
class Boundary(UnarySetTheoreticMethod, ParameterHelper):
__doc__ = shapely.geometry.base.BaseGeometry.boundary.__doc__
def _impl(self, this):
return this.boundary
class Centroid(UnarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.centroid.__doc__
def _impl(self, this):
return this.centroid
class PointOnSurface(UnarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.representative_point.__doc__
def _impl(self, this):
return this.representative_point()
class Distance(BinaryOperation):
__doc__ = shapely.geometry.base.BaseGeometry.distance.__doc__
RESULT_TYPE = float
def _impl(self, this, other):
return this.distance(other)
class Equals(BinaryPredicate, ParameterHelper):
__doc__ = shapely.geometry.base.BaseGeometry.almost_equals.__doc__
def __init__(self, decimal=6, **kwargs):
ParameterHelper.__init__(self, ['decimal'])
decimal = self.check_integer(decimal=decimal)
self.check_range(decimal=decimal, low=0, high=16)
kwargs = self.update_kwargs(kwargs, locals())
super(Equals, self).__init__(**kwargs)
def _impl(self, this, other):
kwargs = self.extract_kwargs(self._kwargs)
return this.almost_equals(other, **kwargs)
class Contains(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.contains.__doc__
def _impl(self, this, other):
return this.contains(other)
class Crosses(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.crosses.__doc__
def _impl(self, this, other):
return this.crosses(other)
class Disjoint(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.disjoint.__doc__
def _impl(self, this, other):
return this.disjoint(other)
class Intersects(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.intersects.__doc__
def _impl(self, this, other):
return this.intersects(other)
class Touches(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.touches.__doc__
def _impl(self, this, other):
return this.touches(other)
class Within(BinaryPredicate):
__doc__ = shapely.geometry.base.BaseGeometry.within.__doc__
def _impl(self, this, other):
return this.within(other)
class Intersection(BinarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.intersection.__doc__
def _impl(self, this, other):
return this.intersection(other)
class Difference(BinarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.difference.__doc__
def _impl(self, this, other):
return this.difference(other)
class SymmetricDifference(BinarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.symmetric_difference.__doc__
def _impl(self, this, other):
return this.symmetric_difference(other)
class Union(BinarySetTheoreticMethod):
__doc__ = shapely.geometry.base.BaseGeometry.union.__doc__
def _impl(self, this, other):
return this.union(other)
class CascadeUnion(MultiGeometryOperation):
__doc__ = shapely.ops.unary_union.__doc__
def _impl(self, *geometries):
return shapely.ops.unary_union(geometries)
|
{
"content_hash": "a97494b4deb33e98ec5f545ef47c14ce",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 78,
"avg_line_length": 31.659380692167577,
"alnum_prop": 0.6143490017835568,
"repo_name": "Kotaimen/georest",
"id": "b638a72d9f0e4703c534a10deebec3a9ad4b7b66",
"size": "17408",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "georest/geo/operations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Lua",
"bytes": "3316"
},
{
"name": "Python",
"bytes": "203216"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import time
import tempfile
from latex import latex
def preview(expr, output='png', viewer=None, euler=True, **latex_settings):
"""View expression or LaTeX markup in PNG, DVI, PostScript or
PDF form.
If the expr argument is an expression, it will be exported to
    LaTeX and then compiled using the available TeX distribution.
The first argument, 'expr', may also be a LaTeX string.
The function will then run the appropriate viewer for the given
output format or use the user defined one. By default png
output is generated.
By default pretty Euler fonts are used for typesetting (they
were used to typeset the well known "Concrete Mathematics"
book). For that to work, you need the 'eulervm.sty' LaTeX style (in
Debian/Ubuntu, install the texlive-fonts-extra package). If you prefer
default AMS fonts or your system lacks 'eulervm' LaTeX package then
unset the 'euler' keyword argument.
    To use viewer auto-detection, let's say for 'png' output, issue::
>> from sympy import *
>> x, y = symbols("x,y")
>> preview(x + y, output='png')
This will choose 'pyglet' by default. To select different one::
>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats
the rules are slightly different. As an example we will take
'dvi' output format. If you would run::
>> preview(x + y, output='dvi')
    then 'preview' will look for available 'dvi' viewers on your
system (predefined in the function, so it will try evince,
first, then kdvi and xdvi). If nothing is found you will
need to set the viewer explicitly::
>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
This will skip auto-detection and will run user specified
'superior-dvi-viewer'. If 'view' fails to find it on
your system it will gracefully raise an exception. You may also
enter 'file' for the viewer argument. Doing so will cause this function
to return a file object in read-only mode.
    Currently this depends on pexpect, which is not available for Windows.
Additional keyword args will be passed to the latex call. E.g. the
symbol_names flag::
>> phidd = Symbol('phidd')
>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
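    To obtain the result as a file object instead of launching a viewer
    (an illustrative sketch; the file is opened read-only)::
        >> f = preview(x + y, output='png', viewer='file')
        >> data = f.read()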
"""
# we don't want to depend on anything not in the
# standard library with SymPy by default
import pexpect
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very discussable, but indeed 'gv' looks awful :)
candidates = {
"dvi" : [ "evince", "okular", "kdvi", "xdvi" ],
"ps" : [ "evince", "okular", "gsview", "gv" ],
"pdf" : [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
if pexpect.which(candidate):
viewer = candidate
break
else:
raise SystemError("No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer not in special and not pexpect.which(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if not euler:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
else:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{eulervm}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
tmp = tempfile.mktemp()
with open(tmp + ".tex", "w") as tex:
tex.write(format % latex_string)
cwd = os.getcwd()
os.chdir(tempfile.gettempdir())
if os.system("latex -halt-on-error %s.tex" % tmp) != 0:
raise SystemError("Failed to generate DVI output.")
os.remove(tmp + ".tex")
os.remove(tmp + ".aux")
os.remove(tmp + ".log")
if output != "dvi":
command = {
"ps" : "dvips -o %s.ps %s.dvi",
"pdf" : "dvipdf %s.dvi %s.pdf",
"png" : "dvipng -T tight -z 9 " + \
"--truecolor -o %s.png %s.dvi",
}
try:
if os.system(command[output] % (tmp, tmp)) != 0:
raise SystemError("Failed to generate '%s' output." % output)
else:
os.remove(tmp + ".dvi")
except KeyError:
raise SystemError("Invalid output format: %s" % output)
src = "%s.%s" % (tmp, output)
src_file = None
if viewer == "file":
src_file = open(src, 'rb')
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for plotting.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(src, decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
win = window.Window(
width = img.width + 2*offset,
height = img.height + 2*offset,
caption = "sympy",
resizable = False
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
os.system("%s %s &> /dev/null &" % (viewer, src))
time.sleep(2) # wait for the viewer to read data
os.remove(src)
os.chdir(cwd)
if src_file is not None:
return src_file
|
{
"content_hash": "6a1afb981b89e348c118f6c2593c7a40",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 96,
"avg_line_length": 32.651785714285715,
"alnum_prop": 0.5358217117856167,
"repo_name": "ichuang/sympy",
"id": "43cd38c2432fdd06dbc60381d6276e278693077b",
"size": "7314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/printing/preview.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""ATAG water heater component."""
from typing import Any
from homeassistant.components.water_heater import (
STATE_ECO,
STATE_PERFORMANCE,
WaterHeaterEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, STATE_OFF, TEMP_CELSIUS, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN, AtagEntity
OPERATION_LIST = [STATE_OFF, STATE_ECO, STATE_PERFORMANCE]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Initialize DHW device from config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities([AtagWaterHeater(coordinator, Platform.WATER_HEATER)])
class AtagWaterHeater(AtagEntity, WaterHeaterEntity):
"""Representation of an ATAG water heater."""
_attr_operation_list = OPERATION_LIST
_attr_supported_features = 0
_attr_temperature_unit = TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.coordinator.data.dhw.temperature
@property
def current_operation(self):
"""Return current operation."""
operation = self.coordinator.data.dhw.current_operation
return operation if operation in self.operation_list else STATE_OFF
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
if await self.coordinator.data.dhw.set_temp(kwargs.get(ATTR_TEMPERATURE)):
self.async_write_ha_state()
@property
def target_temperature(self):
"""Return the setpoint if water demand, otherwise return base temp (comfort level)."""
return self.coordinator.data.dhw.target_temperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self.coordinator.data.dhw.max_temp
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self.coordinator.data.dhw.min_temp
|
{
"content_hash": "9c81b3e500d5534134f947d6b8019f4e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 94,
"avg_line_length": 33.47692307692308,
"alnum_prop": 0.7095588235294118,
"repo_name": "mezz64/home-assistant",
"id": "009f84a72ef6eaf2153f95557f1f537f2fa30752",
"size": "2176",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/atag/water_heater.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
print len("abcde")
|
{
"content_hash": "5841f8c3fbf1f1ca2d70e5df7494f260",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.6842105263157895,
"repo_name": "jplevyak/pyc",
"id": "16261d036b0f9f1f1047161403bfa29c776fb913",
"size": "19",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/t57.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "25985"
},
{
"name": "C++",
"bytes": "600253"
},
{
"name": "GAP",
"bytes": "6107"
},
{
"name": "Groff",
"bytes": "787"
},
{
"name": "Makefile",
"bytes": "14494"
},
{
"name": "Python",
"bytes": "78086"
},
{
"name": "Shell",
"bytes": "2179"
}
],
"symlink_target": ""
}
|
import sys
import os
import argparse
def main():
parser=argparse.ArgumentParser(prog="annotateTSS.py")
parser.add_argument("affinities",nargs=1,help="TEPIC gene-TF scores")
parser.add_argument("expression",nargs=1,help="Gene expression file")
parser.add_argument("output",nargs=1,help="File to write the combined data to")
parser.add_argument("--geneIDs",nargs="?",help="Position of the gene IDs in the expression file",default=0,type=int)
parser.add_argument("--expressionC",nargs="?",help="Position of the gene expression estimate in the expression file",default=1,type=int)
parser.add_argument("--filterIDs",nargs="?",help="File containing gene IDs that should be considered",default=None)
args=parser.parse_args()
print("Reading TF affinities from file: "+args.affinities[0])
tfFile=open(args.affinities[0],"r")
tfFileheader=tfFile.readline().strip()
tfDict={}
tfKeys=set()
for l in tfFile:
s=l.split()
tfDict[s[0]]=l.strip()
tfKeys.add(s[0])
tfFile.close()
print("Reading Gene expression from file: "+args.expression[0])
if (args.geneIDs != 0):
print("Gene ID are retrieved from column "+str(args.geneIDs))
if (args.expressionC != 1):
print("Gene expression estimates are retrieved from column "+str(args.expressionC))
expFile=open(args.expression[0],"r")
expFileheader=expFile.readline().strip()
expDict={}
expKeys=set()
for l in expFile:
s=l.split()
if ("." in s[0]):
expDict[s[0].split(".")[0]]=str(s[args.expressionC])
expKeys.add(s[0].split(".")[0])
else:
expDict[s[0]]=str(s[args.expressionC])
expKeys.add(s[0])
expFile.close()
keys=expKeys.intersection(tfKeys)
if (args.filterIDs !=None):
filterSet=set()
print("Loading IDs that should be used for filtering")
filterFile=open(args.filterIDs,"r")
for l in filterFile:
if ("." in l):
filterSet.add(l.split(".")[0])
else:
filterSet.add(l.strip())
filterFile.close()
keys=keys.intersection(filterSet)
print("Overlapping gene IDs: "+str(len(keys)))
print("Writing integrated data to "+str(args.output[0]))
outfile=open(args.output[0],"w")
outfile.write(tfFileheader+"\tExpression\n")
for key in keys:
outfile.write(tfDict[key]+"\t"+expDict[key]+"\n")
outfile.close()
main()
|
{
"content_hash": "41672e5237fa6c3907a22a4e3137137a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 137,
"avg_line_length": 33.81818181818182,
"alnum_prop": 0.7020609318996416,
"repo_name": "SchulzLab/TEPIC",
"id": "65b0497a429ffa69ce86e957938904c701ff3c32",
"size": "2783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MachineLearningPipelines/DYNAMITE/Scripts/integrateData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "15935"
},
{
"name": "Python",
"bytes": "86251"
},
{
"name": "R",
"bytes": "59133"
},
{
"name": "Shell",
"bytes": "62615"
}
],
"symlink_target": ""
}
|
from .main import main
import click
from .. import dbutils2
@main.command()
@click.option('--dbtype', '-t', default=None, help='Database type',
type=click.Choice(['sqlite', 'mysql', 'mariadb'], case_sensitive=False), required=True)
@click.option('--database', '-d', default='', help='Database name (file name for sqlite)', type=str, required=True)
@click.option('--host', '-h', default='localhost', help='Database host name', type=str)
@click.option('--username', '-u', default='user', help='Database user name', type=str)
@click.option('--password', '-p', default='', help='Database password', type=str)
@click.option('--config', '-c', default='config/cct.pickle', help='Config file',
type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=False, readable=True,
allow_dash=False, ))
@click.option('--verbose', '-v', is_flag=True, default=False, help='Verbose operation', type=bool)
@click.option('--readall', '-a', is_flag=True, default=False,
help='Read all headers instead of only those after the last one', type=bool)
def updatedb(dbtype: str, database: str, host: str, username: str, password: str, config: str, verbose: bool,
readall: bool):
"""Create or update the exposure list database"""
dbutils2.updatedb.updatedb(dbtype, host, database, username, password, config, verbose, not readall)
|
{
"content_hash": "68b2cc314dfa85e687b95a5ca7a76a9d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 115,
"avg_line_length": 67.04761904761905,
"alnum_prop": 0.6633522727272727,
"repo_name": "awacha/cct",
"id": "02d27f44bcf1967683adcf2975403d87524b332b",
"size": "1408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cct/cmdline/updatedb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "607"
},
{
"name": "CSS",
"bytes": "908"
},
{
"name": "Cython",
"bytes": "70859"
},
{
"name": "HTML",
"bytes": "1665"
},
{
"name": "Jupyter Notebook",
"bytes": "195924"
},
{
"name": "Python",
"bytes": "1944682"
},
{
"name": "Shell",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from juwparser import timestampize
def send(message, settings):
with open(settings['FILE_SAVE'], 'a') as logfile:
logfile.write(timestampize(message))
logfile.write("\n")
|
{
"content_hash": "fe32cea4ad1bdecde58297ed5c4e5eef",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.6787564766839378,
"repo_name": "krzysztofr/juwreport",
"id": "bf650badcc26fcce85eff0460d63e79b0955209f",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senders/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18944"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_armor_bone_gloves.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b9661b1f785294a0f98b69c132c344ec",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7021943573667712,
"repo_name": "obi-two/Rebelion",
"id": "020b56368f2516fe2d0b67d2f60d90e2eb99b4f5",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_bone_gloves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import division
import time
# Import the PCA9685 module.
import Adafruit_PCA9685
import sys #Used for closing the running program
import pygame #Gives access to KEYUP/KEYDOWN events
#Initialization for pygame
pygame.init()
screen = pygame.display.set_mode((700, 400))
pygame.display.set_caption('Remote Control Window')
# Fill background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((250, 250, 250))
# Display some text
instructions = '''
Micro Olli Controls
Press:
->w: Move Robot forward
->a: Turn Robot left
->d: Turn Robot right
->s: Move Robot backward
->z: Exit
''';
size_inc=15
index=0
for i in instructions.split('\n'):
font = pygame.font.Font(None, 36)
text = font.render(i, 1, (20, 20, 20))
background.blit(text, (10,10+size_inc*index))
index+=1
# Blit everything to the screen
screen.blit(background, (0, 0))
pygame.display.flip()
# Uncomment to enable debug output.
#import logging
#logging.basicConfig(level=logging.DEBUG)
# Uncomment to enable debug output.
#import logging
#logging.basicConfig(level=logging.DEBUG)
# Initialise the PCA9685 using the default address (0x40).
pwm = Adafruit_PCA9685.PCA9685()
# Alternatively specify a different address and/or bus:
#pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)
# Configure min and max servo pulse lengths
servo_min = 1 # Min pulse length out of 4096
servo_max = 1000 # Max pulse length out of 4096
# Helper function to make setting a servo pulse width simpler.
def set_servo_pulse(channel, pulse):
pulse_length = 1000000 # 1,000,000 us per second
pulse_length //= 60 # 60 Hz
print('{0}us per period'.format(pulse_length))
pulse_length //= 4096 # 12 bits of resolution
print('{0}us per bit'.format(pulse_length))
pulse *= 1000
pulse //= pulse_length
pwm.set_pwm(channel, 0, pulse)
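# Worked example (illustrative): at 60 Hz one period is 1000000 // 60 = 16666 us,
# so each of the 4096 ticks is 16666 // 4096 = 4 us long; a 1.5 ms servo pulse
# therefore maps to roughly (1.5 * 1000) // 4 = 375 ticks, i.e.
# set_servo_pulse(0, 1.5) ends up calling pwm.set_pwm(0, 0, 375).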
# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)
while True:
event = pygame.event.wait();
if (event.type == pygame.KEYUP):
pwm.set_pwm(0, 0, 0)
pwm.set_pwm(1, 0, 0)
continue;
if (event.type != pygame.KEYDOWN):
continue;
char = event.unicode;
if char=='w':
pwm.set_pwm(0, 1, 450)
pwm.set_pwm(1, 1, 450) #Move forward
print('forward')
elif char=='a':
pwm.set_pwm(1, 0, 500) # Move left
pwm.set_pwm(0, 0, 325)
print('left')
elif char=='d':
pwm.set_pwm(1, 0, 325) #Move right
pwm.set_pwm(0, 0,500)
print('right')
elif char=='s':
pwm.set_pwm(1, 0, 350) #Move backward
pwm.set_pwm(0, 0, 350)
print('backward')
elif char=='z':
print ("\nExiting"); #Exit
sys.exit();
|
{
"content_hash": "ca01690d485b932106edbe14f34f8a33",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 62,
"avg_line_length": 28.22772277227723,
"alnum_prop": 0.6324096808137496,
"repo_name": "MitchellB23/Micro-Olli",
"id": "1172cadca00f985eea501175672f00a617783d4d",
"size": "3354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MicroOlliDrive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15825"
}
],
"symlink_target": ""
}
|
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from rnacentral_pipeline.rnacentral.precompute.data import context as ctx
import rnacentral_pipeline.rnacentral.precompute.qa.incomplete_sequence as inco
from tests.rnacentral.precompute import helpers
@pytest.mark.parametrize(
"rna_id,rna_type,flag",
[ # pylint: disable=no-member
("URS0000400378_30527", "tRNA", False),
("URS000058E89C_39432", "rRNA", False),
("URS000061A10B_9606", "tRNA", False),
("URS0000866382_511983", "tRNA", False),
("URS000099C38D_77133", "rRNA", True),
("URS00009ED984_77133", "rRNA", True),
("URS00009F92C9_358574", "rRNA", True),
("URS0000A254A0_198431", "rRNA", True),
("URS00001C018D_77133", "rRNA", True),
("URS0000010837_7227", "misc_RNA", True),
],
)
def test_can_detect_incomplete_sequence(rna_id, rna_type, flag):
context, sequence = helpers.load_data(rna_id)
assert inco.validate(context, rna_type, sequence).has_issue == flag
@pytest.mark.parametrize(
"rna_id,rna_type,message",
[ # pylint: disable=no-member
(
"URS0000922E4C_6239",
"rRNA",
(
(
'Potential <a href="http://rfam.org/family/RF02543">Eukaryotic '
"large subunit ribosomal RNA</a> fragment"
)
),
),
],
)
def test_can_produce_correct_contamination_warnings(rna_id, rna_type, message):
context, sequence = helpers.load_data(rna_id)
    assert inco.validate(context, rna_type, sequence).message == message
|
{
"content_hash": "f938da66ae443c1847c4541448999fc9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 36.644067796610166,
"alnum_prop": 0.6618871415356151,
"repo_name": "RNAcentral/rnacentral-import-pipeline",
"id": "5ccf544a83f85c5e7c3eee79d23c813a6770cded",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/rnacentral/precompute/qa/incomplete_sequence_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18451"
},
{
"name": "Dockerfile",
"bytes": "3405"
},
{
"name": "Groovy",
"bytes": "6339"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "Makefile",
"bytes": "1197"
},
{
"name": "Nextflow",
"bytes": "104756"
},
{
"name": "PLpgSQL",
"bytes": "15906"
},
{
"name": "PostScript",
"bytes": "965516"
},
{
"name": "Python",
"bytes": "1623134"
},
{
"name": "Rust",
"bytes": "181197"
},
{
"name": "Shell",
"bytes": "23155"
}
],
"symlink_target": ""
}
|
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
import logging
from typing import Any, Callable, Dict, List, \
Optional, Tuple, Type, Union
from django.conf import settings
from django.db import connection, models
from django.db.models import F
from analytics.models import Anomaly, BaseCount, \
FillState, InstallationCount, RealmCount, StreamCount, \
UserCount, installation_epoch, last_successful_fill
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, \
ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import Message, Realm, \
Stream, UserActivityInterval, UserProfile, models
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None) -> None:
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError("Unknown frequency: %s" % (frequency,))
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __str__(self) -> str:
return "<CountStat: %s>" % (self.property,)
class LoggingCountStat(CountStat):
def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None, dependencies: List[str]=[]) -> None:
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
class DataCollector:
def __init__(self, output_table: Type[BaseCount],
pull_function: Optional[Callable[[str, datetime, datetime], int]]) -> None:
self.output_table = output_table
self.pull_function = pull_function
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime) -> None:
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError("Unknown frequency: %s" % (stat.frequency,))
verify_UTC(fill_to_time)
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError("fill_to_time must be on an hour boundary: %s" % (fill_to_time,))
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s" % (stat.property, currently_filled))
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time))
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s" % (stat.property,))
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError("Unknown value for FillState.state: %s." % (fill_state.state,))
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s." %
(stat.property, dependency))
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
while currently_filled <= fill_to_time:
logger.info("START %s %s" % (stat.property, currently_filled))
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)" % (stat.property, (end-start)*1000))
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime) -> None:
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time)
logger.info("%s run pull_function (%dms/%sr)" %
(stat.property, (time.time()-timer)*1000, rows_added))
do_aggregate_to_summary_table(stat, end_time)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime) -> None:
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if output_table in (UserCount, StreamCount):
realmcount_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, COALESCE(sum(%(output_table)s.value), 0), '%(property)s',
%(output_table)s.subgroup, %%(end_time)s
FROM zerver_realm
JOIN %(output_table)s
ON
zerver_realm.id = %(output_table)s.realm_id
WHERE
%(output_table)s.property = '%(property)s' AND
%(output_table)s.end_time = %%(end_time)s
GROUP BY zerver_realm.id, %(output_table)s.subgroup
""" % {'output_table': output_table._meta.db_table,
'property': stat.property}
start = time.time()
cursor.execute(realmcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s RealmCount aggregation (%dms/%sr)" % (
stat.property, (end - start) * 1000, cursor.rowcount))
# Aggregate into InstallationCount
installationcount_query = """
INSERT INTO analytics_installationcount
(value, property, subgroup, end_time)
SELECT
sum(value), '%(property)s', analytics_realmcount.subgroup, %%(end_time)s
FROM analytics_realmcount
WHERE
property = '%(property)s' AND
end_time = %%(end_time)s
GROUP BY analytics_realmcount.subgroup
""" % {'property': stat.property}
start = time.time()
cursor.execute(installationcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s InstallationCount aggregation (%dms/%sr)" % (
stat.property, (end - start) * 1000, cursor.rowcount))
cursor.close()
## Utility functions called from outside counts.py ##
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object: Union[Realm, UserProfile, Stream], stat: CountStat,
subgroup: Optional[Union[str, int, bool]], event_time: datetime,
increment: int=1) -> None:
table = stat.data_collector.output_table
if table == RealmCount:
id_args = {'realm': zerver_object}
elif table == UserCount:
id_args = {'realm': zerver_object.realm, 'user': zerver_object}
else: # StreamCount
id_args = {'realm': zerver_object.realm, 'stream': zerver_object}
if stat.frequency == CountStat.DAY:
end_time = ceiling_to_day(event_time)
else: # CountStat.HOUR:
end_time = ceiling_to_hour(event_time)
row, created = table.objects.get_or_create(
property=stat.property, subgroup=subgroup, end_time=end_time,
defaults={'value': increment}, **id_args)
if not created:
row.value = F('value') + increment
row.save(update_fields=['value'])
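# Illustrative sketch (an assumption about the caller, which is not part of this
# file): zerver/lib/actions.py increments a LoggingCountStat roughly like
#
#     do_increment_logging_stat(realm, COUNT_STATS['invites_sent::day'],
#                               None, timezone_now(), increment=num_invites)
#
# where `realm`, `timezone_now` and `num_invites` come from the caller's context.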
def do_drop_all_analytics_tables() -> None:
UserCount.objects.all().delete()
StreamCount.objects.all().delete()
RealmCount.objects.all().delete()
InstallationCount.objects.all().delete()
FillState.objects.all().delete()
Anomaly.objects.all().delete()
def do_drop_single_stat(property: str) -> None:
UserCount.objects.filter(property=property).delete()
StreamCount.objects.filter(property=property).delete()
RealmCount.objects.filter(property=property).delete()
InstallationCount.objects.filter(property=property).delete()
FillState.objects.filter(property=property).delete()
## DataCollector-level operations ##
def do_pull_by_sql_query(property: str, start_time: datetime, end_time: datetime, query: str,
group_by: Optional[Tuple[models.Model, str]]) -> int:
if group_by is None:
subgroup = 'NULL'
group_by_clause = ''
else:
subgroup = '%s.%s' % (group_by[0]._meta.db_table, group_by[1])
group_by_clause = ', ' + subgroup
# We do string replacement here because cursor.execute will reject a
# group_by_clause given as a param.
# We pass in the datetimes as params to cursor.execute so that we don't have to
# think about how to convert python datetimes to SQL datetimes.
query_ = query % {'property': property, 'subgroup': subgroup,
'group_by_clause': group_by_clause}
cursor = connection.cursor()
cursor.execute(query_, {'time_start': start_time, 'time_end': end_time})
rowcount = cursor.rowcount
cursor.close()
return rowcount
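# Illustrative note on the two-stage substitution above: Python %-formatting
# first fills '%(property)s', '%(subgroup)s' and '%(group_by_clause)s' and turns
# '%%(time_end)s' into '%(time_end)s'; the database driver then binds that
# remaining placeholder from the params dict passed to cursor.execute(). E.g.
#
#     "..., '%(property)s', %(subgroup)s, %%(time_end)s"
# becomes
#     "..., 'messages_sent:is_bot:hour', zerver_userprofile.is_bot, %(time_end)s"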
def sql_data_collector(output_table: Type[BaseCount], query: str,
group_by: Optional[Tuple[models.Model, str]]) -> DataCollector:
def pull_function(property: str, start_time: datetime, end_time: datetime) -> int:
return do_pull_by_sql_query(property, start_time, end_time, query, group_by)
return DataCollector(output_table, pull_function)
def do_pull_minutes_active(property: str, start_time: datetime, end_time: datetime) -> int:
user_activity_intervals = UserActivityInterval.objects.filter(
end__gt=start_time, start__lt=end_time
).select_related(
'user_profile'
).values_list(
'user_profile_id', 'user_profile__realm_id', 'start', 'end')
seconds_active = defaultdict(float) # type: Dict[Tuple[int, int], float]
for user_id, realm_id, interval_start, interval_end in user_activity_intervals:
start = max(start_time, interval_start)
end = min(end_time, interval_end)
seconds_active[(user_id, realm_id)] += (end - start).total_seconds()
rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property,
end_time=end_time, value=int(seconds // 60))
for ids, seconds in seconds_active.items() if seconds >= 60]
UserCount.objects.bulk_create(rows)
return len(rows)
count_message_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, count(*),
'%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id
WHERE
zerver_userprofile.date_joined < %%(time_end)s AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
GROUP BY zerver_userprofile.id %(group_by_clause)s
"""
# Note: ignores the group_by / group_by_clause.
count_message_type_by_user_query = """
INSERT INTO analytics_usercount
(realm_id, user_id, value, property, subgroup, end_time)
SELECT realm_id, id, SUM(count) AS value, '%(property)s', message_type, %%(time_end)s
FROM
(
SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
CASE WHEN
zerver_recipient.type = 1 THEN 'private_message'
WHEN
zerver_recipient.type = 3 THEN 'huddle_message'
WHEN
zerver_stream.invite_only = TRUE THEN 'private_stream'
ELSE 'public_stream'
END
message_type
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
JOIN zerver_recipient
ON
zerver_message.recipient_id = zerver_recipient.id
LEFT JOIN zerver_stream
ON
zerver_recipient.type_id = zerver_stream.id
GROUP BY
zerver_userprofile.realm_id, zerver_userprofile.id,
zerver_recipient.type, zerver_stream.invite_only
) AS subquery
GROUP BY realm_id, id, message_type
"""
# This query joins to the UserProfile table since all current queries that
# use this also subgroup on UserProfile.is_bot. If in the future there is a
# stat that counts messages by stream and doesn't need the UserProfile
# table, consider writing a new query for efficiency.
count_message_by_stream_query = """
INSERT INTO analytics_streamcount
(stream_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_stream.id, zerver_stream.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_stream
JOIN zerver_recipient
ON
zerver_stream.id = zerver_recipient.type_id
JOIN zerver_message
ON
zerver_recipient.id = zerver_message.recipient_id
JOIN zerver_userprofile
ON
zerver_message.sender_id = zerver_userprofile.id
WHERE
zerver_stream.date_created < %%(time_end)s AND
zerver_recipient.type = 2 AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
GROUP BY zerver_stream.id %(group_by_clause)s
"""
# Hardcodes the query needed by active_users:is_bot:day, since that is
# currently the only stat that uses this.
count_user_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*),'%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_userprofile
ON
zerver_realm.id = zerver_userprofile.realm_id
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_userprofile.date_joined >= %%(time_start)s AND
zerver_userprofile.date_joined < %%(time_end)s AND
zerver_userprofile.is_active = TRUE
GROUP BY zerver_realm.id %(group_by_clause)s
"""
# Currently hardcodes the query needed for active_users_audit:is_bot:day.
# Assumes that a user cannot have two RealmAuditLog entries with the same event_time and
# event_type in ['user_created', 'user_deactivated', etc].
# In particular, it's important to ensure that migrations don't cause that to happen.
check_realmauditlog_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
ral1.modified_user_id, ral1.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realmauditlog ral1
JOIN (
SELECT modified_user_id, max(event_time) AS max_event_time
FROM zerver_realmauditlog
WHERE
event_type in ('user_created', 'user_deactivated', 'user_activated', 'user_reactivated') AND
event_time < %%(time_end)s
GROUP BY modified_user_id
) ral2
ON
ral1.event_time = max_event_time AND
ral1.modified_user_id = ral2.modified_user_id
JOIN zerver_userprofile
ON
ral1.modified_user_id = zerver_userprofile.id
WHERE
ral1.event_type in ('user_created', 'user_activated', 'user_reactivated')
"""
check_useractivityinterval_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_userprofile
JOIN zerver_useractivityinterval
ON
zerver_userprofile.id = zerver_useractivityinterval.user_profile_id
WHERE
zerver_useractivityinterval.end >= %%(time_start)s AND
zerver_useractivityinterval.start < %%(time_end)s
GROUP BY zerver_userprofile.id %(group_by_clause)s
"""
count_realm_active_humans_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
usercount1.realm_id, count(*), '%(property)s', NULL, %%(time_end)s
FROM (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = 'active_users_audit:is_bot:day' AND
subgroup = 'false' AND
end_time = %%(time_end)s
) usercount1
JOIN (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = '15day_actives::day' AND
end_time = %%(time_end)s
) usercount2
ON
usercount1.user_id = usercount2.user_id
GROUP BY usercount1.realm_id
"""
# Currently unused and untested
count_stream_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_stream
ON
        zerver_realm.id = zerver_stream.realm_id
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_stream.date_created >= %%(time_start)s AND
zerver_stream.date_created < %%(time_end)s
GROUP BY zerver_realm.id %(group_by_clause)s
"""
## CountStat declarations ##
count_stats_ = [
# Messages Sent stats
# Stats that count the number of messages sent in various ways.
# These are also the set of stats that read from the Message table.
CountStat('messages_sent:is_bot:hour',
sql_data_collector(UserCount, count_message_by_user_query, (UserProfile, 'is_bot')),
CountStat.HOUR),
CountStat('messages_sent:message_type:day',
sql_data_collector(UserCount, count_message_type_by_user_query, None), CountStat.DAY),
CountStat('messages_sent:client:day',
sql_data_collector(UserCount, count_message_by_user_query, (Message, 'sending_client_id')),
CountStat.DAY),
CountStat('messages_in_stream:is_bot:day',
sql_data_collector(StreamCount, count_message_by_stream_query, (UserProfile, 'is_bot')),
CountStat.DAY),
# Number of Users stats
# Stats that count the number of active users in the UserProfile.is_active sense.
# 'active_users_audit:is_bot:day' is the canonical record of which users were
# active on which days (in the UserProfile.is_active sense).
# Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected.
CountStat('active_users_audit:is_bot:day',
sql_data_collector(UserCount, check_realmauditlog_by_user_query, (UserProfile, 'is_bot')),
CountStat.DAY),
    # Sanity check on 'active_users_audit:is_bot:day', and an archetype for future LoggingCountStats.
# In RealmCount, 'active_users_audit:is_bot:day' should be the partial
# sum sequence of 'active_users_log:is_bot:day', for any realm that
# started after the latter stat was introduced.
LoggingCountStat('active_users_log:is_bot:day', RealmCount, CountStat.DAY),
# Another sanity check on 'active_users_audit:is_bot:day'. Is only an
# approximation, e.g. if a user is deactivated between the end of the
# day and when this stat is run, they won't be counted. However, is the
# simplest of the three to inspect by hand.
CountStat('active_users:is_bot:day',
sql_data_collector(RealmCount, count_user_by_realm_query, (UserProfile, 'is_bot')),
CountStat.DAY, interval=TIMEDELTA_MAX),
# User Activity stats
# Stats that measure user activity in the UserActivityInterval sense.
CountStat('1day_actives::day',
sql_data_collector(UserCount, check_useractivityinterval_by_user_query, None),
CountStat.DAY, interval=timedelta(days=1)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('15day_actives::day',
sql_data_collector(UserCount, check_useractivityinterval_by_user_query, None),
CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('minutes_active::day', DataCollector(UserCount, do_pull_minutes_active), CountStat.DAY),
# Rate limiting stats
# Used to limit the number of invitation emails sent by a realm
LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY),
# Dependent stats
# Must come after their dependencies.
# Canonical account of the number of active humans in a realm on each day.
DependentCountStat('realm_active_humans::day',
sql_data_collector(RealmCount, count_realm_active_humans_query, None),
CountStat.DAY,
dependencies=['active_users_audit:is_bot:day', '15day_actives::day'])
]
COUNT_STATS = OrderedDict([(stat.property, stat) for stat in count_stats_])
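# Illustrative sketch of how this module is driven (an assumption: this mirrors
# the analytics management command, which is not part of this file; timezone_now
# is hypothetical here and would need to return an aware UTC datetime):
#
#     fill_to_time = floor_to_hour(timezone_now())
#     for stat in COUNT_STATS.values():
#         process_count_stat(stat, fill_to_time)
#
# COUNT_STATS is ordered so that dependent stats come after their dependencies.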
|
{
"content_hash": "b2a103460afb7e8a9f6b117e38869371",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 106,
"avg_line_length": 43.37105751391466,
"alnum_prop": 0.6524361551952774,
"repo_name": "dhcrzf/zulip",
"id": "3b3c5158d8063f52ebb341a7b5303f22dd52b66e",
"size": "23377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analytics/lib/counts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from ..arithmetic import AddScalarVolumes
def test_AddScalarVolumes_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume1=dict(argstr='%s',
position=-3,
),
inputVolume2=dict(argstr='%s',
position=-2,
),
order=dict(argstr='--order %s',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = AddScalarVolumes.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_AddScalarVolumes_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = AddScalarVolumes.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
{
"content_hash": "07c734928c45b0a4c7382e239217833e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 25.6,
"alnum_prop": 0.6258680555555556,
"repo_name": "mick-d/nipype",
"id": "29a2a157e6546d9eee3fc0a3ec8e4300abb5968e",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import string
import copy
class GetPot_variable:
def __init__(self, name, str_value):
self.name = name
self.take(str_value)
def take(self, str_value):
self.value = string.split(str_value)
self.original = str_value
class GetPot:
def __init__(self, Argv=None, Filename=None, SectionsEnabledF=True):
        # Do not use a mutable default argument for Argv; otherwise
        # strange things may occur when the cursor is somewhere set to
        # len(Argv) - 1. Even if the user passes a dangerous empty list,
        # the case is caught.
if Argv is None: Argv = [""]
# in case a search for a specific argument failed,
        # it affects the following function calls.
self.search_failed_f = 0
# indeces of arguments that do not start with minus
self.__idx_nominus = []
# vector of identified variables
# (arguments of the form "variable=value")
self.variables = [ ]
self.section_list = []
# cursor oriented functions (next(), follow(), etc.):
# pointer to actual position to be parsed.
self.cursor = 0
self.nominus_cursor = -1
self.search_loop_f = 1
self.__prefix = ""
# set up the internal database
if Filename is not None:
Argv = [ Filename ]
parsed_argv = self.__read_in_file(Filename)
try: Argv.extend(parsed_argv)
except: pass
self.argv = self.__parse_argument_vector(Argv, SectionsEnabledF)
if len(self.argv) == 0: self.argv = [""]
def __parse_argument_vector(self, argv_, SectionsEnabledF=True):
self.section = ''
section_stack = []
arg_list = []
i = -1
for arg in filter(lambda arg: arg.strip() != "", argv_):
i += 1
arg = arg.strip() # avoid problems with "\r\n"
if i == 0: arg_list.append(arg); continue
# [section] ?
if SectionsEnabledF and len(arg) > 1 and arg[0] == '[' and arg[-1] == ']':
name = self.DBE_expand_string(arg[1:-1])
self.section = self.__process_section_label(name, section_stack)
if self.section not in self.section_list:
self.section_list.append(self.section)
arg_list.append(arg)
else:
arg = self.section + self.DBE_expand_string(arg[:])
arg_list.append(arg)
# no-minus argument ?
if arg[0] != '-': self.__idx_nominus.append(i)
# assignment ?
for k in range(len(arg)-1):
if arg[k] == '=':
v = self.__find_variable(arg[0:k])
if v is None:
self.variables.append(GetPot_variable(arg[0:k], arg[k+1:]))
else:
v.take(arg[k+1:])
return arg_list
def __read_in_file(self, Filename):
"""Parses a file and returns a vector of arguments."""
try:
fh = open(Filename, "rb")
except:
raise "GetPot: could not open file '%s'" % Filename
brute_tokens = []
token = 0
while token != '':
self.__skip_whitespace(fh)
token = self.__get_next_token(fh)
brute_tokens.append(token)
# -- reduce expressions of token1'='token2 to a single
# string 'token1=token2'
# -- copy everything into 'argv'
# -- arguments preceded by something like '[' name ']' (section)
# produce a second copy of each argument with a __prefix '[name]argument'
i1 = 0; i2 = 1; i3 = 2;
argv = []
# loop over brute tokens to create argv vector
while i1 < len(brute_tokens):
SRef = brute_tokens[i1];
            # concatenate 'variable' '=' 'value' to 'variable=value'
if i2 < len(brute_tokens) and brute_tokens[i2] == '=':
if i3 >= len(brute_tokens):
argv.append(brute_tokens[i1] + brute_tokens[i2])
else:
argv.append(brute_tokens[i1] + brute_tokens[i2] + brute_tokens[i3])
i1 = i3 + 1; i2 = i3 + 2; i3 = i3 + 3;
continue
else:
argv.append(SRef)
i1 = i2; i2 = i3; i3 += 1;
return argv
def __skip_whitespace(self, FH):
"""Skips whitespaces: space, tabulator, newline and #-comments."""
tmp = ' '
while 1+1==2:
while tmp in [' ', '\t', '\n', '\r']:
tmp = FH.read(1)
if tmp == '': return # end of file ?
# found a non whitespace
if tmp != '#':
# put the last read letter back
FH.seek(-1,1) # (seek -1 backwards from current position (code=1))
return
# '#' - comment => skip until end of line
while tmp != '\n':
tmp = FH.read(1)
if tmp == '': return # end of file ?
def __get_next_token(self, FH):
"""Reads next chunk of characters that are not separated by
        whitespace. Quotes and ${ ... } expressions, however, may contain whitespace."""
token = ''; tmp = 0; last_letter = 0
while 1+1 == 2:
last_letter = tmp; tmp = FH.read(1);
if tmp == '' or \
((tmp == ' ' or tmp == '\t' or tmp == '\n') and last_letter != '\\'):
return token
elif tmp == '\'' and not last_letter == '\\':
# QUOTES: un-backslashed quotes => it's a string
token += self.__get_string(FH)
continue
elif tmp == "{" and last_letter == '$':
token += '{' + self.__get_until_closing_bracket(FH)
continue
elif tmp == "$" and last_letter == '\\':
token += tmp; tmp = 0 # so that last_letter will become = 0, not '$'
continue
elif tmp == '\\' and not last_letter == '\\':
continue # don't append un-backslashed backslashes
token += tmp
def __get_string(self, FH):
"""Reads characters until the next un-backslashed quote."""
str = ''; tmp = 0
while 1 + 1 == 2:
last_letter = tmp; tmp = FH.read(1)
if tmp == '': return str
# un-backslashed quotes => it's the end of the string
elif tmp == '\'' and not last_letter == '\\': return str
elif tmp == '\\' and not last_letter == '\\': continue # don't append
str += tmp
def __get_until_closing_bracket(self, FH):
"""Reads characters until the next un-backslashed '}'."""
str = ''; tmp = 0
brackets = 1
while 1 + 1 == 2:
last_letter = tmp; tmp = FH.read(1)
if tmp == '': return str
elif tmp == '{' and last_letter == '$': brackets += 1
elif tmp == '}':
brackets -= 1
# un-backslashed brackets => it's the end of the string
if brackets == 0: return str + '}'
elif tmp == '\\' and not last_letter == '\\':
continue # do not append an unbackslashed backslash
str += tmp
def absorb(self, Other):
tmp = copy.copy(Other.argv)
# Delete the application name
del tmp[0]
self.__parse_argument_vector(tmp)
def __process_section_label(self, label, section_stack):
# 1) subsection of actual section ('./' prefix)
if len(label) >= 2 and label[:2] == "./":
label = label[2:]
# a single [./] means 'the same section'
# 2) subsection of parent section ('../' prefix)
elif label[0:3] == "../":
while label[0:3] == "../":
if len(section_stack) != 0: section_stack.pop()
label = label[3:]
# 3) subsection of the root-section
else:
del section_stack[:]
# 4) parse section name for slashes
if label != "":
i=0
while i < len(label):
if label[i] == '/':
section_stack.append(label[0:i])
if i+1 < len(label):
label = label[i+1:]
i = 0
else:
i += 1
section_stack.append(label)
section = ""
for s in section_stack:
section += s + '/'
return section
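    # Illustrative trace of the rules above: with section_stack == ['outer'],
    # a label './sub' yields 'outer/sub/', a label '../other' pops 'outer'
    # first and yields 'other/', and a plain label 'top' resets the stack
    # and yields 'top/'.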
def __convert_to_type(self, String, Default):
"""Converts a string into an object of the same type as 'Default'.
Returns 'None' in case this is not possible."""
if type(Default) == type(""):
# character string
return String
elif type(Default) == type(0.):
# float
try: return float(String)
except: return Default
elif type(Default) == type(0):
# integer
if len(String) >= 2 and String[0:2] == "0x": start_i = 2
elif len(String) >=3 and String[0:3] == "-0x": start_i = 3
else:
# normal integer, not a hexadecimal
try: return int(String)
except: return Default
# a hexadecimal number
number = 0;
for c in String[start_i:len(String)]:
                c = ord(c)
                if c >= ord('0') and c <= ord('9'): digit = c - ord('0')
                elif c >= ord('a') and c <= ord('f'): digit = c - ord('a') + 10
                elif c >= ord('A') and c <= ord('F'): digit = c - ord('A') + 10
else: break
number *= 16
number += digit
if start_i == 2: return number
else: return -number
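        # For example (illustrative, using the hexadecimal branch above):
        # self.__convert_to_type("0x1A", 0) yields 26 and
        # self.__convert_to_type("-0x10", 0) yields -16.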
def __get_remaining_string(self, String, Start):
"""Checks if 'String' begins with 'Start' and returns the remaining String.
Returns None if String does not begin with Start."""
if Start == "": return String
if string.find(String, Start) == 0: return String[len(Start):]
else: return None
def __deal_propperly_with_array_arguments(self, Args):
tmp_args = []
for arg in Args:
if type(arg) == list: tmp_args.extend(arg)
else: tmp_args.append(arg)
return tmp_args
# -- search for a certain option and set cursor to position
def search(self, *Args):
"""Search for a command line argument and set cursor. Starts search
        from current cursor position. Only wraps around the end if 'loop'
is enabled. Returns 'False' if nothing was found, True otherwise."""
        # make sure to properly deal with arrays being passed as arguments
Args = self.__deal_propperly_with_array_arguments(Args)
if self.cursor >= len(self.argv)-1:
self.cursor = len(self.argv)-1
self.search_failed_f = 1
old_cursor = self.cursor
def check_match(i0, i1, Args, Argv=self.argv, Prefix=self.__prefix, obj=self):
"""Checks if one of the arguments in Args matches an argument in sequence."""
for i in range(i0, i1):
for arg in Args:
if Prefix + arg == Argv[i]:
obj.cursor = i; obj.search_failed_f = 0
return True
return False
# first run: from cursor to end
if check_match(self.cursor, len(self.argv), Args) == 1: return True
if self.search_loop_f == 0: return False
# second run: from 1 to old_cursor position
# (note, that old_cursor can be at maximum = len(self.argv),
# the range function contains therefore only values until
# "len(self.argv) - 1")
        if check_match(1, old_cursor, Args): return True
return False
def disable_loop(self):
self.search_loop_f = 0
def enable_loop(self):
self.search_loop_f = 1
# -- reset cursor to initial position
def reset_cursor(self):
self.search_failed_f = 0; self.cursor = 0
def search_failed(self):
return self.search_failed_f
def init_multiple_occurrence(self):
self.disable_loop(); self.reset_cursor()
def set_prefix(self, Prefix):
if Prefix[-1] != "/": Prefix += "/"
self.__prefix = Prefix
# (*) direct access to command line arguments through []-operator
def __getitem__(self, Idx):
"""Returns a specific argument indexed by Idx or 'None' if this
does not exist."""
if Idx < 0 or Idx >= len(self.argv): return None
return self.argv[Idx]
def get(self, Idx, Default):
"""Looks if the type of argv[Idx] matches the type of the default argument.
If it does not, the default argument is returned."""
if self[Idx] is None: return Default
return self.__convert_to_type(self[Idx], Default)
def size(self):
"""Returns the size of the argument list."""
return len(self.argv)
# -- get argument at cursor++
def next(self, Default):
"""Tests if the following argument is of the same type as Default. If not
        Default is returned. Note that if the following argument does not start
        with the 'prefix', the Default argument is likewise returned."""
if self.search_failed_f == 1: return Default
self.cursor += 1
if self.cursor >= len(self.argv): self.cursor = len(self.argv)-1; return Default
if self.__prefix == "": return self.__convert_to_type(self.argv[self.cursor], Default)
remain = self.__get_remaining_string(self.argv[self.cursor], self.__prefix)
if remain is not None: return self.__convert_to_type(remain, Default)
else: return Default
# -- search for option and get argument at cursor++
def follow(self, Default, *Args):
        # make sure to properly deal with arrays being passed as arguments
Args = self.__deal_propperly_with_array_arguments(Args)
for arg in Args:
if self.search(arg) == 1:
return self.next(Default)
return Default
def nominus_followers(self, *Args):
"""Returns a list of strings of arguments that directly follow
the option but do not start with a minus."""
        # make sure to properly deal with arrays being passed as arguments
Args = self.__deal_propperly_with_array_arguments(Args)
result_list = []
for arg in Args:
if self.search(arg) == False: continue
while 1 + 1 == 2:
self.cursor += 1
if self.cursor >= len(self.argv):
self.cursor = len(self.argv)-1
break
if len(self.argv[self.cursor]) >= 1:
if self.argv[self.cursor][0] == "-":
break
else:
result_list.append(self.argv[self.cursor])
return result_list
def direct_follow(self, Default, Arg):
remaining_string = self.__match_starting_string(Arg)
if remaining_string is None:
return Default
self.cursor += 1
if self.cursor >= len(self.argv): self.cursor = len(self.argv)
return self.__convert_to_type(remaining_string, Default)
# helper to find directly followed arguments
def __match_starting_string(self, StartString):
"""Searches argument list for next occurrence of 'StartString', beginning
from current cursor position. Returns string after StartString if found.
Returns None if no argument contains the starting string."""
old_cursor = self.cursor
self.search_failed_f = 1
# first run: from cursor to end
if self.cursor < len(self.argv):
for i in range(old_cursor, len(self.argv)):
if string.find(self.argv[i], StartString) == 0:
self.cursor = i
self.search_failed_f = 0
return self.argv[i][len(StartString):]
if self.search_loop_f == 0: return None
# second run: from 1 to old_cursor position
# (note, that old_cursor can be at maximum = len(self.argv),
# the range function contains therefore only values until
# "len(self.argv) - 1")
for i in range(1, old_cursor):
if string.find(self.argv[i], StartString) == 0:
self.cursor = i
self.search_failed_f = 0
return self.argv[i][len(StartString):]
return None
def options_contain(self, FlagList):
"""Go through all arguments that start with a '-' and watch if they
contain a flag in flaglist. In case a prefix is specified, the option
        must be preceded by it, e.g. 'pack-options/-cvx'."""
for arg in self.argv:
if self.__prefix != "": arg = self.__get_remaining_string(arg, self.__prefix)
if arg is not None and len(arg) >= 2 and arg[0] == '-' and arg[1] != '-' \
and self.__check_flags(arg, FlagList) == 1: return 1
return 0
def argument_contains(self, Idx, FlagList):
"""Check if an argument that is associated with a certain index contains
a certain flag. If a prefix is specified, the index indicates the number
inside the list."""
        if Idx < 0 or Idx >= len(self.argv): return 0
if self.__prefix == "":
# search argument for any flag in flag list
return self.__check_flags(self.argv[Idx], FlagList)
# if a prefix is set, then the argument index is the index
# inside the 'namespace'
# => only check list of arguments that start with prefix
no_matches = 0
for i in range(len(self.argv)):
remain = self.__get_remaining_string(self.argv[i], self.__prefix)
if remain is not None:
no_matches += 1
if no_matches == Idx:
return self.__check_flags(remain, FlagList)
# no argument in this namespace
return 0
def __check_flags(self, Str, FlagList):
"""Does a given string 'Str' contain a flag in 'FlagList' ?"""
for l in Str:
for f in FlagList:
if f == l:
return 1
return 0
# (*) nominus arguments
def reset_nominus_cursor(self):
self.nominus_cursor = -1
def nominus_vector(self):
v_nm = []
for i in self.__idx_nominus:
nominus = self.argv[i]
tmp = self.__get_remaining_string(nominus, self.__prefix)
if tmp is not None: v_nm.append(tmp)
return v_nm
def nominus_size(self):
return len(self.__idx_nominus)
def next_nominus(self):
if self.nominus_cursor >= len(self.__idx_nominus)-1: return None
self.nominus_cursor += 1
return self.argv[self.__idx_nominus[self.nominus_cursor]]
# (*) variables
# helper to find arguments
def get_variable_names(self):
# return all variables for given prefix
vars = []
for v in self.variables:
tmp = self.__get_remaining_string(v.name, self.__prefix)
if tmp is not None: vars.append(tmp)
return vars
def get_section_names(self):
return self.section_list
# helper to find arguments
def __find_variable(self, VarName):
"""Search for a variable in the array of variables."""
v_name = self.__prefix + VarName
for v in self.variables:
if v.name == v_name: return v
return None
# -- scalar values and vectors
def __call__(self, VarName, Default, Idx=-1):
"""Returns 'None' in case variable was not found or type did not match."""
v = self.__find_variable(VarName)
if v is None:
return Default
if Idx == -1:
# variable has to be considered as a single value
return self.__convert_to_type(v.original, Default)
else:
# variable interpreted as vector
if Idx >= len(v.value):
return Default
return self.__convert_to_type(v.value[Idx], Default)
def vector_variable_size(self):
return self.variables.size()
def Print(self):
print "argc = %i" % len(self.argv)
for arg in self.argv:
print "%s" % arg
# (*) dollar bracket expressions (DBEs) ------------------------------------
#
# 1) Entry Function: DBE_expand_string()
# Takes a string such as
#
# "${+ ${x} ${y}} Subject-${& ${section} ${subsection}}: ${title}"
#
# calls DBE_expand() for each of the expressions
#
# ${+ ${x} ${y}}
# ${& ${section} ${subsection}}
# ${Title}
#
# and returns the string
#
# "4711 Subject-1.01: Mit den Clowns kamen die Schwaene"
#
# assuming that
# x = "4699"
# y = "12"
# section = "1."
# subsection = "01"
# title = "Mit den Clowns kamen die Schwaene"
#
# 2) DBE_expand():
#
# checks for the command, i.e. the 'sign' that follows '${'
# divides the argument list into sub-expressions using
# DBE_get_expr_list()
#
# ${+ ${x} ${y}} -> "${x}" "${y}"
# ${& ${section} ${subsection}} -> "${section}" "${subsection}"
# ${Title} -> Nothing, variable expansion
#
# 3) DBE_expression_list():
#
# builds a vector of unbracketed whitespace separated strings, i.e.
#
# " ${Number}.a ${: Das Marmorbild} AB-${& Author= ${Eichendorf}-1870}"
#
# is split into a vector
#
# [0] ${Number}.a
# [1] ${: Das Marmorbild}
    #       [2] AB-${& Author= ${Eichendorf}-1870}
#
    #    Then each sub-expression is expanded using DBE_expand().
#---------------------------------------------------------------------------
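    # Minimal usage sketch for DBE_expand_string() (illustrative only; the
    # variable names/values are assumptions and presume the enclosing class
    # was constructed from an argv-style list such as
    # ["prog", "x=4699", "y=12", "title=Marmorbild"]):
    #
    #     self.DBE_expand_string("${+ ${x} ${y}} -- ${title}")
    #     # -> "4.711000e+03 -- Marmorbild"
    #     # (arithmetic operators format their result with '%e'; a plain
    #     #  '${name}' expands to the variable's original string)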
def DBE_expand_string(self, String):
"""Parses for closing operators '${ }' and expands them letting
white spaces and other letters as they are."""
new_string = ""
open_brackets = 0
for i in range(len(String)):
if i < len(String)-2 and String[i:i+2] == "${":
if open_brackets == 0: first = i+2;
open_brackets += 1;
elif String[i] == "}" and open_brackets > 0:
open_brackets -= 1
if open_brackets == 0:
new_string += self.DBE_expand(String[first:i])
elif open_brackets == 0:
new_string += String[i]
return new_string
def DBE_get_expr_list(self, String, ExpectedNumber):
"""Separates expressions by non-bracketed whitespaces, expands them
and puts them into a list."""
i = 0
# (1) eat initial whitespaces
for letter in String:
if letter != " " and letter != "\t" and letter != "\n":
break
i += 1
expr_list = []
open_brackets = 0
start_idx = []
start_new_string = i
L = len(String)
# (2) search for ${ } expressions ...
while i < L:
letter = String[i]
# whitespace -> end of expression
if (letter == " " or letter == "\t" or letter == "\n") \
and open_brackets == 0:
expr_list.append(String[start_new_string:i])
for i in range(i+1, L):
letter = String[i]
if letter != " " and letter != "\t" and letter != "\n":
start_new_string = i
break
else:
# end of expression list
if len(expr_list) < ExpectedNumber:
expr_list.extend(["<< ${ }: missing arguments>>"] * (ExpectedNumber - len(expr_list)))
return expr_list
# dollar-bracket expression
if len(String) >= i+2 and String[i:i+2] == "${":
open_brackets += 1
start_idx.append(i+2)
elif letter == "}" and open_brackets > 0:
start = start_idx.pop()
Replacement = self.DBE_expand(String[start:i])
if start-2 <= 0: String = Replacement + String[i+1:]
else: String = String[:start-2] + Replacement + String[i+1:]
L = len(String)
i = start + len(Replacement) - 3
open_brackets -= 1
i += 1
expr_list.append(String[start_new_string:i])
if len(expr_list) < ExpectedNumber:
expr_list.extend(["<< ${ }: missing arguments>>"] * (ExpectedNumber - len(expr_list)))
return expr_list
def DBE_get_variable(self, VarName):
SECURE_Prefix = self.__prefix
for p in [self.section, ""]:
self.__prefix = p
# (1) first search in currently active section
# (2) search in root name space
var = self.__find_variable(VarName)
if type(var) != type(None):
self.__prefix = SECURE_Prefix
return var
self.__prefix = SECURE_Prefix
return "<<${ } variable '%s' undefined>>" % VarName
def DBE_expand(self, Expr):
# ${: } pure text
if Expr[0] == ":":
return Expr[1:]
        # ${& expr expr ... } text concatenation
elif Expr[0] == "&":
A = self.DBE_get_expr_list(Expr[1:], 1)
return reduce(lambda a,b: "%s%s" % (a, b), A)
# ${<-> expr expr expr} text replacement
elif len(Expr) >= 3 and Expr[0:3] == "<->":
A = self.DBE_get_expr_list(Expr[3:], 3)
# string old new
return string.replace(A[0], A[1], A[2])
# ${+ ...}, ${- ...}, ${* ...}, ${/ ...} expressions
elif Expr[0] == "+":
A = self.DBE_get_expr_list(Expr[1:], 2)
return "%e" % (reduce(lambda a, b:
self.__convert_to_type(a, 0.) + self.__convert_to_type(b,0.),
A))
elif Expr[0] == "-":
A = self.DBE_get_expr_list(Expr[1:], 2)
return "%e" % reduce(lambda a, b:
self.__convert_to_type(a, 0.) - self.__convert_to_type(b,0.),
A)
elif Expr[0] == "*":
A = self.DBE_get_expr_list(Expr[1:], 2)
return "%e" % reduce(lambda a, b:
self.__convert_to_type(a, 0.) * self.__convert_to_type(b,0.),
A)
elif Expr[0] == "/":
A = self.DBE_get_expr_list(Expr[1:], 2)
Q = self.__convert_to_type(A[0], 0.)
if Q == 0: return repr(0.)
for q in A[1:]:
q = self.__convert_to_type(q, 0.)
if q == 0.0: return repr(0.)
Q /= q
return "%e" % Q
# ${^ ... } power expressions
elif Expr[0] == "^":
A = self.DBE_get_expr_list(Expr[1:], 2)
return "%e" % reduce(lambda a, b:
self.__convert_to_type(a, 0.) ** self.__convert_to_type(b,0.),
A)
        # ${== } ${<= } ${>= } comparisons (return the number of the first 'match')
elif len(Expr) >= 2 and \
(Expr[0:2] == "==" or Expr[0:2] == ">=" or Expr[0:2] == "<=" or \
Expr[0:1] == ">" or Expr[0:1] == "<"):
# differentiate between two and one sign operators
if Expr[1] == "=": OP = Expr[0:2]; A = self.DBE_get_expr_list(Expr[2:], 2)
else: OP = Expr[0]; A = self.DBE_get_expr_list(Expr[1:], 2)
x_orig = A[0]
x = self.__convert_to_type(x_orig, 1e37)
i = 1
for y_orig in A[1:]:
y = self.__convert_to_type(y_orig, 1e37)
# set the strings as reference if one wasn't a number
if x == 1e37 or y == 1e37: xc = x_orig; y = y_orig;
else: xc = x
if OP == "==" and xc == y: return repr(i)
elif OP == ">=" and xc >= y: return repr(i)
elif OP == "<=" and xc <= y: return repr(i)
elif OP == ">" and xc > y: return repr(i)
elif OP == "<" and xc < y: return repr(i)
i += 1
# nothing fulfills the condition => return 0
return repr(0)
# ${?? expr expr} select
elif len(Expr) >=2 and Expr[0:2] == "??":
A = self.DBE_get_expr_list(Expr[2:], 2)
X = self.__convert_to_type(A[0], 1e37)
# last element is always the default argument
if X == 1e37 or X < 0 or X >= len(A) - 1:
return A[-1]
# round X to closest integer
return A[int(X+0.5)]
# ${? expr expr expr} if then else conditions
elif Expr[0] == "?":
A = self.DBE_get_expr_list(Expr[1:], 2)
if self.__convert_to_type(A[0], 0.0) == 1.0: return A[1]
elif len(A) > 2: return A[2]
        # ${! expr} macro expansion
elif Expr[0] == "!":
Var = self.DBE_get_variable(Expr[1:])
# error
if type(Var) == type(""): return Var
A = self.DBE_get_expr_list(Var.original, 2)
return A[0]
# ${@: } - string subscription
elif len(Expr) >= 2 and Expr[0:2] == "@:":
A = self.DBE_get_expr_list(Expr[2:], 2)
X = self.__convert_to_type(A[1], 1e37)
# last element is always the default argument
if X == 1e37 or X < 0 or X >= len(A[0]) - 1:
return "<<1st index out of range>>"
if len(A) > 2:
Y = self.__convert_to_type(A[2], 1e37)
if Y != 1e37 and Y > 0 and Y <= len(A[0]) - 1 and Y > X:
return A[0][int(X+0.5):int(Y+1.5)]
elif Y == -1:
return A[0][int(X+0.5):]
return "<<2nd index out of range>>"
else:
return A[0][int(X+0.5)]
# ${@ } - vector subscription
elif Expr[0] == "@":
A = self.DBE_get_expr_list(Expr[1:], 2)
Var = self.DBE_get_variable(A[0])
# error
if type(Var) == type(""): return Var
X = self.__convert_to_type(A[1], 1e37)
# last element is always the default argument
if X == 1e37 or X < 0 or X >= len(Var.value):
return "<<1st index out of range>>"
if len(A) > 2:
Y = self.__convert_to_type(A[2], 1e37)
if Y != 1e37 and Y > 0 and Y <= len(Var.value) and Y > X:
Vec = Var.value[int(X+0.5):int(Y+1.5)]
elif Y == -1:
Vec = Var.value[int(X+0.5):]
else:
return "<<2nd index out of range>>"
return reduce(lambda a,b: "%s %s" % (a,b), Vec)
else:
return Var.value[int(X+0.5)]
A = self.DBE_get_expr_list(copy.copy(Expr), 1)
B = self.DBE_get_variable(A[0])
if type(B) == type(""): return B
else: return B.original
# (*) unidentified flying objects
def unidentified_arguments(self, *Knowns):
known_x = []
# convert lists
for k in Knowns:
if type(k) == list: known_x.extend(k)
else: known_x.append(k)
ufos = []
for it in self.argv[1:]:
arg = self.__get_remaining_string(it, self.__prefix)
if arg not in known_x: ufos.append(it)
return ufos
def unidentified_options(self, *Knowns):
known_x = []
# convert lists
for k in Knowns:
if type(k) == list: known_x.extend(k)
else: known_x.append(k)
ufos = []
for it in self.argv[1:]:
arg = self.__get_remaining_string(it, self.__prefix)
if len(arg) < 2 or arg[0] != '-': continue
if arg not in known_x: ufos.append(it)
return ufos
def unidentified_flags(self, KnownFlags, ArgumentNumber=-1):
ufos = ""
if ArgumentNumber == -1:
# (*) search through all options with one single '-'
for it in self.argv[1:]:
arg = self.__get_remaining_string(it, self.__prefix)
if len(arg) < 2: continue
elif arg[0] != '-': continue
elif arg[1] == '-': continue
for letter in arg[1:]:
if letter not in KnownFlags: ufos += letter;
else:
no_matches = 0
for it in self.argv[1:]:
Remain = self.__get_remaining_string(it, self.__prefix)
if Remain != "":
no_matches += 1
if no_matches == ArgumentNumber:
# -- the right argument number inside the section is found
# => check it for flags
for letter in Remain:
if letter not in KnownFlags: ufos += letter;
return ufos
return ufos
def unidentified_variables(self, *Knowns):
ufos = []
for it in self.variables:
var_name = self.__get_remaining_string(it.name, self.__prefix)
if var_name not in Knowns: ufos.append(it.name)
return ufos
def unidentified_sections(self, *Knowns):
ufos = []
for it in self.section_list:
sec_name = self.__get_remaining_string(it, self.__prefix)
if sec_name not in Knowns: ufos.append(it)
return ufos
def unidentified_nominuses(self, Knowns):
ufos = []
for it in self.argv[1:]:
arg = self.__get_remaining_string(it, self.__prefix)
# only 'real nominuses'
if len(arg) < 1 or arg[0] == '-': continue
elif arg[0] == '[' and arg[-1] == ']': continue # section label
elif '=' in arg: continue # variable definition
if arg not in Knowns: ufos.append(it)
return ufos
|
{
"content_hash": "91bf5b3979b37cbee5888748721232e0",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 110,
"avg_line_length": 38.265877287405814,
"alnum_prop": 0.4839517285999606,
"repo_name": "coderjames/pascal",
"id": "64908138a4c394a54261965deccf7c1dfd4c3e20",
"size": "36594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quex-0.63.1/quex/input/command_line/GetPot.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "194851"
},
{
"name": "C++",
"bytes": "78624"
},
{
"name": "Delphi",
"bytes": "5659"
},
{
"name": "Python",
"bytes": "1350210"
}
],
"symlink_target": ""
}
|
from pymel.core import *
import math as m
def removeDuplicates(seq):
# Not order preserving
myset = set(seq)
return list(myset)
def processLightMaps(tiles, camera, bakeSet, bakeGroup, bakeObject):
import maya.mel as mel
myBakeSet = ls (bakeSet, type='textureBakeSet')
myBakeSet[0].uvRange.set(2)
for i in tiles:
rawid = i-1001
tu = int (rawid % 10)
tv = int (m.floor(rawid/10))
myBakeSet[0].prefix.set('baked_%s' % str(i))
myBakeSet[0].uMin.set(tu)
myBakeSet[0].uMax.set(tu+1)
myBakeSet[0].vMin.set(tv)
myBakeSet[0].vMax.set(tv+1)
mel.eval( 'convertLightmap -camera %s -bo %s %s %s' % (camera, bakeSet, bakeGroup, bakeObject) )
def getTiles(bakeObject):
import pymel.core.runtime as pyrt
udims = []
select (bakeObject)
sizeUVs = polyEvaluate (uv=True)
pyrt.ConvertSelectionToUVs()
getUVs = ls(sl=True, fl=True)
removeUVs = getUVs
while (sizeUVs > 0):
select (removeUVs[0])
pyrt.SelectUVShell()
shellUVs = ls (sl=True, fl=True)
UVs = polyEvaluate (bc2=True)
SS = m.floor (UVs[0][0])
TT = m.floor (UVs[1][0])
TT = m.fabs (TT)
udim = int (TT * 10 + SS + 1001)
udims.append(udim)
removeUVs = list (set(removeUVs) - set(shellUVs))
sizeUVs = len(removeUVs)
return removeDuplicates (udims)
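# Worked example of the UDIM numbering used in getTiles() above (illustrative
# only): a UV shell sitting in the tile 2 <= U < 3, 1 <= V < 2 gives SS = 2
# and TT = 1, so udim = 1*10 + 2 + 1001 = 1013 -- matching the standard UDIM
# convention where 1001 is the origin tile, +1 per U tile and +10 per V row.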
def checkBakeSet(bakeSet):
status = True
if objExists(bakeSet) == False:
print("Warning: no bakeset exists with that name!")
status = False
return status
def getBakeConnection(myObj):
status = False
lsShape = myObj.getShape()
lsConnections = lsShape.outputs()
for i in lsConnections:
if i.type() == 'textureBakeSet':
status = i.name()
if status == False:
print 'not connected to bake set'
return status
def getSGConnection(myObj):
status = False
lsShape = myObj.getShape()
lsConnections = lsShape.outputs()
for i in lsConnections:
if i.type() == 'shadingEngine':
status = i.name()
if status == False:
print 'not connected to shading group'
return status
def bakeTiles(camera='persp', shadingGroup=False, bakeSet=False):
selectedObjects = ls (sl=True, fl=True)
for myObj in selectedObjects:
if bakeSet == False:
bakeSet = getBakeConnection(myObj)
if shadingGroup == False:
shadingGroup = getSGConnection(myObj)
        # all three pieces must be valid before attempting the bake
        if bakeSet != False and shadingGroup != False and checkBakeSet(bakeSet) != False:
tiles = getTiles (myObj)
processLightMaps(tiles, camera, bakeSet, shadingGroup, myObj)
select (selectedObjects)
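# Example invocation (hypothetical names -- the camera, shading group and bake
# set must already exist in the open Maya scene):
#
#     select('myAsset_GEO')
#     bakeTiles(camera='bakeCam', shadingGroup='myAssetSG', bakeSet='myAssetBakeSet')
#
# With no arguments, bakeTiles() falls back to whatever bake set / shading
# group the selected shapes are already connected to.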
|
{
"content_hash": "8ff68415c0b21faa086374cdbda3afb6",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 98,
"avg_line_length": 27.123595505617978,
"alnum_prop": 0.6980115990057995,
"repo_name": "aaronfang/personal_scripts",
"id": "f56bb4c4a4fd455c2eb274c1c35f9b5151b6751c",
"size": "5523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "af_scripts/tmp/batchBakeUDIM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "319303"
},
{
"name": "Python",
"bytes": "154066"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
class PointManager(models.Manager):
"""Manager for Pressure Points."""
def recently_added(self, count=10):
return self.order_by('-time_added')[:count]
class City(models.Model):
"""City the Pressure Point belong to."""
name = models.CharField(max_length=200)
slug = models.SlugField()
lat = models.FloatField()
lon = models.FloatField()
def __unicode__(self):
return self.name
class Point(models.Model):
"""Pressure Point model.
The pressure points are the core concept of the app. They're small cases
that the community shares, discusses about and eventually, take action
upon in order to improve the quality of life.
"""
title = models.CharField(max_length=200)
lat = models.FloatField()
lon = models.FloatField()
description = models.TextField()
# descriptive address or directions on how to find the Point
directions = models.TextField()
time_added = models.DateTimeField()
# simple voting mechanism (like/dislike)
thumbsup = models.IntegerField()
thumbsdown = models.IntegerField()
# foreign keys
poster = models.ForeignKey(User)
city = models.ForeignKey(City)
# managers
objects = PointManager()
def __unicode__(self):
return "%s x %s" % (self.lat, self.lon)
class Photo(models.Model):
"""Photo objects illustrating Pressure Points."""
time_added = models.DateTimeField()
thumbnail = models.ImageField(upload_to='upload/thumbnails', blank=True)
original = models.ImageField(upload_to='upload/original')
is_main = models.BooleanField()
poster = models.ForeignKey(User)
point = models.ForeignKey(Point, related_name='photos')
def save(self, *args, **kwargs):
if self.id is None:
self.thumbnail = self.original
super(Photo, self).save(*args, **kwargs)
class FeatureManager(models.Manager):
"""Manager for Feature objects."""
def current(self):
now = datetime.now()
return self.filter(start_time__lt=now, end_time__gt=now)
class Feature(models.Model):
"""Pressure Point features on the home page."""
start_time = models.DateTimeField()
end_time = models.DateTimeField()
point = models.ForeignKey(Point, related_name='features')
objects = FeatureManager()
class Resolution(models.Model):
"""Resolution objects describe how a Pressure Point was closed."""
description = models.TextField()
time_resolved = models.DateTimeField()
point = models.OneToOneField(Point, related_name='resolution')
|
{
"content_hash": "1f4104644d05a3f6e12f340174deae91",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 76,
"avg_line_length": 31.186046511627907,
"alnum_prop": 0.6797166293810589,
"repo_name": "stasm/akupunktura",
"id": "f72a355540cc984f158300eaacfdbb6e9b3587e6",
"size": "2682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/points/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9516"
}
],
"symlink_target": ""
}
|
r"""
==========================================
B701: Test for not auto escaping in jinja2
==========================================
Jinja2 is a Python HTML templating system. It is typically used to build web
applications, though it appears in other places as well, notably the Ansible
automation system. When configuring the Jinja2 environment, the option to use
autoescaping on input can be specified. When autoescaping is enabled, Jinja2
will filter input strings to escape any HTML content submitted via template
variables. Without escaping HTML input the application becomes vulnerable to
Cross Site Scripting (XSS) attacks.
Unfortunately, autoescaping is False by default. Thus this plugin test will
warn on omission of an autoescape setting, as well as an explicit setting of
false. A HIGH severity warning is generated in either of these scenarios.
:Example:
.. code-block:: none
>> Issue: Using jinja2 templates with autoescape=False is dangerous and can
lead to XSS. Use autoescape=True to mitigate XSS vulnerabilities.
Severity: High Confidence: High
Location: ./examples/jinja2_templating.py:11
10 templateEnv = jinja2.Environment(autoescape=False,
loader=templateLoader)
11 Environment(loader=templateLoader,
12 load=templateLoader,
13 autoescape=False)
14
>> Issue: By default, jinja2 sets autoescape to False. Consider using
autoescape=True or use the select_autoescape function to mitigate XSS
vulnerabilities.
Severity: High Confidence: High
Location: ./examples/jinja2_templating.py:15
14
15 Environment(loader=templateLoader,
16 load=templateLoader)
17
18 Environment(autoescape=select_autoescape(['html', 'htm', 'xml']),
19 loader=templateLoader)
.. seealso::
- `OWASP XSS <https://www.owasp.org/index.php/Cross-site_Scripting_(XSS)>`_
- https://realpython.com/blog/python/primer-on-jinja-templating/
- http://jinja.pocoo.org/docs/dev/api/#autoescaping
- https://security.openstack.org
- https://security.openstack.org/guidelines/dg_cross-site-scripting-xss.html
.. versionadded:: 0.10.0
"""
import ast
import bandit
from bandit.core import test_properties as test
@test.checks('Call')
@test.test_id('B701')
def jinja2_autoescape_false(context):
# check type just to be safe
if isinstance(context.call_function_name_qual, str):
qualname_list = context.call_function_name_qual.split('.')
func = qualname_list[-1]
if 'jinja2' in qualname_list and func == 'Environment':
for node in ast.walk(context.node):
if isinstance(node, ast.keyword):
# definite autoescape = False
if (getattr(node, 'arg', None) == 'autoescape' and
(getattr(node.value, 'id', None) == 'False' or
getattr(node.value, 'value', None) is False)):
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.HIGH,
text="Using jinja2 templates with autoescape="
"False is dangerous and can lead to XSS. "
"Use autoescape=True or use the "
"select_autoescape function to mitigate XSS "
"vulnerabilities."
)
# found autoescape
if getattr(node, 'arg', None) == 'autoescape':
value = getattr(node, 'value', None)
if (getattr(value, 'id', None) == 'True' or
getattr(value, 'value', None) is True):
return
# Check if select_autoescape function is used.
elif isinstance(value, ast.Call) and getattr(
value.func, 'id', None) == 'select_autoescape':
return
else:
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.MEDIUM,
text="Using jinja2 templates with autoescape="
"False is dangerous and can lead to XSS. "
"Ensure autoescape=True or use the "
"select_autoescape function to mitigate "
"XSS vulnerabilities."
)
# We haven't found a keyword named autoescape, indicating default
# behavior
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.HIGH,
text="By default, jinja2 sets autoescape to False. Consider "
"using autoescape=True or use the select_autoescape "
"function to mitigate XSS vulnerabilities."
)
|
{
"content_hash": "9f9ad06d1256ecd29556c9d6f473629c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 44.54782608695652,
"alnum_prop": 0.5621706031622097,
"repo_name": "pombredanne/bandit",
"id": "2adc77a60a6f6bd9f86d18bcf190164ae6c1c630",
"size": "5755",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bandit/plugins/jinja2_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "476344"
},
{
"name": "Shell",
"bytes": "1286"
}
],
"symlink_target": ""
}
|
import functools
import itertools
import os
import re
import urlparse
from oslo.config import cfg
import webob
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import quota
osapi_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='the maximum number of items returned in a single '
'response from a collection resource'),
cfg.StrOpt('osapi_compute_link_prefix',
help='Base URL that will be presented to users in links '
'to the OpenStack Compute API'),
cfg.StrOpt('osapi_glance_link_prefix',
help='Base URL that will be presented to users in links '
'to glance resources'),
]
CONF = cfg.CONF
CONF.register_opts(osapi_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
_STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
task_states.MIGRATING: 'MIGRATING',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
vm_states.STOPPED: {
'default': 'SHUTOFF',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.RESIZED: {
'default': 'VERIFY_RESIZE',
# Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
# state so we comment that out for future reference only.
#task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
},
vm_states.PAUSED: {
'default': 'PAUSED',
},
vm_states.SUSPENDED: {
'default': 'SUSPENDED',
},
vm_states.RESCUED: {
'default': 'RESCUE',
},
vm_states.ERROR: {
'default': 'ERROR',
},
vm_states.DELETED: {
'default': 'DELETED',
},
vm_states.SOFT_DELETED: {
'default': 'SOFT_DELETED',
},
vm_states.SHELVED: {
'default': 'SHELVED',
},
vm_states.SHELVED_OFFLOADED: {
'default': 'SHELVED_OFFLOADED',
},
}
def status_from_state(vm_state, task_state='default'):
"""Given vm_state and task_state, return a status string."""
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
status = task_map.get(task_state, task_map['default'])
if status == "UNKNOWN":
LOG.error(_("status is UNKNOWN from vm_state=%(vm_state)s "
"task_state=%(task_state)s. Bad upgrade or db "
"corrupted?"),
{'vm_state': vm_state, 'task_state': task_state})
return status
def task_and_vm_state_from_status(status):
"""Map the server status string to list of vm states and
list of task states.
"""
vm_states = set()
task_states = set()
for state, task_map in _STATE_MAP.iteritems():
for task_state, mapped_state in task_map.iteritems():
status_string = mapped_state
if status.lower() == status_string.lower():
vm_states.add(state)
task_states.add(task_state)
# Add sort to avoid different order on set in Python 3
return sorted(vm_states), sorted(task_states)
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_int_param(request, 'limit')
if 'page_size' in request.GET:
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_int_param(request, param):
"""Extract integer param from request or fail."""
try:
int_param = int(request.GET[param])
except ValueError:
msg = _('%s param must be an integer') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
if int_param < 0:
msg = _('%s param must be positive') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
return int_param
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
"""get limited parameter from request."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
limit, marker = get_limit_and_marker(request, max_limit)
limit = min(max_limit, limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
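# Quick sketch of the two pagination helpers above (hypothetical query strings):
#
#     limited(items, request)            # ?offset=20&limit=10 -> items[20:30]
#     limited_by_marker(items, request)  # ?marker=<id of last item seen>&limit=10
#                                        # -> the 10 items *after* the marker
#
# Both fall back to CONF.osapi_max_limit when no usable limit is supplied.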
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.nova.com/v1.1/123'
Returns: 'http://www.nova.com/123'
Given: 'http://www.nova.com/v1.1'
Returns: 'http://www.nova.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = _('href %s does not contain version') % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def check_img_metadata_properties_quota(context, metadata):
if not metadata:
return
try:
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
headers={'Retry-After': 0})
# check the key length.
if isinstance(metadata, dict):
for key, value in metadata.iteritems():
if len(key) == 0:
expl = _("Image metadata key cannot be blank")
raise webob.exc.HTTPBadRequest(explanation=expl)
if len(key) > 255:
expl = _("Image metadata key too long")
raise webob.exc.HTTPBadRequest(explanation=expl)
else:
expl = _("Invalid image metadata")
raise webob.exc.HTTPBadRequest(explanation=expl)
def dict_to_query_str(params):
# TODO(throughnothing): we should just use urllib.urlencode instead of this
# But currently we don't work with urlencoded url's
param_str = ""
for key, val in params.iteritems():
param_str = param_str + '='.join([str(key), str(val)]) + '&'
return param_str.rstrip('&')
def get_networks_for_instance_from_nw_info(nw_info):
networks = {}
for vif in nw_info:
ips = vif.fixed_ips()
floaters = vif.floating_ips()
label = vif['network']['label']
if label not in networks:
networks[label] = {'ips': [], 'floating_ips': []}
networks[label]['ips'].extend(ips)
networks[label]['floating_ips'].extend(floaters)
for ip in itertools.chain(networks[label]['ips'],
networks[label]['floating_ips']):
ip['mac_address'] = vif['address']
return networks
def get_networks_for_instance(context, instance):
"""Returns a prepared nw_info list for passing into the view builders
We end up with a data structure like::
{'public': {'ips': [{'address': '10.0.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '2001::1',
'version': 6,
'mac_address': 'aa:aa:aa:aa:aa:aa'}],
'floating_ips': [{'address': '172.16.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '172.16.2.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'}]},
...}
"""
nw_info = compute_utils.get_nw_info_for_instance(instance)
return get_networks_for_instance_from_nw_info(nw_info)
def raise_http_conflict_for_instance_invalid_state(exc, action):
"""Return a webob.exc.HTTPConflict instance containing a message
appropriate to return via the API based on the original
InstanceInvalidState exception.
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
not_launched = exc.kwargs.get('not_launched')
if attr and state:
msg = _("Cannot '%(action)s' while instance is in %(attr)s "
"%(state)s") % {'action': action, 'attr': attr, 'state': state}
elif not_launched:
msg = _("Cannot '%s' an instance which has never been active") % action
else:
# At least give some meaningful message
msg = _("Instance is in an invalid state for '%s'") % action
raise webob.exc.HTTPConflict(explanation=msg)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = xmlutil.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = xmlutil.safe_minidom_parse_string(text)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request."""
if metadata_node is None:
return {}
metadata = {}
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
def _extract_metadata_container(self, datastring):
dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
def create(self, datastring):
return self._extract_metadata_container(datastring)
def update_all(self, datastring):
return self._extract_metadata_container(datastring)
def update(self, datastring):
dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
root = xmlutil.TemplateElement('meta', selector=sel)
root.set('key', 0)
root.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return True
class MetadataTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = MetadataTemplateElement('metadata', selector='metadata')
elem = xmlutil.SubTemplateElement(root, 'meta',
selector=xmlutil.get_items)
elem.set('key', 0)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
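# For reference, serializing {'metadata': {'kernel_id': 'foo'}} (hypothetical
# key/value) through the templates above yields XML along the lines of:
#
#     <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
#         <meta key="kernel_id">foo</meta>
#     </metadata>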
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.allow_instance_snapshots:
LOG.warn(_('Rejecting snapshot request, snapshots currently'
' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
return inner
class ViewBuilder(object):
"""Model API responses as dictionaries."""
def _get_project_id(self, request):
"""
Get project id from request url if present or empty string
otherwise
"""
project_id = request.environ["nova.context"].project_id
if project_id in request.url:
return project_id
return ''
def _get_links(self, request, identifier, collection_name):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier, collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
}]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_compute_link_prefix(request.application_url)
url = os.path.join(prefix,
self._get_project_id(request),
collection_name)
return "%s?%s" % (url, dict_to_query_str(params))
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_compute_link_prefix(request.application_url)
return os.path.join(prefix,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_compute_link_prefix(base_url)
return os.path.join(base_url,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_collection_links(self,
request,
items,
collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable."""
links = []
limit = int(request.params.get("limit", 0))
if limit and limit == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
elif 'id' in last_item:
last_item_id = last_item["id"]
else:
last_item_id = last_item["flavorid"]
links.append({
"rel": "next",
"href": self._get_next_link(request,
last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return urlparse.urlunsplit(url_parts)
def _update_glance_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_glance_link_prefix)
def _update_compute_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_compute_link_prefix)
|
{
"content_hash": "06f42164e7c874d2c947518d313829f2",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 79,
"avg_line_length": 35.602914389799636,
"alnum_prop": 0.5841092806712371,
"repo_name": "sacharya/nova",
"id": "ba3cb075db609602766811adb6a3db7575195ee8",
"size": "20227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
from StringIO import StringIO
from Registry import Registry, RegistryLog
def print_test_testAAAA_testBBBB(reg):
try:
reg.root().find_key('testAAAA')
print('testAAAA found!')
except Exception:
print('testAAAA not found!')
try:
reg.root().find_key('testBBBB')
print('testBBBB found!')
except Exception:
print('testBBBB not found!')
def print_test_fdenytsconnections(reg):
val = reg.root().find_key('ControlSet001\\Control\\Terminal Server').value('fDenyTSConnections').value()
print('fDenyTSConnections = ' + str(val))
if len(sys.argv) != 4:
print('You need to specify 3 files to test!')
sys.exit(255)
primary_filepath = sys.argv[1]
log1_filepath = sys.argv[2]
log2_filepath = sys.argv[3]
primary = StringIO()
with open(primary_filepath, 'rb') as f:
primary.write(f.read())
primary.seek(0)
log1 = RegistryLog.RegistryLog(primary, log1_filepath)
primary.seek(0)
log2 = RegistryLog.RegistryLog(primary, log2_filepath)
primary.seek(0)
reg = Registry.Registry(primary)
# Run the tests for the first time
print_test_testAAAA_testBBBB(reg)
print_test_fdenytsconnections(reg)
r = reg._regf.recovery_required()
if not (r.recover_header or r.recover_data):
print('Recovery not required!')
sys.exit(0)
if not r.recover_header:
print('Current hbins size: ' + str(reg._regf.hbins_size()))
print('Header recovery: ' + str(r.recover_header))
print('Data recovery: ' + str(r.recover_data))
apply_first = False
apply_second = False
logs_count = 0
if log1.is_eligible_log():
logs_count += 1
apply_first = True
if log2.is_eligible_log():
logs_count += 1
apply_second = True
print('Eligible log files count: ' + str(logs_count))
if logs_count == 1:
if apply_first:
print('Applying the first log')
seqnum = log1.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
elif apply_second:
print('Applying the second log')
seqnum = log2.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
else:
print('Bug!')
elif logs_count == 2:
first_then_second = log1.is_starting_log(log2)
if (not r.recover_header) and first_then_second:
print('Applying the first log')
seqnum = log1.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
print('Applying the second log')
seqnum = log2.recover_hive_continue(seqnum + 1)
print('Finishing with sequence number = ' + str(seqnum))
elif (not r.recover_header):
print('Applying the second log')
seqnum = log2.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
print('Applying the first log')
seqnum = log1.recover_hive_continue(seqnum + 1)
print('Finishing with sequence number = ' + str(seqnum))
else:
if first_then_second:
print('Applying the second log')
seqnum = log2.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
else:
print('Applying the first log')
seqnum = log1.recover_hive()
print('Finishing with sequence number = ' + str(seqnum))
primary.seek(0)
reg = Registry.Registry(primary)
# Run the tests again
print_test_testAAAA_testBBBB(reg)
print_test_fdenytsconnections(reg)
# Print the final values of the updated REGF block
print('hive_sequence1 = ' + str(reg._regf.hive_sequence1()))
print('hive_sequence2 = ' + str(reg._regf.hive_sequence2()))
r = reg._regf.recovery_required()
if not (r.recover_header or r.recover_data):
print('Recovery not required!')
print('Current hbins size: ' + str(reg._regf.hbins_size()))
else:
print('Recovery is required! Bug!')
print('REGF block checksum written: ' + str(reg._regf.checksum()))
print('REGF block checksum calculated: ' + str(reg._regf.calculate_checksum()))
|
{
"content_hash": "b05198c951c18ffd43a5d675e2d12759",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 108,
"avg_line_length": 31.440944881889763,
"alnum_prop": 0.6574004507888805,
"repo_name": "zweger/python-registry",
"id": "9f523ba8eb264a1ae1993d8a3b3c457c6c9f834a",
"size": "4016",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testing/TransactionLogFiles.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102923"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from core.models import Parent, Child, Unrelated
class CascadeDeleteTests(TestCase):
def setUp(self):
self.parent = Parent(name="Joe")
self.parent.save()
self.child = Child(name="John-Boy", parent=self.parent)
self.child.save()
def test_delete_child(self):
"""Delete the child object only."""
self.child.delete()
# confirm that child has been deleted
self.assertTrue(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_delete_parent_cascades(self):
"""Call the parent.delete() method."""
self.parent.delete()
        # confirm that both parent and child have been deleted
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_delete_parent_cascade_multiple(self):
"""Call the parent.delete() method with many children."""
Child(name=u"Bob", parent=self.parent).save()
Child(name=u"Gob", parent=self.parent).save()
Child(name=u"Lob", parent=self.parent).save()
self.parent.delete()
        # confirm that the parent and all children have been deleted
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_delete_parent_child_fail(self):
"""Call the parent.delete() and fail on a child delete."""
Child(name=u"Bob", parent=self.parent).save()
Child(name=u"Gob", parent=self.parent).save()
Child(name=u"Lob", parent=self.parent).save()
Child(name=u"Job", parent=self.parent).save()
self.assertRaises(Exception, self.parent.delete)
# confirm that nothing has been deleted
self.assertTrue(Parent.objects.exists())
self.assertEqual(Child.objects.count(), 5)
def test_delete_parent_parent_fail(self):
"""Call the parent.delete() and fail on parent deletion."""
        self.parent.name = u"Job"
self.parent.save()
self.assertRaises(Exception, self.parent.delete)
# confirm that nothing has been deleted
self.assertTrue(Parent.objects.exists())
self.assertEqual(Child.objects.count(), 1)
def test_delete_everything(self):
"""Call the parent.delete() and fail on parent deletion."""
Child(name="Baby", parent=self.parent).save()
Unrelated(name="Igor").save()
self.parent.delete()
# confirm that everything has been deleted.
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
self.assertFalse(Unrelated.objects.exists())
def test_delete_everything_fail(self):
"""Call the parent.delete() and fail on parent deletion."""
Child(name="Baby", parent=self.parent).save()
Child(name="Job", parent=self.parent).save()
Unrelated(name="Igor").save()
self.assertRaises(Exception, self.parent.delete)
        # confirm that nothing has been deleted.
self.assertTrue(Parent.objects.exists())
self.assertTrue(Child.objects.exists())
self.assertTrue(Unrelated.objects.exists())
|
{
"content_hash": "92e52060b8bd035d04df82cbec7b2120",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 67,
"avg_line_length": 41.09090909090909,
"alnum_prop": 0.6479140328697851,
"repo_name": "yunojuno/cascade",
"id": "f7ce20db0accae33b49e9406bd7a3395457dbe0b",
"size": "3188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/tests.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from selenium.webdriver.common.by import By
class ProductPage():
def __init__(self, driver):
self.driver = driver
def linkhome(self):
return self.driver.find_element(By.LINK_TEXT, 'Home')
def addtocart(self):
return self.driver.find_element(By.NAME, 'add_cart_product')
|
{
"content_hash": "dc171b81ab681258bc0ef749ada6b276",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.6231003039513677,
"repo_name": "helenjon/sel-5",
"id": "3ebbbe2c705935812e3ea051e217713548623bf5",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "six_module/product_page.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31123"
}
],
"symlink_target": ""
}
|
from django.shortcuts import redirect, render
from ..models import Reminder
def review(request, pk):
reminder = Reminder.objects.get(pk=pk)
if request.method == "POST":
reminder.page_reviewed = True
reminder.save()
return redirect('wagtailadmin_home')
return render(request, 'wagtailrelevancy/review.html', {
'request': request,
'reminder': reminder,
})
|
{
"content_hash": "99b75f8d28f22378648f2c3a956a07ba",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 24.41176470588235,
"alnum_prop": 0.653012048192771,
"repo_name": "takeflight/wagtail-relevancy",
"id": "f89fafef1e81ca248153a6090755d9afd2a75449",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailrelevancy/views/actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7257"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "10707"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.contrib.sites.models import get_current_site
from django.template import TemplateSyntaxError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from oscar.core.loading import get_class, get_model
CommunicationEventType = get_model('customer', 'CommunicationEventType')
CommunicationEventTypeForm = get_class('dashboard.communications.forms',
'CommunicationEventTypeForm')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class ListView(generic.ListView):
model = CommunicationEventType
template_name = 'dashboard/comms/list.html'
context_object_name = 'commtypes'
class UpdateView(generic.UpdateView):
model = CommunicationEventType
form_class = CommunicationEventTypeForm
template_name = 'dashboard/comms/detail.html'
context_object_name = 'commtype'
success_url = '.'
slug_field = 'code'
def form_invalid(self, form):
messages.error(self.request,
_("The submitted form was not valid, please correct "
"the errors and resubmit"))
return super(UpdateView, self).form_invalid(form)
def form_valid(self, form):
if 'send_preview' in self.request.POST:
return self.send_preview(form)
if 'show_preview' in self.request.POST:
return self.show_preview(form)
messages.success(self.request, _("Email saved"))
return super(UpdateView, self).form_valid(form)
def get_messages_context(self, form):
ctx = {'user': self.request.user,
'site': get_current_site(self.request)}
ctx.update(form.get_preview_context())
return ctx
def show_preview(self, form):
ctx = super(UpdateView, self).get_context_data()
ctx['form'] = form
commtype = form.save(commit=False)
commtype_ctx = self.get_messages_context(form)
try:
msgs = commtype.get_messages(commtype_ctx)
except TemplateSyntaxError as e:
form.errors['__all__'] = form.error_class([six.text_type(e)])
return self.render_to_response(ctx)
ctx['show_preview'] = True
ctx['preview'] = msgs
return self.render_to_response(ctx)
def send_preview(self, form):
ctx = super(UpdateView, self).get_context_data()
ctx['form'] = form
commtype = form.save(commit=False)
commtype_ctx = self.get_messages_context(form)
try:
msgs = commtype.get_messages(commtype_ctx)
except TemplateSyntaxError as e:
form.errors['__all__'] = form.error_class([six.text_type(e)])
return self.render_to_response(ctx)
email = form.cleaned_data['preview_email']
dispatch = Dispatcher()
dispatch.send_email_messages(email, msgs)
messages.success(self.request,
_("A preview email has been sent to %s") % email)
return self.render_to_response(ctx)
|
{
"content_hash": "81a2d8e554dd21cc0f360c446c234e44",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 36.75,
"alnum_prop": 0.640427599611273,
"repo_name": "Bogh/django-oscar",
"id": "15ae8d05700cb1d984fef32ec6f9ec826ce43a0d",
"size": "3087",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/oscar/apps/dashboard/communications/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "784855"
},
{
"name": "HTML",
"bytes": "585691"
},
{
"name": "JavaScript",
"bytes": "942620"
},
{
"name": "Makefile",
"bytes": "4909"
},
{
"name": "Python",
"bytes": "1759265"
},
{
"name": "Shell",
"bytes": "3873"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
"""Events Interface."""
from shellcraft.core import BaseItem, BaseFactory
from shellcraft._cli_impl import echo, ask
from shellcraft.utils import convert_resource_value, format_name
from shellcraft.game_state_pb2 import Mission as MissionPB
from shellcraft.world import NPCFactory
import datetime
import random
class Mission(BaseItem):
"""A trade contract, requesting a certain quantity of a resource in a given time."""
def randomize(self, game):
"""Generate random mission."""
self.writer = NPCFactory.make()
random.shuffle(game.state.resources_enabled)
if not self.reward_type or self.reward_type == "resource":
demand_type, reward_type = game.state.resources_enabled[:2]
elif self.reward_type == "reputation":
            demand_type = game.state.resources_enabled[1]
            # keep the already-set reward type (e.g. "reputation") so that
            # reward_type is defined for the reward computation below
            reward_type = self.reward_type
if game.workshop.available_items:
best_available_tool = max(
game.workshop.available_items,
key=lambda item: item.mining_bonus.get(demand_type, 0),
)
efficiency = best_available_tool.mining_bonus.get(demand_type) or 1
else:
efficiency = 1
difficulty = game.mining_difficulty.get(demand_type)
extra_demand = random.random() * game.resources.get(demand_type)
self.demand = int(game.resources.get(demand_type) + extra_demand)
self.due = (
int(extra_demand / efficiency * difficulty * (2 + random.random())) + 10
)
self.demand_type = demand_type
self.reward = int(
(1 + game.state.trade_reputation + 0.3 * random.random())
* convert_resource_value(demand_type, reward_type)
* self.demand
)
self.reward_type = reward_type
# a = str(self.writer)
# print("DEM", format_name(self.writer))
# self.writer.CopyFrom(npc)
def vars(self, game):
return {
"writer": format_name(self.writer),
"demand": self.demand,
"due": self.due,
"demand_type": self.demand_type,
"reward": self.reward,
"reward_type": self.reward_type,
"deficit": self.demand - game.resources.get(self.demand_type),
}
def offer(self, game):
# return re.sub(" +", " ", d.replace("\n", " "))
echo(self.strings["intro"].format(**self.vars(game)))
if ask(self.strings["ask"].format(**self.vars(game))):
self.deadline.FromDatetime(
datetime.datetime.now() + datetime.timedelta(seconds=self.due)
)
echo(self.strings["agree"].format(**self.vars(game)))
return True
else:
game.state.trade_reputation -= 0.02
echo(self.strings["disagree"].format(**self.vars(game)))
return False
def is_completed(self, game):
if datetime.datetime.now() > self.deadline.ToDatetime():
# Failed!
echo(self.strings["failed"].format(**self.vars(game)))
return True
if game.resources.get(self.demand_type) >= self.demand:
echo(self.strings["completed"].format(**self.vars(game)))
game.resources.add(self.demand_type, -self.demand)
game.resources.add(self.reward_type, self.reward)
return True
return False
def __repr__(self):
return "blank"
return "<{demand} {demand_type} in {due}s for {reward} {reward_type}>".format(
**vars(self)
)
class MissionFactory(BaseFactory):
FIXTURES = "missions.toml"
ITEM_CLASS = Mission
PB_CLASS = MissionPB
def make(self, mission):
new_mission = super(MissionFactory, self).make(mission)
if not hasattr(new_mission, "demand"):
new_mission.randomize(self.game)
return new_mission
|
{
"content_hash": "8e4fe23aaff90aa64145f6ee4ffae58b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 88,
"avg_line_length": 36.79047619047619,
"alnum_prop": 0.5943567175770127,
"repo_name": "maebert/shellcraft",
"id": "ce35c47c6f28c855f124a514d0b80adc9489926f",
"size": "3888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shellcraft/missions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2189"
},
{
"name": "Python",
"bytes": "73150"
}
],
"symlink_target": ""
}
|
"""This example gets all companies.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
company_service = client.GetService('CompanyService', version='v202211')
# Create a statement to select companies.
statement = ad_manager.StatementBuilder(version='v202211')
# Retrieve a small amount of companies at a time, paging
# through until all companies have been retrieved.
while True:
response = company_service.getCompaniesByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for company in response['results']:
# Print out some information for each company.
print('Company with ID "%d", name "%s", and type "%s" was found.\n' %
(company['id'], company['name'], company['type']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
{
"content_hash": "58c72bf2f4c8cd803adde27fb656822c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 34.1764705882353,
"alnum_prop": 0.6970740103270223,
"repo_name": "googleads/googleads-python-lib",
"id": "e52278ea61444b0f041a19f3c105b204bcb2dc1f",
"size": "1783",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202211/company_service/get_all_companies.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
"""
The database connections are read‐only, so SQL injection attacks can’t be a
problem.
"""
import sys
import os
import threading
import decimal
import time
import json
import re
import requests
import collections
import logging
logger = logging.getLogger(__name__)
from logging import handlers as logging_handlers
D = decimal.Decimal
import binascii
import struct
import apsw
import flask
from flask_httpauth import HTTPBasicAuth
import jsonrpc
from jsonrpc import dispatcher
from jsonrpc.exceptions import JSONRPCDispatchException
import inspect
from xmltodict import unparse as serialize_to_xml
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
from counterpartylib.lib import util
from counterpartylib.lib import check
from counterpartylib.lib import backend
from counterpartylib.lib import database
from counterpartylib.lib import transaction
from counterpartylib.lib import blocks
from counterpartylib.lib import script
from counterpartylib.lib.messages import send
from counterpartylib.lib.messages import order
from counterpartylib.lib.messages import btcpay
from counterpartylib.lib.messages import issuance
from counterpartylib.lib.messages import broadcast
from counterpartylib.lib.messages import bet
from counterpartylib.lib.messages import dividend
from counterpartylib.lib.messages import burn
from counterpartylib.lib.messages import cancel
from counterpartylib.lib.messages import rps
from counterpartylib.lib.messages import rpsresolve
from counterpartylib.lib.messages import publish
from counterpartylib.lib.messages import execute
API_TABLES = ['assets', 'balances', 'credits', 'debits', 'bets', 'bet_matches',
'broadcasts', 'btcpays', 'burns', 'cancels',
'dividends', 'issuances', 'orders', 'order_matches', 'sends',
'bet_expirations', 'order_expirations', 'bet_match_expirations',
'order_match_expirations', 'bet_match_resolutions', 'rps',
'rpsresolves', 'rps_matches', 'rps_expirations', 'rps_match_expirations',
'mempool']
API_TRANSACTIONS = ['bet', 'broadcast', 'btcpay', 'burn', 'cancel',
'dividend', 'issuance', 'order', 'send',
'rps', 'rpsresolve', 'publish', 'execute']
COMMONS_ARGS = ['encoding', 'fee_per_kb', 'regular_dust_size',
'multisig_dust_size', 'op_return_value', 'pubkey',
'allow_unconfirmed_inputs', 'fee', 'fee_provided',
'unspent_tx_hash', 'custom_inputs', 'dust_return_pubkey', 'disable_utxo_locks']
API_MAX_LOG_SIZE = 10 * 1024 * 1024 #max log size of 10 MB before rotation (make configurable later)
API_MAX_LOG_COUNT = 10
JSON_RPC_ERROR_API_COMPOSE = -32001 #code to use for error composing transaction result
current_api_status_code = None #is updated by the APIStatusPoller
current_api_status_response_json = None #is updated by the APIStatusPoller
class APIError(Exception):
pass
class BackendError(Exception):
pass
def check_backend_state():
"""Checks blocktime of last block to see if {} Core is running behind.""".format(config.BTC_NAME)
block_count = backend.getblockcount()
block_hash = backend.getblockhash(block_count)
cblock = backend.getblock(block_hash)
time_behind = time.time() - cblock.nTime # TODO: Block times are not very reliable.
if time_behind > 60 * 60 * 2: # Two hours.
raise BackendError('Bitcoind is running about {} hours behind.'.format(round(time_behind / 3600)))
logger.debug('Backend state check passed.')
class DatabaseError(Exception):
pass
def check_database_state(db, blockcount):
"""Checks {} database to see if is caught up with backend.""".format(config.XCP_NAME)
if util.CURRENT_BLOCK_INDEX + 1 < blockcount:
raise DatabaseError('{} database is behind backend.'.format(config.XCP_NAME))
logger.debug('Database state check passed.')
return
# TODO: ALL queries EVERYWHERE should be done with these methods
def db_query(db, statement, bindings=(), callback=None, **callback_args):
"""Allow direct access to the database in a parametrized manner."""
cursor = db.cursor()
# Sanitize.
forbidden_words = ['pragma', 'attach', 'database', 'begin', 'transaction']
for word in forbidden_words:
if word in statement.lower() or any([word in str(binding).lower() for binding in bindings]):
raise APIError("Forbidden word in query: '{}'.".format(word))
if hasattr(callback, '__call__'):
cursor.execute(statement, bindings)
for row in cursor:
callback(row, **callback_args)
results = None
else:
results = list(cursor.execute(statement, bindings))
cursor.close()
return results
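# Illustrative use of db_query (not called here); the table, column and address are
# placeholders, shown only to contrast parametrized bindings with string formatting:
#     rows = db_query(db, 'SELECT * FROM sends WHERE source = ?', ('SOME_ADDRESS',))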
def get_rows(db, table, filters=None, filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None,
status=None, limit=1000, offset=0, show_expired=True):
"""SELECT * FROM wrapper. Filters results based on a filter data structure (as used by the API)."""
if filters == None:
filters = []
def value_to_marker(value):
# if value is an array place holder is (?,?,?,..)
if isinstance(value, list):
return '''({})'''.format(','.join(['?' for e in range(0, len(value))]))
else:
return '''?'''
# TODO: Document that op can be anything that SQLite3 accepts.
if not table or table.lower() not in API_TABLES:
raise APIError('Unknown table')
if filterop and filterop.upper() not in ['OR', 'AND']:
raise APIError('Invalid filter operator (OR, AND)')
if order_dir and order_dir.upper() not in ['ASC', 'DESC']:
raise APIError('Invalid order direction (ASC, DESC)')
if not isinstance(limit, int):
raise APIError('Invalid limit')
elif limit > 1000:
        raise APIError('Limit should be lower than or equal to 1000')
if not isinstance(offset, int):
raise APIError('Invalid offset')
# TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'}
if order_by and not re.compile('^[a-z0-9_]+$').match(order_by):
raise APIError('Invalid order_by, must be a field name')
if isinstance(filters, dict): #single filter entry, convert to a one entry list
filters = [filters,]
elif not isinstance(filters, list):
filters = []
# TODO: Document this! (Each filter can be an ordered list.)
new_filters = []
for filter_ in filters:
if type(filter_) in (list, tuple) and len(filter_) in [3, 4]:
new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]}
if len(filter_) == 4:
new_filter['case_sensitive'] = filter_[3]
new_filters.append(new_filter)
elif type(filter_) == dict:
new_filters.append(filter_)
else:
raise APIError('Unknown filter type')
filters = new_filters
# validate filter(s)
for filter_ in filters:
for field in ['field', 'op', 'value']: #should have all fields
if field not in filter_:
raise APIError("A specified filter is missing the '%s' field" % field)
if not isinstance(filter_['value'], (str, int, float, list)):
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']:
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if filter_['op'].upper() not in ['=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE']:
raise APIError("Invalid operator for the field '%s'" % filter_['field'])
if 'case_sensitive' in filter_ and not isinstance(filter_['case_sensitive'], bool):
raise APIError("case_sensitive must be a boolean")
# SELECT
statement = '''SELECT * FROM {}'''.format(table)
# WHERE
bindings = []
conditions = []
for filter_ in filters:
case_sensitive = False if 'case_sensitive' not in filter_ else filter_['case_sensitive']
if filter_['op'] == 'LIKE' and case_sensitive == False:
filter_['field'] = '''UPPER({})'''.format(filter_['field'])
filter_['value'] = filter_['value'].upper()
marker = value_to_marker(filter_['value'])
conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker))
if isinstance(filter_['value'], list):
bindings += filter_['value']
else:
bindings.append(filter_['value'])
# AND filters
more_conditions = []
if table not in ['balances', 'order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''block_index <= ?''')
bindings.append(end_block)
elif table in ['order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''tx0_block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''tx1_block_index <= ?''')
bindings.append(end_block)
# status
if isinstance(status, list) and len(status) > 0:
more_conditions.append('''status IN {}'''.format(value_to_marker(status)))
bindings += status
elif isinstance(status, str) and status != '':
more_conditions.append('''status == ?''')
bindings.append(status)
# legacy filters
if not show_expired and table == 'orders':
#Ignore BTC orders one block early.
expire_index = util.CURRENT_BLOCK_INDEX + 1
more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''')
bindings += [config.BTC, expire_index, config.BTC]
if (len(conditions) + len(more_conditions)) > 0:
statement += ''' WHERE'''
all_conditions = []
if len(conditions) > 0:
all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions)))
if len(more_conditions) > 0:
all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions)))
statement += ''' {}'''.format(''' AND '''.join(all_conditions))
# ORDER BY
if order_by != None:
statement += ''' ORDER BY {}'''.format(order_by)
if order_dir != None:
statement += ''' {}'''.format(order_dir.upper())
# LIMIT
if limit:
statement += ''' LIMIT {}'''.format(limit)
if offset:
statement += ''' OFFSET {}'''.format(offset)
return db_query(db, statement, tuple(bindings))
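# Illustrative call showing the filter data structure get_rows expects (the address
# value is a placeholder); each filter is a dict with 'field', 'op' and 'value' keys,
# or the equivalent [field, op, value] list accepted above:
#     get_rows(db, table='sends',
#              filters=[{'field': 'source', 'op': '==', 'value': 'SOME_ADDRESS'},
#                       {'field': 'block_index', 'op': '>', 'value': 400000}],
#              filterop='AND', order_by='block_index', order_dir='DESC', limit=10)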
def compose_transaction(db, name, params,
encoding='auto',
fee_per_kb=config.DEFAULT_FEE_PER_KB,
estimate_fee_per_kb=None, estimate_fee_per_kb_nblocks=config.ESTIMATE_FEE_NBLOCKS,
regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE,
multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE,
op_return_value=config.DEFAULT_OP_RETURN_VALUE,
pubkey=None,
allow_unconfirmed_inputs=False,
fee=None,
fee_provided=0,
unspent_tx_hash=None, custom_inputs=None, dust_return_pubkey=None, disable_utxo_locks=False):
"""Create and return a transaction."""
# Get provided pubkeys.
if type(pubkey) == str:
provided_pubkeys = [pubkey]
elif type(pubkey) == list:
provided_pubkeys = pubkey
elif pubkey == None:
provided_pubkeys = []
else:
assert False
# Get additional pubkeys from `source` and `destination` params.
# Convert `source` and `destination` to pubkeyhash form.
for address_name in ['source', 'destination']:
if address_name in params:
address = params[address_name]
provided_pubkeys += script.extract_pubkeys(address)
params[address_name] = script.make_pubkeyhash(address)
# Check validity of collected pubkeys.
for pubkey in provided_pubkeys:
if not script.is_fully_valid(binascii.unhexlify(pubkey)):
raise script.AddressError('invalid public key: {}'.format(pubkey))
compose_method = sys.modules['counterpartylib.lib.messages.{}'.format(name)].compose
compose_params = inspect.getargspec(compose_method)[0]
missing_params = [p for p in compose_params if p not in params and p != 'db']
for param in missing_params:
params[param] = None
tx_info = compose_method(db, **params)
return transaction.construct(db, tx_info, encoding=encoding,
fee_per_kb=fee_per_kb,
estimate_fee_per_kb=estimate_fee_per_kb, estimate_fee_per_kb_nblocks=estimate_fee_per_kb_nblocks,
regular_dust_size=regular_dust_size,
multisig_dust_size=multisig_dust_size,
op_return_value=op_return_value,
provided_pubkeys=provided_pubkeys,
allow_unconfirmed_inputs=allow_unconfirmed_inputs,
exact_fee=fee,
fee_provided=fee_provided,
unspent_tx_hash=unspent_tx_hash, custom_inputs=custom_inputs,
dust_return_pubkey=dust_return_pubkey,
disable_utxo_locks=disable_utxo_locks)
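# Illustrative sketch of composing a send with compose_transaction (addresses and
# quantity are placeholders); `name` selects the message module, `params` are passed
# to that module's compose() function, and the remaining keyword arguments are the
# common transaction-construction options:
#     unsigned_tx_hex = compose_transaction(
#         db, name='send',
#         params={'source': 'SOURCE_ADDRESS', 'destination': 'DEST_ADDRESS',
#                 'asset': 'XCP', 'quantity': 100000000},
#         encoding='auto', fee_per_kb=config.DEFAULT_FEE_PER_KB)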
def conditional_decorator(decorator, condition):
"""Checks the condition and if True applies specified decorator."""
def gen_decorator(f):
if not condition:
return f
return decorator(f)
return gen_decorator
def init_api_access_log(app):
"""Initialize API logger."""
loggers = (logging.getLogger('werkzeug'), app.logger)
# Disable console logging...
for l in loggers:
l.setLevel(logging.INFO)
l.propagate = False
# Log to file, if configured...
if config.API_LOG:
handler = logging_handlers.RotatingFileHandler(config.API_LOG, 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
for l in loggers:
l.addHandler(handler)
class APIStatusPoller(threading.Thread):
"""Perform regular checks on the state of the backend and the database."""
def __init__(self):
self.last_database_check = 0
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
logger.debug('Starting API Status Poller.')
global current_api_status_code, current_api_status_response_json
db = database.get_connection(read_only=True, integrity_check=False)
while self.stop_event.is_set() != True:
try:
# Check that backend is running, communicable, and caught up with the blockchain.
# Check that the database has caught up with bitcoind.
if time.time() - self.last_database_check > 10 * 60: # Ten minutes since last check.
if not config.FORCE:
code = 11
logger.debug('Checking backend state.')
check_backend_state()
code = 12
logger.debug('Checking database state.')
check_database_state(db, backend.getblockcount())
self.last_database_check = time.time()
except (BackendError, DatabaseError) as e:
exception_name = e.__class__.__name__
exception_text = str(e)
logger.debug("API Status Poller: %s", exception_text)
jsonrpc_response = jsonrpc.exceptions.JSONRPCServerError(message=exception_name, data=exception_text)
current_api_status_code = code
current_api_status_response_json = jsonrpc_response.json.encode()
else:
current_api_status_code = None
current_api_status_response_json = None
time.sleep(config.BACKEND_POLL_INTERVAL)
class APIServer(threading.Thread):
"""Handle JSON-RPC API calls."""
def __init__(self):
self.is_ready = False
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.join()
self.stop_event.set()
def run(self):
logger.info('Starting API Server.')
db = database.get_connection(read_only=True, integrity_check=False)
app = flask.Flask(__name__)
auth = HTTPBasicAuth()
@auth.get_password
def get_pw(username):
if username == config.RPC_USER:
return config.RPC_PASSWORD
return None
######################
#READ API
# Generate dynamically get_{table} methods
def generate_get_method(table):
def get_method(**kwargs):
try:
return get_rows(db, table=table, **kwargs)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
return get_method
for table in API_TABLES:
new_method = generate_get_method(table)
new_method.__name__ = 'get_{}'.format(table)
dispatcher.add_method(new_method)
@dispatcher.add_method
def sql(query, bindings=None):
if bindings == None:
bindings = []
return db_query(db, query, tuple(bindings))
######################
#WRITE/ACTION API
# Generate dynamically create_{transaction} methods
def generate_create_method(tx):
def split_params(**kwargs):
transaction_args = {}
common_args = {}
private_key_wif = None
for key in kwargs:
if key in COMMONS_ARGS:
common_args[key] = kwargs[key]
elif key == 'privkey':
private_key_wif = kwargs[key]
else:
transaction_args[key] = kwargs[key]
return transaction_args, common_args, private_key_wif
def create_method(**kwargs):
try:
transaction_args, common_args, private_key_wif = split_params(**kwargs)
return compose_transaction(db, name=tx, params=transaction_args, **common_args)
except TypeError as e:
raise APIError(str(e))
except (script.AddressError, exceptions.ComposeError, exceptions.TransactionError, exceptions.BalanceError) as error:
error_msg = "Error composing {} transaction via API: {}".format(tx, str(error))
logging.warning(error_msg)
raise JSONRPCDispatchException(code=JSON_RPC_ERROR_API_COMPOSE, message=error_msg)
return create_method
for tx in API_TRANSACTIONS:
create_method = generate_create_method(tx)
create_method.__name__ = 'create_{}'.format(tx)
dispatcher.add_method(create_method)
@dispatcher.add_method
def get_messages(block_index):
if not isinstance(block_index, int):
raise APIError("block_index must be an integer.")
cursor = db.cursor()
cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_messages_by_index(message_indexes):
"""Get specific messages from the feed, based on the message_index.
@param message_index: A single index, or a list of one or more message indexes to retrieve.
"""
if not isinstance(message_indexes, list):
message_indexes = [message_indexes,]
for idx in message_indexes: #make sure the data is clean
if not isinstance(idx, int):
                    raise APIError("All items in message_indexes must be integers")
cursor = db.cursor()
cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC'
% (','.join([str(x) for x in message_indexes]),))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_supply(asset):
if asset == 'BTC':
return backend.get_btc_supply(normalize=False)
elif asset == 'XCP':
return util.xcp_supply(db)
else:
return util.asset_supply(db, asset)
@dispatcher.add_method
def get_xcp_supply():
logger.warning("Deprecated method: `get_xcp_supply`")
return util.xcp_supply(db)
@dispatcher.add_method
def get_asset_info(assets):
logger.warning("Deprecated method: `get_asset_info`")
if not isinstance(assets, list):
raise APIError("assets must be a list of asset names, even if it just contains one entry")
assetsInfo = []
for asset in assets:
# BTC and XCP.
if asset in [config.BTC, config.XCP]:
if asset == config.BTC:
supply = backend.get_btc_supply(normalize=False)
else:
supply = util.xcp_supply(db)
assetsInfo.append({
'asset': asset,
'owner': None,
'divisible': True,
'locked': False,
'supply': supply,
'description': '',
'issuer': None
})
continue
# User‐created asset.
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY block_index ASC''', ('valid', asset)))
cursor.close()
if not issuances:
continue #asset not found, most likely
else:
last_issuance = issuances[-1]
locked = False
for e in issuances:
if e['locked']: locked = True
assetsInfo.append({
'asset': asset,
'owner': last_issuance['issuer'],
'divisible': bool(last_issuance['divisible']),
'locked': locked,
'supply': util.asset_supply(db, asset),
'description': last_issuance['description'],
'issuer': last_issuance['issuer']})
return assetsInfo
@dispatcher.add_method
def get_block_info(block_index):
assert isinstance(block_index, int)
cursor = db.cursor()
cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,))
blocks = list(cursor)
if len(blocks) == 1:
block = blocks[0]
elif len(blocks) == 0:
raise exceptions.DatabaseError('No blocks found.')
else:
assert False
cursor.close()
return block
@dispatcher.add_method
def fee_per_kb(nblocks=config.ESTIMATE_FEE_NBLOCKS):
return backend.fee_per_kb(nblocks)
@dispatcher.add_method
def get_blocks(block_indexes, min_message_index=None):
"""fetches block info and messages for the specified block indexes
@param min_message_index: Retrieve blocks from the message feed on or after this specific message index
(useful since blocks may appear in the message feed more than once, if a reorg occurred). Note that
if this parameter is not specified, the messages for the first block will be returned.
"""
if not isinstance(block_indexes, (list, tuple)):
raise APIError("block_indexes must be a list of integers.")
if len(block_indexes) >= 250:
                raise APIError("can only specify fewer than 250 indexes at a time.")
block_indexes_str = ','.join([str(x) for x in block_indexes])
cursor = db.cursor()
# The blocks table gets rolled back from undolog, so min_message_index doesn't matter for this query
cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC'
% (block_indexes_str,))
blocks = cursor.fetchall()
cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY message_index ASC'
% (block_indexes_str,))
messages = collections.deque(cursor.fetchall())
# Discard any messages less than min_message_index
if min_message_index:
while len(messages) and messages[0]['message_index'] < min_message_index:
messages.popleft()
# Packages messages into their appropriate block in the data structure to be returned
for block in blocks:
block['_messages'] = []
while len(messages) and messages[0]['block_index'] == block['block_index']:
block['_messages'].append(messages.popleft())
#NOTE: if len(messages), then we're only returning the messages for the first set of blocks before the reorg
cursor.close()
return blocks
@dispatcher.add_method
def get_running_info():
latestBlockIndex = backend.getblockcount()
try:
check_database_state(db, latestBlockIndex)
except DatabaseError:
caught_up = False
else:
caught_up = True
try:
cursor = db.cursor()
blocks = list(cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (util.CURRENT_BLOCK_INDEX, )))
assert len(blocks) == 1
last_block = blocks[0]
cursor.close()
except:
last_block = None
try:
last_message = util.last_message(db)
except:
last_message = None
return {
'db_caught_up': caught_up,
'bitcoin_block_count': latestBlockIndex,
'last_block': last_block,
'last_message_index': last_message['message_index'] if last_message else -1,
'running_testnet': config.TESTNET,
'running_testcoin': config.TESTCOIN,
'version_major': config.VERSION_MAJOR,
'version_minor': config.VERSION_MINOR,
'version_revision': config.VERSION_REVISION
}
@dispatcher.add_method
def get_element_counts():
counts = {}
cursor = db.cursor()
for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders',
'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends',
'burns', 'cancels', 'order_expirations', 'bet_expirations', 'order_match_expirations',
'bet_match_expirations', 'messages']:
cursor.execute("SELECT COUNT(*) AS count FROM %s" % element)
count_list = cursor.fetchall()
assert len(count_list) == 1
counts[element] = count_list[0]['count']
cursor.close()
return counts
@dispatcher.add_method
def get_asset_names():
cursor = db.cursor()
names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")]
cursor.close()
return names
@dispatcher.add_method
def get_holder_count(asset):
holders = util.holders(db, asset)
addresses = []
for holder in holders:
addresses.append(holder['address'])
return {asset: len(set(addresses))}
@dispatcher.add_method
def get_holders(asset):
holders = util.holders(db, asset)
return holders
@dispatcher.add_method
def search_raw_transactions(address, unconfirmed=True):
return backend.searchrawtransactions(address, unconfirmed=unconfirmed)
@dispatcher.add_method
def get_unspent_txouts(address, unconfirmed=False, unspent_tx_hash=None):
return backend.get_unspent_txouts(address, unconfirmed=unconfirmed, multisig_inputs=False, unspent_tx_hash=unspent_tx_hash)
@dispatcher.add_method
def getrawtransaction(tx_hash, verbose=False, skip_missing=False):
return backend.getrawtransaction(tx_hash, verbose=verbose, skip_missing=skip_missing)
@dispatcher.add_method
def getrawtransaction_batch(txhash_list, verbose=False, skip_missing=False):
return backend.getrawtransaction_batch(txhash_list, verbose=verbose, skip_missing=skip_missing)
@dispatcher.add_method
def get_tx_info(tx_hex, block_index=None):
# block_index mandatory for transactions before block 335000
source, destination, btc_amount, fee, data = blocks.get_tx_info(tx_hex, block_index=block_index)
return source, destination, btc_amount, fee, util.hexlify(data) if data else ''
@dispatcher.add_method
def unpack(data_hex):
data = binascii.unhexlify(data_hex)
message_type_id = struct.unpack(config.TXTYPE_FORMAT, data[:4])[0]
message = data[4:]
# TODO: Enabled only for `send`.
if message_type_id == send.ID:
unpack_method = send.unpack
else:
raise APIError('unsupported message type')
unpacked = unpack_method(db, message, util.CURRENT_BLOCK_INDEX)
return message_type_id, unpacked
@dispatcher.add_method
# TODO: Rename this method.
def search_pubkey(pubkeyhash, provided_pubkeys=None):
return backend.pubkeyhash_to_pubkey(pubkeyhash, provided_pubkeys=provided_pubkeys)
def _set_cors_headers(response):
if not config.RPC_NO_ALLOW_CORS:
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization'
@app.route('/', defaults={'args_path': ''}, methods=['GET', 'POST', 'OPTIONS'])
@app.route('/<path:args_path>', methods=['GET', 'POST', 'OPTIONS'])
# Only require authentication if RPC_PASSWORD is set.
@conditional_decorator(auth.login_required, hasattr(config, 'RPC_PASSWORD'))
def handle_root(args_path):
"""Handle all paths, decide where to forward the query."""
if args_path == '' or args_path.startswith('api/') or args_path.startswith('API/') or \
args_path.startswith('rpc/') or args_path.startswith('RPC/'):
if flask.request.method == 'POST':
# Need to get those here because it might not be available in this aux function.
request_json = flask.request.get_data().decode('utf-8')
response = handle_rpc_post(request_json)
return response
elif flask.request.method == 'OPTIONS':
response = handle_rpc_options()
return response
else:
error = 'Invalid method.'
return flask.Response(error, 405, mimetype='application/json')
elif args_path.startswith('rest/') or args_path.startswith('REST/'):
if flask.request.method == 'GET' or flask.request.method == 'POST':
# Pass the URL path without /REST/ part and Flask request object.
rest_path = args_path.split('/', 1)[1]
response = handle_rest(rest_path, flask.request)
return response
else:
error = 'Invalid method.'
return flask.Response(error, 405, mimetype='application/json')
else:
# Not found
return flask.Response(None, 404, mimetype='application/json')
######################
# JSON-RPC API
######################
def handle_rpc_options():
response = flask.Response('', 204)
_set_cors_headers(response)
return response
def handle_rpc_post(request_json):
"""Handle /API/ POST route. Call relevant get_rows/create_transaction wrapper."""
# Check for valid request format.
try:
request_data = json.loads(request_json)
assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
# params may be omitted
except:
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
return flask.Response(obj_error.json.encode(), 400, mimetype='application/json')
# Only arguments passed as a `dict` are supported.
if request_data.get('params', None) and not isinstance(request_data['params'], dict):
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
return flask.Response(obj_error.json.encode(), 400, mimetype='application/json')
# Return an error if the API Status Poller checks fail.
if not config.FORCE and current_api_status_code:
return flask.Response(current_api_status_response_json, 503, mimetype='application/json')
# Answer request normally.
# NOTE: `UnboundLocalError: local variable 'output' referenced before assignment` means the method doesn’t return anything.
jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json')
_set_cors_headers(response)
return response
######################
# HTTP REST API
######################
def handle_rest(path_args, flask_request):
"""Handle /REST/ route. Query the database using get_rows or create transaction using compose_transaction."""
url_action = flask_request.path.split('/')[-1]
if url_action == 'compose':
compose = True
elif url_action == 'get':
compose = False
else:
error = 'Invalid action "%s".' % url_action
return flask.Response(error, 400, mimetype='application/json')
# Get all arguments passed via URL.
url_args = path_args.split('/')
try:
query_type = url_args.pop(0).lower()
except IndexError:
error = 'No query_type provided.'
return flask.Response(error, 400, mimetype='application/json')
# Check if message type or table name are valid.
if (compose and query_type not in API_TRANSACTIONS) or \
(not compose and query_type not in API_TABLES):
error = 'No such query type in supported queries: "%s".' % query_type
return flask.Response(error, 400, mimetype='application/json')
# Parse the additional arguments.
extra_args = flask_request.args.items()
query_data = {}
if compose:
common_args = {}
transaction_args = {}
for (key, value) in extra_args:
# Determine value type.
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
# Split keys into common and transaction-specific arguments. Discard the privkey.
if key in COMMONS_ARGS:
common_args[key] = value
elif key == 'privkey':
pass
else:
transaction_args[key] = value
# Must have some additional transaction arguments.
if not len(transaction_args):
error = 'No transaction arguments provided.'
return flask.Response(error, 400, mimetype='application/json')
# Compose the transaction.
try:
query_data = compose_transaction(db, name=query_type, params=transaction_args, **common_args)
except (script.AddressError, exceptions.ComposeError, exceptions.TransactionError, exceptions.BalanceError) as error:
                    error_msg = "{} -- error composing {} transaction via API: {}".format(
                        str(error.__class__.__name__), query_type, str(error))
                    logging.warning(error_msg)
                    return flask.Response(error_msg, 400, mimetype='application/json')
else:
# Need to de-generate extra_args to pass it through.
query_args = dict([item for item in extra_args])
operator = query_args.pop('op', 'AND')
# Put the data into specific dictionary format.
data_filter = [{'field': key, 'op': '==', 'value': value} for (key, value) in query_args.items()]
# Run the query.
try:
query_data = get_rows(db, table=query_type, filters=data_filter, filterop=operator)
except APIError as error:
return flask.Response(str(error), 400, mimetype='application/json')
# See which encoding to choose from.
file_format = flask_request.headers['Accept']
# JSON as default.
if file_format == 'application/json' or file_format == '*/*':
response_data = json.dumps(query_data)
elif file_format == 'application/xml':
# Add document root for XML. Note when xmltodict encounters a list, it produces separate tags for every item.
# Hence we end up with multiple query_type roots. To combat this we put it in a separate item dict.
response_data = serialize_to_xml({query_type: {'item': query_data}})
else:
error = 'Invalid file format: "%s".' % file_format
return flask.Response(error, 400, mimetype='application/json')
response = flask.Response(response_data, 200, mimetype=file_format)
return response
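        # Illustrative requests this REST handler accepts (addresses and quantities are
        # placeholders). Reads map URL query parameters onto get_rows filters, and
        # writes map them onto compose_transaction params:
        #     GET  /rest/sends/get?source=SOME_ADDRESS&op=AND        (Accept: application/json)
        #     POST /rest/send/compose?source=SRC&destination=DST&asset=XCP&quantity=100000000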
# Init the HTTP Server.
init_api_access_log(app)
# Run app server (blocking)
self.is_ready = True
app.run(host=config.RPC_HOST, port=config.RPC_PORT, threaded=True)
db.close()
return
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
{
"content_hash": "38ccfb381f9a6d6839eaffd39bdcf7fe",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 184,
"avg_line_length": 44.72062084257206,
"alnum_prop": 0.5714462789429322,
"repo_name": "F483/counterparty-lib",
"id": "cb43834b33c0773c3d669fa3b9aa056aa3547f78",
"size": "40367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "counterpartylib/lib/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "1776644"
},
{
"name": "Python",
"bytes": "815561"
},
{
"name": "Shell",
"bytes": "1257"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0022_auto_20170911_1917'),
]
operations = [
migrations.CreateModel(
name='MultiUserUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('csv_content', models.TextField(help_text='CSV 格式:"username,student_id,email,mobile",其中 username 字段为必填项', verbose_name='用户信息')),  # help_text: 'CSV format: "username,student_id,email,mobile", where username is required'; verbose_name: 'User info'
                ('results', models.TextField(help_text='CSV 格式:"username,password"', verbose_name='创建结果')),  # help_text: 'CSV format: "username,password"'; verbose_name: 'Creation results'
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),  # verbose_name: 'Creation time'
],
options={
                'verbose_name': '批量用户上传',  # 'Bulk user upload'
                'verbose_name_plural': '批量用户上传',  # 'Bulk user uploads'
'ordering': ['-id'],
},
),
migrations.AlterField(
model_name='user',
name='mobile',
            field=models.CharField(blank=True, help_text='11 位数字', max_length=11, null=True, validators=[django.core.validators.RegexValidator('^\\d{11}$', '请输入合法的手机号。', 'invalid')], verbose_name='手机号'),  # help_text: '11 digits'; validator message: 'Please enter a valid mobile number.'; verbose_name: 'Mobile number'
),
]
|
{
"content_hash": "050b1f975c146afbaf88d8a1b9d5a099",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 203,
"avg_line_length": 39.55882352941177,
"alnum_prop": 0.5858736059479553,
"repo_name": "prajnamort/LambdaOJ2",
"id": "ef35f93aaef10019aad0f5fac26efb7f77a4ba25",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/migrations/0023_auto_20170917_1529.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "456"
},
{
"name": "JavaScript",
"bytes": "27825"
},
{
"name": "Python",
"bytes": "74343"
},
{
"name": "SQLPL",
"bytes": "2196"
},
{
"name": "Vue",
"bytes": "53092"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import REDIRECT_FIELD_NAME, login as django_login
from django.views.decorators.csrf import csrf_exempt
from django.http import QueryDict, HttpResponseNotAllowed, HttpRequest
from django.http.multipartparser import MultiPartParser
from zerver.models import Realm, UserProfile, get_client, get_user_profile_by_api_key
from zerver.lib.response import json_error, json_unauthorized, json_success
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.timezone import now as timezone_now
from django.conf import settings
from zerver.lib.queue import queue_json_publish
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.utils import statsd, is_remote_server
from zerver.lib.exceptions import RateLimited, JsonableError, ErrorCode
from zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \
api_calls_left, RateLimitedUser
from zerver.lib.request import REQ, has_request_variables, JsonableError, RequestVariableMissingError
from django.core.handlers import base
from functools import wraps
import base64
import datetime
import ujson
import logging
from io import BytesIO
from six.moves import zip, urllib
from typing import Union, Any, Callable, Sequence, Dict, Optional, TypeVar, Text, cast
from zerver.lib.str_utils import force_bytes
from zerver.lib.logging_util import create_logger
# This is a hack to ensure that RemoteZulipServer always exists even
# if Zilencer isn't enabled.
if settings.ZILENCER_ENABLED:
from zilencer.models import get_remote_server_by_uuid, RemoteZulipServer
else:
from mock import Mock
get_remote_server_by_uuid = Mock()
RemoteZulipServer = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188
ViewFuncT = TypeVar('ViewFuncT', bound=Callable[..., HttpResponse])
ReturnT = TypeVar('ReturnT')
## logger setup
webhook_logger = create_logger(
"zulip.zerver.webhooks", settings.API_KEY_ONLY_WEBHOOK_LOG_PATH, 'DEBUG')
class _RespondAsynchronously(object):
pass
# Return RespondAsynchronously from an @asynchronous view if the
# response will be provided later by calling handler.zulip_finish(),
# or has already been provided this way. We use this for longpolling
# mode.
RespondAsynchronously = _RespondAsynchronously()
def asynchronous(method):
# type: (Callable[..., Union[HttpResponse, _RespondAsynchronously]]) -> Callable[..., Union[HttpResponse, _RespondAsynchronously]]
# TODO: this should be the correct annotation when mypy gets fixed: type:
# (Callable[[HttpRequest, base.BaseHandler, Sequence[Any], Dict[str, Any]],
# Union[HttpResponse, _RespondAsynchronously]]) ->
# Callable[[HttpRequest, Sequence[Any], Dict[str, Any]], Union[HttpResponse, _RespondAsynchronously]]
# TODO: see https://github.com/python/mypy/issues/1655
@wraps(method)
def wrapper(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> Union[HttpResponse, _RespondAsynchronously]
return method(request, handler=request._tornado_handler, *args, **kwargs)
if getattr(method, 'csrf_exempt', False):
wrapper.csrf_exempt = True # type: ignore # https://github.com/JukkaL/mypy/issues/1170
return wrapper
def update_user_activity(request, user_profile):
# type: (HttpRequest, UserProfile) -> None
# update_active_status also pushes to rabbitmq, and it seems
# redundant to log that here as well.
if request.META["PATH_INFO"] == '/json/users/me/presence':
return
if hasattr(request, '_query'):
query = request._query
else:
query = request.META['PATH_INFO']
event = {'query': query,
'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(timezone_now()),
'client': request.client.name}
queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(func)
def wrapper(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if (request.method != "POST" and
not (request.method == "SOCKET" and
request.META['zulip.emulated_method'] == "POST")):
if request.method == "SOCKET":
err_method = "SOCKET/%s" % (request.META['zulip.emulated_method'],)
else:
err_method = request.method
logging.warning('Method Not Allowed (%s): %s', err_method, request.path,
extra={'status_code': 405, 'request': request})
return HttpResponseNotAllowed(["POST"])
return func(request, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
def require_realm_admin(func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(func)
def wrapper(request, user_profile, *args, **kwargs):
# type: (HttpRequest, UserProfile, *Any, **Any) -> HttpResponse
if not user_profile.is_realm_admin:
raise JsonableError(_("Must be a realm administrator"))
return func(request, user_profile, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
from zerver.lib.user_agent import parse_user_agent
def get_client_name(request, is_browser_view):
# type: (HttpRequest, bool) -> Text
# If the API request specified a client in the request content,
# that has priority. Otherwise, extract the client from the
# User-Agent.
if 'client' in request.GET:
return request.GET['client']
if 'client' in request.POST:
return request.POST['client']
if "HTTP_USER_AGENT" in request.META:
user_agent = parse_user_agent(request.META["HTTP_USER_AGENT"])
else:
user_agent = None
if user_agent is not None:
# We could check for a browser's name being "Mozilla", but
# e.g. Opera and MobileSafari don't set that, and it seems
# more robust to just key off whether it was a browser view
if is_browser_view and not user_agent["name"].startswith("Zulip"):
# Avoid changing the client string for browsers, but let
# the Zulip desktop and mobile apps be themselves.
return "website"
else:
return user_agent["name"]
else:
# In the future, we will require setting USER_AGENT, but for
# now we just want to tag these requests so we can review them
# in logs and figure out the extent of the problem
if is_browser_view:
return "website"
else:
return "Unspecified"
def process_client(request, user_profile, is_browser_view=False, client_name=None,
remote_server_request=False):
# type: (HttpRequest, UserProfile, bool, Optional[Text], bool) -> None
if client_name is None:
client_name = get_client_name(request, is_browser_view)
request.client = get_client(client_name)
if not remote_server_request:
update_user_activity(request, user_profile)
class InvalidZulipServerError(JsonableError):
code = ErrorCode.INVALID_ZULIP_SERVER
data_fields = ['role']
def __init__(self, role):
# type: (Text) -> None
self.role = role # type: Text
@staticmethod
def msg_format():
# type: () -> Text
return "Zulip server auth failure: {role} is not registered"
class InvalidZulipServerKeyError(JsonableError):
@staticmethod
def msg_format():
# type: () -> Text
return "Zulip server auth failure: key does not match role {role}"
def validate_api_key(request, role, api_key, is_webhook=False,
client_name=None):
# type: (HttpRequest, Optional[Text], Text, bool, Optional[Text]) -> Union[UserProfile, RemoteZulipServer]
# Remove whitespace to protect users from trivial errors.
api_key = api_key.strip()
if role is not None:
role = role.strip()
if settings.ZILENCER_ENABLED and role is not None and is_remote_server(role):
try:
remote_server = get_remote_server_by_uuid(role)
except RemoteZulipServer.DoesNotExist:
raise InvalidZulipServerError(role)
if api_key != remote_server.api_key:
raise InvalidZulipServerKeyError(role)
if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
raise JsonableError(_("Invalid subdomain for push notifications bouncer"))
request.user = remote_server
request._email = "zulip-server:" + role
remote_server.rate_limits = ""
process_client(request, remote_server, remote_server_request=True)
return remote_server
user_profile = access_user_by_api_key(request, api_key, email=role)
if user_profile.is_incoming_webhook and not is_webhook:
raise JsonableError(_("This API is not available to incoming webhook bots."))
request.user = user_profile
request._email = user_profile.email
process_client(request, user_profile, client_name=client_name)
return user_profile
def validate_account_and_subdomain(request, user_profile):
# type: (HttpRequest, UserProfile) -> None
if not user_profile.is_active:
raise JsonableError(_("Account not active"))
if user_profile.realm.deactivated:
raise JsonableError(_("Realm for account has been deactivated"))
# Either the subdomain matches, or processing a websockets message
# in the message_sender worker (which will have already had the
# subdomain validated), or we're accessing Tornado from and to
# localhost (aka spoofing a request as the user).
if (not user_matches_subdomain(get_subdomain(request), user_profile) and
not (request.method == "SOCKET" and
request.META['SERVER_NAME'] == "127.0.0.1") and
not (settings.RUNNING_INSIDE_TORNADO and
request.META["SERVER_NAME"] == "127.0.0.1" and
request.META["REMOTE_ADDR"] == "127.0.0.1")):
logging.warning("User %s (%s) attempted to access API on wrong subdomain (%s)" % (
user_profile.email, user_profile.realm.subdomain, get_subdomain(request)))
raise JsonableError(_("Account is not associated with this subdomain"))
def access_user_by_api_key(request, api_key, email=None):
# type: (HttpRequest, Text, Optional[Text]) -> UserProfile
try:
user_profile = get_user_profile_by_api_key(api_key)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid API key"))
if email is not None and email != user_profile.email:
# This covers the case that the API key is correct, but for a
        # different user. We may end up wanting to relax this
# constraint or give a different error message in the future.
raise JsonableError(_("Invalid API key"))
validate_account_and_subdomain(request, user_profile)
return user_profile
# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(client_name):
# type: (Text) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
# TODO The typing here could be improved by using the Extended Callable types:
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request, api_key=REQ(),
*args, **kwargs):
# type: (HttpRequest, Text, *Any, **Any) -> HttpResponse
user_profile = validate_api_key(request, None, api_key, is_webhook=True,
client_name="Zulip{}Webhook".format(client_name))
if settings.RATE_LIMITING:
rate_limit_user(request, user_profile, domain='all')
try:
return view_func(request, user_profile, *args, **kwargs)
except Exception as err:
if request.content_type == 'application/json':
try:
request_body = ujson.dumps(ujson.loads(request.body), indent=4)
except ValueError:
request_body = str(request.body)
else:
request_body = str(request.body)
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
body:
{body}
""".format(
email=user_profile.email,
realm=user_profile.realm.string_id,
client_name=request.client.name,
body=request_body,
path_info=request.META.get('PATH_INFO', None),
content_type=request.content_type,
)
webhook_logger.exception(message)
raise err
return _wrapped_func_arguments
return _wrapped_view_func
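# Illustrative (hypothetical) declaration of an incoming webhook view using this
# decorator; the view name and payload handling are placeholders:
#     @api_key_only_webhook_view('Example')
#     @has_request_variables
#     def api_example_webhook(request, user_profile,
#                             payload=REQ(argument_type='body')):
#         # ... translate `payload` into a Zulip message and send it ...
#         return json_success()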
# From Django 1.8, modified to leave off ?next=/
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
# type: (Text, Optional[Text], Text) -> HttpResponseRedirect
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urllib.parse.urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
# Don't add ?next=/, to keep our URLs clean
if next != '/':
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urllib.parse.urlunparse(login_url_parts))
# From Django 1.8
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
# type: (Callable[[HttpResponse], bool], Optional[Text], Text) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if test_func(request):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def logged_in_and_active(request):
# type: (HttpRequest) -> bool
if not request.user.is_authenticated:
return False
if not request.user.is_active:
return False
if request.user.realm.deactivated:
return False
return user_matches_subdomain(get_subdomain(request), request.user)
def do_login(request, user_profile):
# type: (HttpRequest, UserProfile) -> None
"""Creates a session, logging in the user, using the Django method,
and also adds helpful data needed by our server logs.
"""
django_login(request, user_profile)
request._email = user_profile.email
process_client(request, user_profile, is_browser_view=True)
def add_logging_data(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
request._email = request.user.email
request._query = view_func.__name__
process_client(request, request.user, is_browser_view=True)
return rate_limit()(view_func)(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def human_users_only(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if request.user.is_bot:
return json_error(_("This endpoint does not accept bot requests."))
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
# Based on Django 1.8's @login_required
def zulip_login_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=settings.HOME_NOT_LOGGED_IN):
# type: (Optional[Callable[..., HttpResponse]], Text, Text) -> Union[Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]], Callable[..., HttpResponse]]
actual_decorator = user_passes_test(
logged_in_and_active,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
# Add necessary logging data via add_logging_data
return actual_decorator(add_logging_data(function))
return actual_decorator
def require_server_admin(view_func):
# type: (ViewFuncT) -> ViewFuncT
@zulip_login_required
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
request._query = view_func.__name__
if not request.user.is_staff:
return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
return add_logging_data(view_func)(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
# authenticated_api_view will add the authenticated user's
# user_profile to the view function's arguments list, since we have to
# look it up anyway. It is deprecated in favor on the REST API
# versions.
def authenticated_api_view(is_webhook=False):
# type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request, email=REQ(), api_key=REQ(default=None),
api_key_legacy=REQ('api-key', default=None),
*args, **kwargs):
# type: (HttpRequest, Text, Optional[Text], Optional[Text], *Any, **Any) -> HttpResponse
if api_key is None:
api_key = api_key_legacy
if api_key is None:
raise RequestVariableMissingError("api_key")
user_profile = validate_api_key(request, email, api_key, is_webhook)
# Apply rate limiting
limited_func = rate_limit()(view_func)
return limited_func(request, user_profile, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP Basic
# authentication.
def authenticated_rest_api_view(is_webhook=False):
# type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@wraps(view_func)
def _wrapped_func_arguments(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# First try block attempts to get the credentials we need to do authentication
try:
# Grab the base64-encoded authentication string, decode it, and split it into
# the email and API key
auth_type, credentials = request.META['HTTP_AUTHORIZATION'].split()
# case insensitive per RFC 1945
if auth_type.lower() != "basic":
return json_error(_("This endpoint requires HTTP basic authentication."))
role, api_key = base64.b64decode(force_bytes(credentials)).decode('utf-8').split(":")
except ValueError:
return json_unauthorized(_("Invalid authorization header for basic auth"))
except KeyError:
return json_unauthorized("Missing authorization header for basic auth")
# Now we try to do authentication or die
try:
# profile is a Union[UserProfile, RemoteZulipServer]
profile = validate_api_key(request, role, api_key, is_webhook)
except JsonableError as e:
return json_unauthorized(e.msg)
# Apply rate limiting
return rate_limit()(view_func)(request, profile, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
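# Illustrative client-side construction of the header the decorator above parses
# (the email and API key are placeholders):
#     credentials = base64.b64encode(b"user@example.com:abcd1234efgh5678").decode("utf-8")
#     headers = {"Authorization": "Basic " + credentials}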
def process_as_post(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# Adapted from django/http/__init__.py.
# So by default Django doesn't populate request.POST for anything besides
# POST requests. We want this dict populated for PATCH/PUT, so we have to
# do it ourselves.
#
# This will not be required in the future, a bug will be filed against
# Django upstream.
if not request.POST:
# Only take action if POST is empty.
if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
# Note that request._files is just the private attribute that backs the
# FILES property, so we are essentially setting request.FILES here. (In
# Django 1.5 FILES was still a read-only property.)
request.POST, request._files = MultiPartParser(
request.META,
BytesIO(request.body),
request.upload_handlers,
request.encoding
).parse()
else:
request.POST = QueryDict(request.body, encoding=request.encoding)
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(request, view_func, *args, **kwargs):
# type: (HttpRequest, Callable[..., HttpResponse], *Any, **Any) -> HttpResponse
if not request.user.is_authenticated:
return json_error(_("Not logged in"), status=401)
user_profile = request.user
validate_account_and_subdomain(request, user_profile)
if user_profile.is_incoming_webhook:
raise JsonableError(_("Webhook bots can only access webhooks"))
process_client(request, user_profile, is_browser_view=True)
request._email = user_profile.email
return rate_limit()(view_func)(request, user_profile, *args, **kwargs)
# Checks if the request is a POST request and that the user is logged
# in. If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views)
def authenticated_json_post_view(view_func):
# type: (ViewFuncT) -> ViewFuncT
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticated_json_view(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def is_local_addr(addr):
# type: (Text) -> bool
return addr in ('127.0.0.1', '::1')
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request):
# type: (HttpRequest) -> bool
return (is_local_addr(request.META['REMOTE_ADDR']) and
request.POST.get('secret') == settings.SHARED_SECRET)
def client_is_exempt_from_rate_limiting(request):
# type: (HttpRequest) -> bool
# Don't rate limit requests from Django that come from our own servers,
# and don't rate-limit dev instances
return ((request.client and request.client.name.lower() == 'internal') and
(is_local_addr(request.META['REMOTE_ADDR']) or
settings.DEBUG_RATE_LIMITING))
def internal_notify_view(is_tornado_view):
# type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
# The typing here could be improved by using the Extended Callable types:
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types
"""Used for situations where something running on the Zulip server
needs to make a request to the (other) Django/Tornado processes running on
the server."""
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@require_post
@wraps(view_func)
def _wrapped_func_arguments(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if not authenticate_notify(request):
return json_error(_('Access denied'), status=403)
is_tornado_request = hasattr(request, '_tornado_handler')
# These next 2 are not security checks; they are internal
# assertions to help us find bugs.
if is_tornado_view and not is_tornado_request:
raise RuntimeError('Tornado notify view called with no Tornado handler')
if not is_tornado_view and is_tornado_request:
raise RuntimeError('Django notify view called with Tornado handler')
request._email = "internal"
return view_func(request, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# Converter functions for use with has_request_variables
def to_non_negative_int(s):
# type: (Text) -> int
x = int(s)
if x < 0:
raise ValueError("argument is negative")
return x
def to_not_negative_int_or_none(s):
# type: (Text) -> Optional[int]
if s:
return to_non_negative_int(s)
return None
def flexible_boolean(boolean):
# type: (Text) -> bool
"""Returns True for any of "1", "true", or "True". Returns False otherwise."""
if boolean in ("1", "true", "True"):
return True
else:
return False
def to_utc_datetime(timestamp):
# type: (Text) -> datetime.datetime
return timestamp_to_datetime(float(timestamp))
def statsd_increment(counter, val=1):
# type: (Text, int) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]
"""Increments a statsd counter on completion of the
decorated function.
Pass the name of the counter to this decorator-returning function."""
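    # Illustrative use only (the counter name and function are hypothetical):
    #
    #     @statsd_increment('zerver.example_counter')
    #     def do_work():
    #         ...
    #
    # Each successful call to do_work() then bumps the counter by `val`.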
def wrapper(func):
# type: (Callable[..., ReturnT]) -> Callable[..., ReturnT]
@wraps(func)
def wrapped_func(*args, **kwargs):
# type: (*Any, **Any) -> ReturnT
ret = func(*args, **kwargs)
statsd.incr(counter, val)
return ret
return wrapped_func
return wrapper
def rate_limit_user(request, user, domain):
# type: (HttpRequest, UserProfile, Text) -> None
"""Returns whether or not a user was rate limited. Will raise a RateLimited exception
if the user has been rate limited, otherwise returns and modifies request to contain
the rate limit information"""
entity = RateLimitedUser(user, domain=domain)
ratelimited, time = is_ratelimited(entity)
request._ratelimit_applied_limits = True
request._ratelimit_secs_to_freedom = time
request._ratelimit_over_limit = ratelimited
# Abort this request if the user is over their rate limits
if ratelimited:
statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
raise RateLimited()
incr_ratelimit(entity)
calls_remaining, time_reset = api_calls_left(entity)
request._ratelimit_remaining = calls_remaining
request._ratelimit_secs_to_freedom = time_reset
def rate_limit(domain='all'):
# type: (Text) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
"""Rate-limits a view. Takes an optional 'domain' param if you wish to
rate limit different types of API calls independently.
Returns a decorator"""
def wrapper(func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@wraps(func)
def wrapped_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# It is really tempting to not even wrap our original function
# when settings.RATE_LIMITING is False, but it would make
# for awkward unit testing in some situations.
if not settings.RATE_LIMITING:
return func(request, *args, **kwargs)
if client_is_exempt_from_rate_limiting(request):
return func(request, *args, **kwargs)
try:
user = request.user
except Exception:
# TODO: This logic is not tested, and I'm not sure we are
# doing the right thing here.
user = None
if not user:
logging.error("Requested rate-limiting on %s but user is not authenticated!" %
func.__name__)
return func(request, *args, **kwargs)
# Rate-limiting data is stored in redis
# We also only support rate-limiting authenticated
# views right now.
# TODO(leo) - implement per-IP non-authed rate limiting
rate_limit_user(request, user, domain)
return func(request, *args, **kwargs)
return wrapped_func
return wrapper
def return_success_on_head_request(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if request.method == 'HEAD':
return json_success()
return view_func(request, *args, **kwargs)
return _wrapped_view_func
|
{
"content_hash": "ced38d529124a9a70894aaa695592aa6",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 171,
"avg_line_length": 44.31388888888889,
"alnum_prop": 0.6423243277126559,
"repo_name": "brockwhittaker/zulip",
"id": "6369bb17f470a5e004b6ca43a8cfeeb712b0f0de",
"size": "31907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "442662"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "515931"
},
{
"name": "JavaScript",
"bytes": "2195008"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "393671"
},
{
"name": "Puppet",
"bytes": "87413"
},
{
"name": "Python",
"bytes": "3948219"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "65702"
}
],
"symlink_target": ""
}
|
import logging
import os
import pwd
import random
import string
import uuid
import kazoo.exceptions
from kazoo.client import KazooClient
from kazoo.retry import KazooRetry
from kazoo.security import ACL, ANYONE_ID_UNSAFE, Permissions
from dcos_internal_utils import utils
log = logging.getLogger(__name__)
ANYONE_READ = [ACL(Permissions.READ, ANYONE_ID_UNSAFE)]
ANYONE_ALL = [ACL(Permissions.ALL, ANYONE_ID_UNSAFE)]
class Bootstrapper(object):
def __init__(self, zk_hosts):
conn_retry_policy = KazooRetry(max_tries=-1, delay=0.1, max_delay=0.1)
cmd_retry_policy = KazooRetry(max_tries=3, delay=0.3, backoff=1, max_delay=1, ignore_expire=False)
self._zk = KazooClient(hosts=zk_hosts, connection_retry=conn_retry_policy, command_retry=cmd_retry_policy)
@property
def zk(self):
"""Lazy initialize zk client"""
if self._zk.connected:
return self._zk
self._zk.start()
return self._zk
def close(self):
if self._zk.connected:
self._zk.stop()
self._zk.close()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def cluster_id(self, path, readonly=False):
dirpath = os.path.dirname(os.path.abspath(path))
log.info('Opening {} for locking'.format(dirpath))
with utils.Directory(dirpath) as d:
log.info('Taking exclusive lock on {}'.format(dirpath))
with d.lock():
if readonly:
zkid = None
else:
zkid = str(uuid.uuid4()).encode('ascii')
zkid = self._consensus('/cluster-id', zkid, ANYONE_READ)
zkid = zkid.decode('ascii')
if os.path.exists(path):
fileid = utils.read_file_line(path)
if fileid == zkid:
log.info('Cluster ID in ZooKeeper and file are the same: {}'.format(zkid))
return zkid
log.info('Writing cluster ID from ZK to {} via rename'.format(path))
tmppath = path + '.tmp'
with open(tmppath, 'w') as f:
f.write(zkid + '\n')
os.rename(tmppath, path)
log.info('Wrote cluster ID to {}'.format(path))
return zkid
def generate_oauth_secret(self, path):
log.info('Generating oauth secret at {}'.format(path))
possible_auth_token = ''.join(random.choice(string.ascii_letters) for _ in range(64))
self.zk.ensure_path('/dcos', ANYONE_ALL)
consensus_auth_token = self._consensus('/dcos/auth-token-secret',
possible_auth_token.encode('ascii'), ANYONE_READ)
_write_file(path, consensus_auth_token, 0o600, 'dcos_oauth')
return consensus_auth_token
def _consensus(self, path, value, acl=None):
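        # Create-if-absent, then read back: the first writer's value wins and
        # every caller converges on it (used for the cluster ID and the oauth
        # secret above). With value=None the znode is only read, never created.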
if value is not None:
log.info('Reaching consensus about znode {}'.format(path))
try:
self.zk.create(path, value, acl=acl)
log.info('Consensus znode {} created'.format(path))
except kazoo.exceptions.NodeExistsError:
log.info('Consensus znode {} already exists'.format(path))
pass
self.zk.sync(path)
return self.zk.get(path)[0]
def _write_file(path, data, mode, owner='root'):
dirpath = os.path.dirname(os.path.abspath(path))
log.info('Opening {} for locking'.format(dirpath))
with utils.Directory(dirpath) as d:
log.info('Taking exclusive lock on {}'.format(dirpath))
with d.lock():
umask_original = os.umask(0)
try:
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
log.info('Writing {} with mode {:o}'.format(path, mode))
tmppath = path + '.tmp'
with os.fdopen(os.open(tmppath, flags, mode), 'wb') as f:
f.write(data)
os.rename(tmppath, path)
user = pwd.getpwnam(owner)
os.chown(path, user.pw_uid, user.pw_gid)
finally:
os.umask(umask_original)
|
{
"content_hash": "6e68d37eba24365cd9417aa2339c4ea0",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 114,
"avg_line_length": 35.855932203389834,
"alnum_prop": 0.5627511226660364,
"repo_name": "branden/dcos",
"id": "5fa4cb35e5791f246959c87e481c87abd04efbb4",
"size": "4231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/bootstrap/extra/dcos_internal_utils/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HTML",
"bytes": "84154"
},
{
"name": "Lua",
"bytes": "183652"
},
{
"name": "Makefile",
"bytes": "179"
},
{
"name": "Python",
"bytes": "1266823"
},
{
"name": "Shell",
"bytes": "83031"
}
],
"symlink_target": ""
}
|
''' Message queues. '''
from rq import Connection, Queue
import app.config
import app.database
import worker
import worker.scrape
import worker.archive
_config = app.config.get_config()
_redis = app.database.get_redis(dict(_config.items('redis')))
_redis_worker = dict(_config.items('redis_worker'))
_scrape_queue = Queue('scrape', connection=_redis)
_archive_queue = Queue('archive', connection=_redis)
def dummy_job():
'''
This dummy job is used by init_queues().
It must be defined at the module level so that Python RQ can import it;
it cannot be an anonymous or nested function.
'''
pass
def init_queues(redis):
'''
Python RQ creates queues lazily, but we want them created eagerly.
This function submits a dummy job to each queue to force Python RQ to
create that queue.
'''
queues = {q for q in globals().values() if type(q) is Queue}
with Connection(redis):
for queue in queues:
queue.enqueue(dummy_job)
def remove_unused_queues(redis):
'''
Remove queues in RQ that are not defined in this file.
This is useful for removing queues that used to be defined but were later
removed.
'''
queue_names = {q.name for q in globals().values() if type(q) is Queue}
with Connection(redis):
for queue in Queue.all():
if queue.name not in queue_names:
queue.empty()
redis.srem('rq:queues', 'rq:queue:{}'.format(queue.name))
def schedule_username(username, site, group_id,
total, tracker_id, test=False):
'''
Queue a job to fetch results for the specified username from the specified
site.
Keyword arguments:
test -- don't archive, update site with result (default: False)
'''
kwargs = {
'username': username,
'site_id': site.id,
'group_id': group_id,
'total': total,
'tracker_id': tracker_id,
'test': test
}
job = _scrape_queue.enqueue_call(
func=worker.scrape.check_username,
kwargs=kwargs,
timeout=_redis_worker['username_timeout']
)
description = 'Checking {} for user "{}"'.format(site.name, username)
worker.init_job(job=job, description=description)
return job.id
def schedule_archive(username, group_id, tracker_id):
''' Queue a job to archive results for the job id. '''
job = _archive_queue.enqueue_call(
func=worker.archive.create_archive,
args=[username, group_id, tracker_id],
timeout=_redis_worker['archive_timeout']
)
description = 'Archiving results for username "{}"'.format(username)
worker.init_job(job=job, description=description)
def schedule_site_test(site, tracker_id):
'''
Queue a job to test a site.
Arguments:
site -- the site to test.
tracker_id -- the unique tracker ID for the job.
'''
job = _scrape_queue.enqueue_call(
func=worker.scrape.test_site,
args=[site.id, tracker_id],
timeout=30
)
description = 'Testing site "{}"'.format(site.name)
worker.init_job(job=job, description=description)
return job.id
|
{
"content_hash": "93aabdb207af7db69245243169eee743",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 26.478991596638654,
"alnum_prop": 0.63344969850841,
"repo_name": "TeamHG-Memex/hgprofiler",
"id": "941fec2b5ce80d7751b3cb2fc5afe0e5fd77e062",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/app/queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22445"
},
{
"name": "Dart",
"bytes": "157800"
},
{
"name": "HTML",
"bytes": "66599"
},
{
"name": "JavaScript",
"bytes": "448280"
},
{
"name": "Python",
"bytes": "213096"
}
],
"symlink_target": ""
}
|
import sys
import bluetooth
import time
import struct
if sys.version < '3':
input = raw_input
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
if len(sys.argv) < 2:
print("usage: l2capclient.py <addr>")
sys.exit(2)
bt_addr=sys.argv[1]
port = 0x01
print("trying to connect to %s on PSM 0x%X" % (bt_addr, port))
sock.connect((bt_addr, port))
print("connected. listening...")
while True:
# data = input()
# if(len(data) == 0): break
# sock.send(data)
try:
# sock.send("")
data = sock.recv(1024)
# val_name, val_num = parse_buf(buf, data)
# short_val = struct.unpack('h', data)
print "Value: " + str(data)
time.sleep(.01)
except KeyboardInterrupt:
break
sock.close()
|
{
"content_hash": "bec7942daa80b70cf451d1da80de5ec1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 62,
"avg_line_length": 18.585365853658537,
"alnum_prop": 0.6036745406824147,
"repo_name": "mcelhennyi/smart_home",
"id": "923bd8b749dcf74874dc07cb104956d99146e8c3",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/SmokerModule/smoker_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "11863"
},
{
"name": "Python",
"bytes": "8414"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import sys
import logging
import multiprocessing
from .constants import *
from .errors import *
from ._compat import *
__all__ = ['colors', 'log', 'setup_logging']
logging_level_map = {
SILENCE: logging.NOTSET,
INFO: logging.INFO,
VERBOSE: logging.DEBUG,
DEBUG: logging.DEBUG
}
def run_once(f):
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return f(*args, **kwargs)
wrapper.has_run = False
return wrapper
@run_once
def setup_logging():
l = logging.getLogger("giraffez.console_logger")
l.addHandler(logging.StreamHandler(sys.stderr))
l.setLevel(logging.DEBUG)
logging_logger = logging.getLogger("giraffez.console_logger")
CONSOLE_HEADER = "\033[95m"
CONSOLE_BLUE = "\033[94m"
CONSOLE_GREEN = "\033[92m"
CONSOLE_GRAY = "\033[37m"
CONSOLE_WHITE = "\033[97m"
CONSOLE_WARNING = "\033[93m"
CONSOLE_ENDC = "\033[0m"
CONSOLE_FAIL = "\033[91m"
CONSOLE_BOLD = "\033[1m"
CONSOLE_UNDERLINE = "\033[4m"
CONSOLE_DISABLED = "\033[38;5;240m"
class colors(object):
is_colorful = True
@classmethod
def colorize(cls, color, s):
if colors.is_colorful:
return "{}{}\033[0m".format(color, s)
return s
@classmethod
def blue(cls, s):
return cls.colorize(CONSOLE_BLUE, s)
@classmethod
def green(cls, s):
return cls.colorize(CONSOLE_GREEN, s)
@classmethod
def gray(cls, s):
return cls.colorize(CONSOLE_GRAY, s)
@classmethod
def white(cls, s):
return cls.colorize(CONSOLE_WHITE, s)
@classmethod
def disabled(cls, s):
return cls.colorize(CONSOLE_DISABLED, s)
@classmethod
def bold(cls, s):
return cls.colorize(CONSOLE_BOLD, s)
@classmethod
def fail(cls, s):
return cls.colorize(CONSOLE_FAIL, s)
def highlight(s):
return colors.white(colors.bold("{}: ".format(s)))
class Logger(object):
def __init__(self, fd=None, level=INFO):
self.fd = fd
self.level = level
@property
def level(self):
return self._level
@level.setter
def level(self, v):
if v not in [SILENCE, INFO, VERBOSE, DEBUG]:
raise GiraffeError("Invalid log level set")
self._level = v
def _write(self, args, level=None, console=False):
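        # When more than one positional argument is given, the first is
        # treated as a category prefix and highlighted; output goes straight
        # to stderr when console=True, otherwise through the module logger.
        # Messages above the configured verbosity level are dropped.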
if level is not None:
if self.level < level:
return
args = list(args)
if len(args) == 0:
return
elif len(args) == 1:
category = None
else:
category = args.pop(0)
if category is not None:
category = highlight(category)
if isinstance(args[0], Exception):
args = [repr(arg) for arg in args]
else:
args = [arg if isinstance(arg, basestring) else str(arg) for arg in args]
logging_level = logging_level_map.get(level, logging.INFO)
msg = "".join([str(m) for m in args])
if category:
msg = "{}{}".format(category, msg)
if console:
sys.stderr.write(msg)
else:
logging_logger.log(logging_level, str(msg))
return self
def write(self, *args, **kwargs):
return self._write(args, level=None, console=kwargs.get("console", False))
def info(self, *args, **kwargs):
return self._write(args, level=INFO, console=kwargs.get("console", False))
def verbose(self, *args, **kwargs):
return self._write(args, level=VERBOSE, console=kwargs.get("console", False))
def debug(self, *args, **kwargs):
return self._write(args, level=DEBUG, console=kwargs.get("console", False))
def fatal(self, msg=""):
self._write(["System", msg])
sys.exit(1)
class LockingLogger(Logger):
def __init__(self, fd=None, level=INFO):
super(LockingLogger, self).__init__(fd, level)
self.lock = multiprocessing.Lock()
def _write(self, args, level=None, console=True):
with self.lock:
super(LockingLogger, self)._write(args, level=level, console=console)
log = LockingLogger(sys.stderr, level=INFO)
|
{
"content_hash": "24fb2b5c86fa257c4d7adae40d520d7e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 85,
"avg_line_length": 25.609756097560975,
"alnum_prop": 0.6026190476190476,
"repo_name": "capitalone/giraffez",
"id": "8ad8598230d3df571968aa75054bf365799cb6c6",
"size": "4816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "giraffez/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "196016"
},
{
"name": "C++",
"bytes": "35182"
},
{
"name": "Makefile",
"bytes": "834"
},
{
"name": "Python",
"bytes": "269726"
},
{
"name": "Shell",
"bytes": "6939"
}
],
"symlink_target": ""
}
|
"""The tests for the REST switch platform."""
import asyncio
import aiohttp
import homeassistant.components.switch.rest as rest
from homeassistant.setup import setup_component
from homeassistant.util.async_ import run_coroutine_threadsafe
from homeassistant.helpers.template import Template
from tests.common import get_test_home_assistant, assert_setup_component
class TestRestSwitchSetup:
"""Tests for setting up the REST switch platform."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_missing_config(self):
"""Test setup with configuration missing required entries."""
assert not run_coroutine_threadsafe(
rest.async_setup_platform(self.hass, {
'platform': 'rest'
}, None),
self.hass.loop
).result()
def test_setup_missing_schema(self):
"""Test setup with resource missing schema."""
assert not run_coroutine_threadsafe(
rest.async_setup_platform(self.hass, {
'platform': 'rest',
'resource': 'localhost'
}, None),
self.hass.loop
).result()
def test_setup_failed_connect(self, aioclient_mock):
"""Test setup when connection error occurs."""
aioclient_mock.get('http://localhost', exc=aiohttp.ClientError)
assert not run_coroutine_threadsafe(
rest.async_setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None),
self.hass.loop
).result()
def test_setup_timeout(self, aioclient_mock):
"""Test setup when connection timeout occurs."""
aioclient_mock.get('http://localhost', exc=asyncio.TimeoutError())
assert not run_coroutine_threadsafe(
rest.async_setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None),
self.hass.loop
).result()
def test_setup_minimum(self, aioclient_mock):
"""Test setup with minimum configuration."""
aioclient_mock.get('http://localhost', status=200)
with assert_setup_component(1, 'switch'):
assert setup_component(self.hass, 'switch', {
'switch': {
'platform': 'rest',
'resource': 'http://localhost'
}
})
assert aioclient_mock.call_count == 1
def test_setup(self, aioclient_mock):
"""Test setup with valid configuration."""
aioclient_mock.get('http://localhost', status=200)
assert setup_component(self.hass, 'switch', {
'switch': {
'platform': 'rest',
'name': 'foo',
'resource': 'http://localhost',
'headers': {'Content-type': 'application/json'},
'body_on': 'custom on text',
'body_off': 'custom off text',
}
})
assert aioclient_mock.call_count == 1
assert_setup_component(1, 'switch')
class TestRestSwitch:
"""Tests for REST switch platform."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.name = 'foo'
self.method = 'post'
self.resource = 'http://localhost/'
self.headers = {'Content-type': 'application/json'}
self.auth = None
self.body_on = Template('on', self.hass)
self.body_off = Template('off', self.hass)
self.switch = rest.RestSwitch(
self.name, self.resource, self.method, self.headers, self.auth,
self.body_on, self.body_off, None, 10)
self.switch.hass = self.hass
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_name(self):
"""Test the name."""
assert self.name == self.switch.name
def test_is_on_before_update(self):
"""Test is_on in initial state."""
assert self.switch.is_on is None
def test_turn_on_success(self, aioclient_mock):
"""Test turn_on."""
aioclient_mock.post(self.resource, status=200)
run_coroutine_threadsafe(
self.switch.async_turn_on(), self.hass.loop).result()
assert self.body_on.template == \
aioclient_mock.mock_calls[-1][2].decode()
assert self.switch.is_on
def test_turn_on_status_not_ok(self, aioclient_mock):
"""Test turn_on when error status returned."""
aioclient_mock.post(self.resource, status=500)
run_coroutine_threadsafe(
self.switch.async_turn_on(), self.hass.loop).result()
assert self.body_on.template == \
aioclient_mock.mock_calls[-1][2].decode()
assert self.switch.is_on is None
def test_turn_on_timeout(self, aioclient_mock):
"""Test turn_on when timeout occurs."""
        aioclient_mock.post(self.resource, exc=asyncio.TimeoutError())
run_coroutine_threadsafe(
self.switch.async_turn_on(), self.hass.loop).result()
assert self.switch.is_on is None
def test_turn_off_success(self, aioclient_mock):
"""Test turn_off."""
aioclient_mock.post(self.resource, status=200)
run_coroutine_threadsafe(
self.switch.async_turn_off(), self.hass.loop).result()
assert self.body_off.template == \
aioclient_mock.mock_calls[-1][2].decode()
assert not self.switch.is_on
def test_turn_off_status_not_ok(self, aioclient_mock):
"""Test turn_off when error status returned."""
aioclient_mock.post(self.resource, status=500)
run_coroutine_threadsafe(
self.switch.async_turn_off(), self.hass.loop).result()
assert self.body_off.template == \
aioclient_mock.mock_calls[-1][2].decode()
assert self.switch.is_on is None
def test_turn_off_timeout(self, aioclient_mock):
"""Test turn_off when timeout occurs."""
aioclient_mock.post(self.resource, exc=asyncio.TimeoutError())
run_coroutine_threadsafe(
            self.switch.async_turn_off(), self.hass.loop).result()
assert self.switch.is_on is None
def test_update_when_on(self, aioclient_mock):
"""Test update when switch is on."""
aioclient_mock.get(self.resource, text=self.body_on.template)
run_coroutine_threadsafe(
self.switch.async_update(), self.hass.loop).result()
assert self.switch.is_on
def test_update_when_off(self, aioclient_mock):
"""Test update when switch is off."""
aioclient_mock.get(self.resource, text=self.body_off.template)
run_coroutine_threadsafe(
self.switch.async_update(), self.hass.loop).result()
assert not self.switch.is_on
def test_update_when_unknown(self, aioclient_mock):
"""Test update when unknown status returned."""
aioclient_mock.get(self.resource, text='unknown status')
run_coroutine_threadsafe(
self.switch.async_update(), self.hass.loop).result()
assert self.switch.is_on is None
def test_update_timeout(self, aioclient_mock):
"""Test update when timeout occurs."""
aioclient_mock.get(self.resource, exc=asyncio.TimeoutError())
run_coroutine_threadsafe(
self.switch.async_update(), self.hass.loop).result()
assert self.switch.is_on is None
|
{
"content_hash": "c813ab03b861da184cf1f0945b4ef7bc",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 75,
"avg_line_length": 36.766666666666666,
"alnum_prop": 0.5995337391529595,
"repo_name": "tinloaf/home-assistant",
"id": "cb27ab40855473b82962080db3d73d2565ffa064",
"size": "7721",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/switch/test_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
"""
Adds support for first class features that can be added to the edX platform.
"""
from stevedore.extension import ExtensionManager
class PluginError(Exception):
"""
Base Exception for when an error was found regarding features.
"""
pass
class PluginManager(object):
"""
Base class that manages plugins to the edX platform.
"""
@classmethod
def get_available_plugins(cls):
"""
Returns a dict of all the plugins that have been made available through the platform.
"""
# Note: we're creating the extension manager lazily to ensure that the Python path
# has been correctly set up. Trying to create this statically will fail, unfortunately.
if not hasattr(cls, "_plugins"):
plugins = {}
extension_manager = ExtensionManager(namespace=cls.NAMESPACE) # pylint: disable=no-member
for plugin_name in extension_manager.names():
plugins[plugin_name] = extension_manager[plugin_name].plugin
cls._plugins = plugins
return cls._plugins
@classmethod
def get_plugin(cls, name):
"""
Returns the plugin with the given name.
"""
plugins = cls.get_available_plugins()
if name not in plugins:
raise PluginError("No such plugin {name} for entry point {namespace}".format(
name=name,
namespace=cls.NAMESPACE # pylint: disable=no-member
))
return plugins[name]
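# Illustrative subclass (the namespace and plugin name are hypothetical):
#
#     class CourseAppPluginManager(PluginManager):
#         NAMESPACE = 'my_platform.course_apps'
#
#     grades_app = CourseAppPluginManager.get_plugin('grades')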
|
{
"content_hash": "0fa908b5312a6f2e570130f74db58ce6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 102,
"avg_line_length": 33.71111111111111,
"alnum_prop": 0.6282135794330916,
"repo_name": "bmedx/platform-core",
"id": "576dc9cec5a112c5190601ff0110acdc48965e43",
"size": "1517",
"binary": false,
"copies": "152",
"ref": "refs/heads/master",
"path": "platform_core/lib/api/plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3564"
},
{
"name": "Python",
"bytes": "76963"
}
],
"symlink_target": ""
}
|
class EmptyBoardException(Exception):
'''
Exception to be raised for
errors involving an empty board.
'''
pass
class EndsMismatchException(Exception):
'''
Exception to be raised for errors
involving mismatched domino ends.
'''
pass
class GameInProgressException(Exception):
'''
Exception to be raised for errors
involving a game that is in progress.
'''
pass
class GameOverException(Exception):
'''
Exception to be raised for errors
involving a game that has ended.
'''
pass
class NoSuchDominoException(Exception):
'''
Exception to be raised for errors
involving a specific missing domino.
'''
pass
class NoSuchPlayerException(Exception):
'''
Exception to be raised for errors
involving a specific missing player.
'''
pass
class SeriesOverException(Exception):
'''
Exception to be raised for errors
involving a series that has ended.
'''
pass
|
{
"content_hash": "c2e1ca2fcce30388ccaa18496b5fb129",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 41,
"avg_line_length": 20.604166666666668,
"alnum_prop": 0.6663296258847321,
"repo_name": "abw333/dominoes",
"id": "fdc93558217a61b4f4907707bbe951612fadbdf6",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dominoes/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131060"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'XForm.created_by'
db.add_column(u'logger_xform', 'created_by',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'XForm.created_by'
db.delete_column(u'logger_xform', 'created_by_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'logger.instance': {
'Meta': {'object_name': 'Instance'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryCollectionField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': "orm['logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.instancehistory': {
'Meta': {'object_name': 'InstanceHistory'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['logger.Instance']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.note': {
'Meta': {'object_name': 'Note'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['logger.Instance']"}),
'note': ('django.db.models.fields.TextField', [], {})
},
'logger.project': {
'Meta': {'unique_together': "(('name', 'organization'),)", 'object_name': 'Project'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_owner'", 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('jsonfield.fields.JSONField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_org'", 'to': u"orm['auth.User']"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_stars': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'project_stars'", 'symmetrical': 'False', 'to': u"orm['auth.User']"})
},
'logger.projectxform': {
'Meta': {'unique_together': "(('xform', 'project'),)", 'object_name': 'ProjectXForm'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_creator'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_projects'", 'to': "orm['logger.Project']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_xforms'", 'to': "orm['logger.XForm']"})
},
'logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string', 'project'), ('user', 'sms_id_string', 'project'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.Project']"}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.ziggyinstance': {
'Meta': {'object_name': 'ZiggyInstance'},
'client_version': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.CharField', [], {'max_length': '249'}),
'form_instance': ('django.db.models.fields.TextField', [], {}),
'form_version': ('django.db.models.fields.CharField', [], {'default': "u'1.0'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '249'}),
'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggys'", 'to': u"orm['auth.User']"}),
'server_version': ('django.db.models.fields.BigIntegerField', [], {}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggy_submissions'", 'null': 'True', 'to': "orm['logger.XForm']"})
}
}
complete_apps = ['logger']
|
{
"content_hash": "62157bc0a62a42519c14efdfac0d258c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 195,
"avg_line_length": 81.0297619047619,
"alnum_prop": 0.5511643282156762,
"repo_name": "piqoni/onadata",
"id": "c2648c4e1cad62d88bef6ec5a92f1415c35a0c16",
"size": "13637",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "onadata/apps/logger/migrations/0052_auto__add_field_xform_created_by.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "74590"
},
{
"name": "Gettext Catalog",
"bytes": "558412"
},
{
"name": "HTML",
"bytes": "248856"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2569475"
},
{
"name": "Shell",
"bytes": "11725"
}
],
"symlink_target": ""
}
|
import os
import glob
import shutil
from collections import defaultdict
from IPython.utils.traitlets import Bool
from nbgrader.apps.baseapp import TransferApp, transfer_aliases, transfer_flags
from nbgrader.utils import check_mode, parse_utc
aliases = {}
aliases.update(transfer_aliases)
aliases.update({
})
flags = {}
flags.update(transfer_flags)
flags.update({
'update': (
{'CollectApp' : {'update': True}},
"Update existing submissions with ones that have newer timestamps."
),
})
def groupby(l, key=lambda x: x):
d = defaultdict(list)
for item in l:
d[key(item)].append(item)
return d
class CollectApp(TransferApp):
name = u'nbgrader-collect'
description = u'Collect an assignment from the nbgrader exchange'
aliases = aliases
flags = flags
examples = """
    Collect assignments students have submitted. For use by instructors.
This command is run from the top-level nbgrader folder. Before running
this command, you must set the unique `course_id` for the course. It must be
unique for each instructor/course combination. To set it in the config
file add a line to the `nbgrader_config.py` file:
c.NbGraderConfig.course_id = 'phys101'
To pass the `course_id` at the command line, add `--course=phys101` to any
of the below commands.
To collect `assignment1` for all students:
nbgrader collect assignment1
To collect `assignment1` for only `student1`:
nbgrader collect --student=student1 assignment1
Collected assignments will go into the `submitted` folder with the proper
directory structure to start grading. All submissions are timestamped and
students can turn an assignment in multiple times. The `collect` command
will always get the most recent submission from each student, but it will
never overwrite an existing submission unless you provide the `--update`
flag:
nbgrader collect --update assignment1
"""
update = Bool(
False,
config=True,
help="Update existing submissions with ones that have newer timestamps."
)
def init_args(self):
if len(self.extra_args) == 1:
self.assignment_id = self.extra_args[0]
else:
self.fail("Invalid number of argument, call as `nbgrader release ASSIGNMENT`.")
def _path_to_record(self, path):
filename = os.path.split(path)[1]
# Only split twice on +, giving three components. This allows usernames with +.
filename_list = filename.rsplit('+', 2)
if len(filename_list) != 3:
self.fail("Invalid filename: {}".format(filename))
username = filename_list[0]
timestamp = parse_utc(filename_list[2])
return {'username': username, 'filename': filename, 'timestamp': timestamp}
def _sort_by_timestamp(self, records):
return sorted(records, key=lambda item: item['timestamp'], reverse=True)
def init_src(self):
self.course_path = os.path.join(self.exchange_directory, self.course_id)
self.inbound_path = os.path.join(self.course_path, 'inbound')
if not os.path.isdir(self.inbound_path):
self.fail("Course not found: {}".format(self.inbound_path))
if not check_mode(self.inbound_path, read=True, execute=True):
self.fail("You don't have read permissions for the directory: {}".format(self.inbound_path))
student_id = self.student_id if self.student_id else '*'
pattern = os.path.join(self.inbound_path, '{}+{}+*'.format(student_id, self.assignment_id))
records = [self._path_to_record(f) for f in glob.glob(pattern)]
usergroups = groupby(records, lambda item: item['username'])
self.src_records = [self._sort_by_timestamp(v)[0] for v in usergroups.values()]
def init_dest(self):
submit_dir = os.path.abspath(self.submitted_directory)
if not os.path.isdir(submit_dir):
os.mkdir(submit_dir)
def copy_files(self):
for rec in self.src_records:
student_id = rec['username']
src_path = os.path.join(self.inbound_path, rec['filename'])
dest_path = os.path.abspath(self.directory_structure.format(
nbgrader_step=self.submitted_directory,
student_id=student_id,
assignment_id=self.assignment_id
))
copy = False
updating = False
if os.path.isdir(dest_path):
existing_timestamp = self._get_existing_timestamp(dest_path)
new_timestamp = rec['timestamp']
if self.update and (existing_timestamp is None or new_timestamp > existing_timestamp):
copy = True
updating = True
else:
copy = True
if copy:
if updating:
self.log.info("Updating submission: {} {}".format(student_id, self.assignment_id))
shutil.rmtree(dest_path)
else:
self.log.info("Collecting submission: {} {}".format(student_id, self.assignment_id))
self.do_copy(src_path, dest_path)
else:
if self.update:
self.log.info("No newer submission to collect: {} {}".format(
student_id, self.assignment_id
))
else:
self.log.info("Submission already exists, use --update to update: {} {}".format(
student_id, self.assignment_id
))
|
{
"content_hash": "1285b5e4a618456577fc1e908d0ef169",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 106,
"avg_line_length": 39.25,
"alnum_prop": 0.600964021346187,
"repo_name": "jdfreder/nbgrader",
"id": "3a6bece38563c4739d9fefabeb6fc8fc92f5fac5",
"size": "5809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nbgrader/apps/collectapp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2134"
},
{
"name": "JavaScript",
"bytes": "124792"
},
{
"name": "Python",
"bytes": "360006"
},
{
"name": "Smarty",
"bytes": "22255"
}
],
"symlink_target": ""
}
|
"""Tests for HomematicIP Cloud cover."""
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
DOMAIN as COVER_DOMAIN,
)
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.const import STATE_CLOSED, STATE_OPEN
from homeassistant.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(hass):
"""Test that we do not set up an access point."""
assert (
await async_setup_component(
hass, COVER_DOMAIN, {COVER_DOMAIN: {"platform": HMIPC_DOMAIN}}
)
is True
)
assert not hass.data.get(HMIPC_DOMAIN)
async def test_hmip_cover_shutter(hass, default_mock_hap):
"""Test HomematicipCoverShutte."""
entity_id = "cover.sofa_links"
entity_name = "Sofa links"
device_model = "HmIP-FBL"
ha_state, hmip_device = get_and_check_entity_basics(
hass, default_mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == "closed"
assert ha_state.attributes["current_position"] == 0
assert ha_state.attributes["current_tilt_position"] == 0
service_call_counter = len(hmip_device.mock_calls)
await hass.services.async_call(
"cover", "open_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (0,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover",
"set_cover_position",
{"entity_id": entity_id, "position": "50"},
blocking=True,
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (0.5,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0.5)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 50
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "close_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 5
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 1)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_CLOSED
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 0
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "stop_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 7
assert hmip_device.mock_calls[-1][0] == "set_shutter_stop"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_CLOSED
async def test_hmip_cover_slats(hass, default_mock_hap):
"""Test HomematicipCoverSlats."""
entity_id = "cover.sofa_links"
entity_name = "Sofa links"
device_model = "HmIP-FBL"
ha_state, hmip_device = get_and_check_entity_basics(
hass, default_mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_CLOSED
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 0
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
service_call_counter = len(hmip_device.mock_calls)
await hass.services.async_call(
"cover", "open_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (0,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 0)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
await hass.services.async_call(
"cover",
"set_cover_tilt_position",
{"entity_id": entity_id, "tilt_position": "50"},
blocking=True,
)
assert len(hmip_device.mock_calls) == service_call_counter + 4
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (0.5,)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 0.5)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
await hass.services.async_call(
"cover", "close_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 6
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 1)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "stop_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 8
assert hmip_device.mock_calls[-1][0] == "set_shutter_stop"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
|
{
"content_hash": "bc22bb6a98d4c6f178b8d971a4832994",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 77,
"avg_line_length": 41.28387096774193,
"alnum_prop": 0.6718237224566338,
"repo_name": "joopert/home-assistant",
"id": "22922303f9e0d2ce04c967c9ae54f430dc49706c",
"size": "6399",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/homematicip_cloud/test_cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from magnumclient.common import utils
from magnumclient.openstack.common.apiclient import exceptions as exc
from magnumclient.tests import utils as test_utils
class CommonFiltersTest(test_utils.BaseTestCase):
def test_limit(self):
result = utils.common_filters(limit=42)
self.assertEqual(['limit=42'], result)
def test_limit_0(self):
result = utils.common_filters(limit=0)
self.assertEqual([], result)
def test_other(self):
for key in ('marker', 'sort_key', 'sort_dir'):
result = utils.common_filters(**{key: 'test'})
self.assertEqual(['%s=test' % key], result)
class SplitAndDeserializeTest(test_utils.BaseTestCase):
def test_split_and_deserialize(self):
ret = utils.split_and_deserialize('str=foo')
self.assertEqual(('str', 'foo'), ret)
ret = utils.split_and_deserialize('int=1')
self.assertEqual(('int', 1), ret)
ret = utils.split_and_deserialize('bool=false')
self.assertEqual(('bool', False), ret)
ret = utils.split_and_deserialize('list=[1, "foo", 2]')
self.assertEqual(('list', [1, "foo", 2]), ret)
ret = utils.split_and_deserialize('dict={"foo": 1}')
self.assertEqual(('dict', {"foo": 1}), ret)
ret = utils.split_and_deserialize('str_int="1"')
self.assertEqual(('str_int', "1"), ret)
def test_split_and_deserialize_fail(self):
self.assertRaises(exc.CommandError,
utils.split_and_deserialize, 'foo:bar')
class ArgsArrayToPatchTest(test_utils.BaseTestCase):
def test_args_array_to_patch(self):
my_args = {
'attributes': ['str=foo', 'int=1', 'bool=true',
'list=[1, 2, 3]', 'dict={"foo": "bar"}'],
'op': 'add',
}
patch = utils.args_array_to_patch(my_args['op'],
my_args['attributes'])
self.assertEqual([{'op': 'add', 'value': 'foo', 'path': '/str'},
{'op': 'add', 'value': 1, 'path': '/int'},
{'op': 'add', 'value': True, 'path': '/bool'},
{'op': 'add', 'value': [1, 2, 3], 'path': '/list'},
{'op': 'add', 'value': {"foo": "bar"},
'path': '/dict'}], patch)
def test_args_array_to_patch_format_error(self):
my_args = {
'attributes': ['foobar'],
'op': 'add',
}
self.assertRaises(exc.CommandError, utils.args_array_to_patch,
my_args['op'], my_args['attributes'])
def test_args_array_to_patch_remove(self):
my_args = {
'attributes': ['/foo', 'extra/bar'],
'op': 'remove',
}
patch = utils.args_array_to_patch(my_args['op'],
my_args['attributes'])
self.assertEqual([{'op': 'remove', 'path': '/foo'},
{'op': 'remove', 'path': '/extra/bar'}], patch)
|
{
"content_hash": "9f8fff90b54bf49330c392e089b7f4ad",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 77,
"avg_line_length": 38.05,
"alnum_prop": 0.5174113009198423,
"repo_name": "ramielrowe/python-magnumclient",
"id": "e67cf38af0c73c2a57087a7238a63e76e4b4d12b",
"size": "3700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnumclient/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "280159"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.