| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MarketSegment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='MarketStrategy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
]
|
{
"content_hash": "dc3ba5707c87ad98df3f8a8bc8edb824",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 114,
"avg_line_length": 28.185185185185187,
"alnum_prop": 0.5216819973718791,
"repo_name": "Semprini/cbe-retail",
"id": "c03485eb1867d5b450471c7d51b1c26b4efb6eef",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retail/market/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24348"
},
{
"name": "Python",
"bytes": "233612"
},
{
"name": "Shell",
"bytes": "970"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
from json import loads
import os
from dublincore.models import AbstractQualifiedDublinCoreTerm
import re
try:
from urllib.request import urlopen
from urllib.parse import urlencode, quote, unquote
except ImportError:
from urllib import urlopen, urlencode, quote, unquote
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model, get_model
from django.template import (Context, Node, TextNode, Template,
TemplateSyntaxError, TOKEN_TEXT, TOKEN_VAR, TOKEN_COMMENT, TOKEN_BLOCK)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
import Image
import ImageFile
import ImageOps
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
@register.filter
def dcterm(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1)
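    # Illustrative behaviour of convert (follows from the two regexes above):
    # convert("AlternativeTitle") -> "Alternative Title"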
content = convert(dict(AbstractQualifiedDublinCoreTerm.DCTERMS)[content.term])
return content
@register.filter
def resource_type(content):
return content._meta.verbose_name
@register.filter
def contact(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
if not content:
return ''
if not content.is_authenticated():
content = "Anonymous"
elif content.first_name:
content = """<a href='/user/{un}/'>{fn} {ln}<a>""".format(fn=content.first_name, ln=content.last_name, un=content.username)
else:
content = """<a href='/user/{un}/'>{un}<a>""".format(un=content.username)
return content
@register.filter
def best_name(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
if not content.is_authenticated():
content = "Anonymous"
elif content.first_name:
content = """{fn} {ln}""".format(fn=content.first_name, ln=content.last_name, un=content.username)
else:
content = content.username
return content
|
{
"content_hash": "c30657a8f409c36c894b2a7a406e9784",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 131,
"avg_line_length": 33.04854368932039,
"alnum_prop": 0.7379553466509988,
"repo_name": "hydroshare/hydroshare_temp",
"id": "5a1a2465c084963e2dea07fb7b7b6287693b8fe8",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hs_core/templatetags/hydroshare_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "173515"
},
{
"name": "C++",
"bytes": "4136"
},
{
"name": "CSS",
"bytes": "228598"
},
{
"name": "CoffeeScript",
"bytes": "34267"
},
{
"name": "JavaScript",
"bytes": "736373"
},
{
"name": "Python",
"bytes": "1870088"
},
{
"name": "Shell",
"bytes": "5335"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.WordsApi import ApiException
from asposewordscloud.models import ProtectionRequest
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Words API SDK
api_client = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, True)
wordsApi = WordsApi(api_client)
#set input file name
filename = "SampleProtectedBlankWordDocument.docx"
destfilename = "updated-" + filename
body = ProtectionRequest.ProtectionRequest()
body.Password = "aspose"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Words Cloud SDK API to unprotect a word document
response = wordsApi.DeleteUnprotectDocument(name=filename, body=body, filename=destfilename)
if response.Status == "OK":
print "Document has been unprotected successfully"
#download updated document from storage server
response = storageApi.GetDownload(Path=destfilename)
outfilename = "c:/temp/" + destfilename
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
{
"content_hash": "224c521948255fd6e78e9503f9c31cb0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 96,
"avg_line_length": 34.254901960784316,
"alnum_prop": 0.7418431597023469,
"repo_name": "asposewords/Aspose_Words_Cloud",
"id": "d5e398aab542a7288adf9e0e5a6748219f533ca3",
"size": "1747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/Examples/RemoveProctionFromWordDocument.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "472472"
},
{
"name": "Java",
"bytes": "370151"
},
{
"name": "JavaScript",
"bytes": "310294"
},
{
"name": "Objective-C",
"bytes": "569511"
},
{
"name": "PHP",
"bytes": "265802"
},
{
"name": "Python",
"bytes": "350862"
},
{
"name": "Ruby",
"bytes": "386058"
}
],
"symlink_target": ""
}
|
import re
from typing import Dict
from pandas import DataFrame
from lib.data_source import DataSource
from lib.time import datetime_isoformat
class OxfordGovernmentResponseDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = dataframes[0]
data = data.drop(columns=["CountryName", "ConfirmedCases", "ConfirmedDeaths"])
data = data.drop(columns=[col for col in data.columns if col.endswith("_Notes")])
data = data.drop(columns=[col for col in data.columns if col.endswith("_IsGeneral")])
data["date"] = data["Date"].apply(lambda x: datetime_isoformat(x, "%Y%m%d"))
# Drop redundant flag columns
data = data.drop(columns=[col for col in data.columns if "_Flag" in col])
# Join with ISO data to retrieve our known regions
data = data.rename(columns={"CountryCode": "3166-1-alpha-3"}).merge(aux["country_codes"])
data = data.rename(columns={"3166-1-alpha-2": "country_code"})
# Make sure subregions are allowed to match this data
data = data.drop(columns=["key"])
data["subregion1_code"] = None
data["subregion2_code"] = None
data["locality_code"] = None
subregion1_mask = data["RegionCode"].notna()
data.loc[subregion1_mask, "subregion1_code"] = data.loc[
subregion1_mask, "RegionCode"
].apply(lambda x: x.split("_")[-1])
# Some region codes are not using the usual ISO standard
hotfix_triplets = [("GB", "SCO", "SCT"), ("GB", "WAL", "WLS")]
for country_code, subregion1_code, replacement in hotfix_triplets:
country_code_mask = data["country_code"] == country_code
subregion1_code_mask = data["subregion1_code"] == subregion1_code
data.loc[country_code_mask & subregion1_code_mask, "subregion1_code"] = replacement
# Use consistent naming convention for columns
keep_columns = [
"date",
"country_code",
"subregion1_code",
"subregion2_code",
"locality_code",
"StringencyIndex",
]
data = data[[col for col in data.columns if "_" in col or col in keep_columns]]
data.columns = [
col if col in keep_columns else col.split("_")[-1].lower() for col in data.columns
]
data.columns = [re.sub(r"\s", "_", col) for col in data.columns]
# Fix column typos
data = data.rename(
columns={
"StringencyIndex": "stringency_index",
"close_public_transport": "public_transport_closing",
"debt/contract_relief": "debt_relief",
}
)
# Remove unneeded columns and output
return data.drop(columns=["wildcard"])
|
{
"content_hash": "2ff3aa46cac9bf50a3e3f6a0196ea43e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 97,
"avg_line_length": 42.88059701492537,
"alnum_prop": 0.6028541594152453,
"repo_name": "GoogleCloudPlatform/covid-19-open-data",
"id": "39d3a1dec97e6eb6877dc01b5a45ba273bb4f3ba",
"size": "3449",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pipelines/oxford_government_response/oxford_government_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1839"
},
{
"name": "Python",
"bytes": "901210"
},
{
"name": "Shell",
"bytes": "10370"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Fisher'], ['ConstantTrend'], ['BestCycle'], ['AR'])
|
{
"content_hash": "f4da4fd667d66159bcd5b314fb2c9975",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 79,
"avg_line_length": 38,
"alnum_prop": 0.7039473684210527,
"repo_name": "antoinecarme/pyaf",
"id": "18557cdb85b92b9513a340e59a8e3499d21e2541",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_ConstantTrend_BestCycle_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
from datetime import datetime
from subprocess import Popen
from malmopy.agent import QLearnerAgent, TemporalMemory
from malmopy.environment.gym import GymEnvironment
try:
from malmopy.visualization.tensorboard import TensorboardVisualizer
from malmopy.visualization.tensorboard.cntk import CntkConverter
TENSORBOARD_AVAILABLE = True
except ImportError:
print('Cannot import tensorboard, using ConsoleVisualizer.')
from malmopy.visualization import ConsoleVisualizer
TENSORBOARD_AVAILABLE = False
ROOT_FOLDER = 'results/baselines/%s/dqn/%s-%s'
EPOCH_SIZE = 250000
def visualize_training(visualizer, step, rewards, tag='Training'):
visualizer.add_entry(step, '%s/reward per episode' % tag, sum(rewards))
visualizer.add_entry(step, '%s/max.reward' % tag, max(rewards))
visualizer.add_entry(step, '%s/min.reward' % tag, min(rewards))
visualizer.add_entry(step, '%s/actions per episode' % tag, len(rewards)-1)
def run_experiment(environment, backend, device_id, max_epoch, record, logdir,
visualizer):
env = GymEnvironment(environment,
monitoring_path=logdir if record else None)
if backend == 'cntk':
from malmopy.model.cntk import QNeuralNetwork as CntkDQN
model = CntkDQN((4, 84, 84), env.available_actions, momentum=0.95,
device_id=device_id, visualizer=visualizer)
else:
from malmopy.model.chainer import DQNChain, QNeuralNetwork as ChainerDQN
chain = DQNChain((4, 84, 84), env.available_actions)
target_chain = DQNChain((4, 84, 84), env.available_actions)
model = ChainerDQN(chain, target_chain,
momentum=0.95, device_id=device_id)
memory = TemporalMemory(1000000, model.input_shape[1:])
agent = QLearnerAgent("DQN Agent", env.available_actions, model, memory,
0.99, 32, train_after=10000, reward_clipping=(-1, 1),
visualizer=visualizer)
state = env.reset()
reward = 0
agent_done = False
viz_rewards = []
max_training_steps = max_epoch * EPOCH_SIZE
for step in range(1, max_training_steps + 1):
# check if env needs reset
if env.done:
visualize_training(visualizer, step, viz_rewards)
agent.inject_summaries(step)
viz_rewards = []
state = env.reset()
# select an action
action = agent.act(state, reward, agent_done, is_training=True)
# take a step
state, reward, agent_done = env.do(action)
viz_rewards.append(reward)
if (step % EPOCH_SIZE) == 0:
model.save('%s-%s-dqn_%d.model' %
(backend, environment, step / EPOCH_SIZE))
if __name__ == '__main__':
arg_parser = ArgumentParser(description='OpenAI Gym DQN example')
arg_parser.add_argument('-b', '--backend', type=str, default='cntk',
choices=['cntk', 'chainer'],
help='Neural network backend to use.')
arg_parser.add_argument('-d', '--device', type=int, default=-1,
help='GPU device on which to run the experiment.')
arg_parser.add_argument('-r', '--record', action='store_true',
help='Setting this will record runs')
arg_parser.add_argument('-e', '--epochs', type=int, default=50,
help='Number of epochs. One epoch is 250k actions.')
arg_parser.add_argument('-p', '--port', type=int, default=6006,
help='Port for running tensorboard.')
arg_parser.add_argument('env', type=str, metavar='environment',
nargs='?', default='Breakout-v3',
help='Gym environment to run')
args = arg_parser.parse_args()
logdir = ROOT_FOLDER % (args.env, args.backend, datetime.utcnow().isoformat())
if TENSORBOARD_AVAILABLE:
visualizer = TensorboardVisualizer()
visualizer.initialize(logdir, None)
print('Starting tensorboard ...')
p = Popen(['tensorboard', '--logdir=results', '--port=%d' % args.port])
else:
visualizer = ConsoleVisualizer()
print('Starting experiment')
run_experiment(args.env, args.backend, int(args.device), args.epochs,
args.record, logdir, visualizer)
|
{
"content_hash": "fea55196471a07b9a91b3e276af349ba",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 82,
"avg_line_length": 39.535714285714285,
"alnum_prop": 0.6172086720867209,
"repo_name": "village-people/flying-pig",
"id": "7cfe7717ed32b3abdd05c2ef775b10cd0bc703f4",
"size": "5642",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/atari/gym_atari_dqn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "370890"
},
{
"name": "Shell",
"bytes": "67"
},
{
"name": "XSLT",
"bytes": "372375"
}
],
"symlink_target": ""
}
|
from migen import *
# Goals:
# - understand how to create simple logical core
# - understand how to create a FSM
# Indications:
# You can choose to code the clock core with only
# - If/Elif/Else in Migen:
# https://m-labs.hk/migen/manual/fhdl.html#statements
#
# or with
#
# - a FSM in Migen:
# https://github.com/m-labs/migen/blob/master/examples/basic/fsm.py
#
# or try both...
# Core ---------------------------------------------------------------------------------------------
class Core(Module):
def __init__(self):
# Module's interface
self.tick = Signal() # input
self.seconds = Signal(6) # output
self.minutes = Signal(6) # output
self.hours = Signal(5) # output
# Set interface
self.inc_minutes = Signal() # input
        self.inc_hours = Signal() # input
# # #
        # Synchronous assignment
self.sync += [
# -- TO BE COMPLETED --
# At each tick
If(self.tick,
self.seconds.eq(self.seconds + 1),
# [...]
)
# -- TO BE COMPLETED --
]
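        # A possible completion sketch for the If/Elif/Else variant (not part
        # of the original lab handout; assumes a 24-hour clock and ignores the
        # inc_minutes/inc_hours set interface):
        #
        # self.sync += [
        #     If(self.tick,
        #         If(self.seconds == 59,
        #             self.seconds.eq(0),
        #             If(self.minutes == 59,
        #                 self.minutes.eq(0),
        #                 If(self.hours == 23,
        #                     self.hours.eq(0)
        #                 ).Else(
        #                     self.hours.eq(self.hours + 1)
        #                 )
        #             ).Else(
        #                 self.minutes.eq(self.minutes + 1)
        #             )
        #         ).Else(
        #             self.seconds.eq(self.seconds + 1)
        #         )
        #     )
        # ]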
# CoreFSM ------------------------------------------------------------------------------------------
class CoreFSM(Module):
def __init__(self):
# Module's interface
self.tick = Signal() # input
self.seconds = Signal(6) # output
self.minutes = Signal(6) # output
self.hours = Signal(5) # output
# Set interface
self.inc_minutes = Signal() # input
        self.inc_hours = Signal() # input
# # #
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
# -- TO BE COMPLETED --
fsm.act("IDLE",
If(self.tick,
NextState("INC_SECONDS")
),
)
fsm.act("INC_SECONDS",
NextValue(self.seconds, self.seconds + 1),
NextState("IDLE")
)
fsm.act("INC_MINUTES",
NextState("IDLE")
)
fsm.act("INC_HOURS",
NextState("IDLE")
)
# -- TO BE COMPLETED --
# Main ---------------------------------------------------------------------------------------------
if __name__ == '__main__':
# Seven segment simulation
print("Core simulation")
# Uncomment the one you want to simulate
dut = Core()
#dut = CoreFSM()
def show_time(cycle, hours, minutes, seconds):
print("cycle %d: hh:%02d, mm:%02d, ss:%02d" %(cycle, hours, minutes, seconds))
def dut_tb(dut):
yield dut.tick.eq(1) # Tick active on each cycle
for i in range(3600*48):
yield
show_time(i,
(yield dut.hours),
(yield dut.minutes),
(yield dut.seconds))
run_simulation(dut, dut_tb(dut), vcd_name="core.vcd")
|
{
"content_hash": "413fadd3e2d46f7da5bd076e4924ca95",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 26.401869158878505,
"alnum_prop": 0.4715044247787611,
"repo_name": "litex-hub/fpga_101",
"id": "97a317222f67a2f7a553018d90e1c8170b6cd43c",
"size": "2825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab002/core.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2501"
},
{
"name": "Makefile",
"bytes": "694"
},
{
"name": "Python",
"bytes": "87767"
},
{
"name": "VHDL",
"bytes": "516"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals, division
def use(**kwargs):
"""
Updates the active resource configuration to the passed
keyword arguments.
Invoking this method without passing arguments will just return the
active resource configuration.
@returns
The previous configuration.
"""
config = dict(use.config)
use.config.update(kwargs)
return config
# Set the initial resource configuration.
use.config = {}
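# A minimal usage sketch (hypothetical option names, not part of this module):
#
#   previous = use(connectors={'http': 'some.connector.path'})
#   use()            # no arguments: returns the active configuration unchanged
#   use(debug=True)  # merges into use.config, returns the prior configuration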
|
{
"content_hash": "8b744fb064b751951aa8d71d97ee0738",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 24.1,
"alnum_prop": 0.7053941908713693,
"repo_name": "armet/python-armet",
"id": "0a904fdf3e49915a3dc1a15e5a5154c2d39b3d72",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armet/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285382"
}
],
"symlink_target": ""
}
|
from django import forms
from haystack.forms import FacetedSearchForm
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
import datetime
class MyForm(FacetedSearchForm):
#faceted_choices = (('author', 'Author'), ('creators', 'Creators'),('subjects', 'Subjects'),
# ('public', 'Public'),('discoverable', 'Discoverable'), ('language', 'Language'), ('resource_type', 'Resource Type'))
#faceted_field = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=faceted_choices)
faceted_fields = ['author', 'subjects', 'resource_type', 'public', 'owners_names', 'discoverable']
def search(self):
sqs = super(MyForm, self).search().filter(discoverable=True)
if not self.is_valid():
return self.no_query_found()
for field in self.faceted_fields:
sqs = sqs.facet(field)
#sqs.stats('viewers_count').stats_results()['viewers_count']['max']
#sqs = sqs.range_facet('viewers_count', start=0.0, end=100.0, gap=20.0)
#sqs = sqs.date_facet('created', start_date=datetime.date(2015, 01, 01), end_date=datetime.date(2015, 12, 01), gap_by='month')
#sqs = sqs.date_facet('modified', start_date=datetime.date(2015, 01, 01), end_date=datetime.date(2015, 12, 01), gap_by='month')
#sqs = sqs.stats('created')
#sqs = sqs.stats_results()
return sqs
|
{
"content_hash": "587168024435001b72c069c7b1057c8e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 140,
"avg_line_length": 50.107142857142854,
"alnum_prop": 0.6478973627940128,
"repo_name": "hydroshare/hydroshare",
"id": "1ca2bd7f3d491dd624ab0c95b1fe5268058babbf",
"size": "1403",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "hs_core/customer_form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183727"
},
{
"name": "Dockerfile",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "950010"
},
{
"name": "JavaScript",
"bytes": "1450537"
},
{
"name": "Python",
"bytes": "5786593"
},
{
"name": "R",
"bytes": "4904"
},
{
"name": "Shell",
"bytes": "94173"
},
{
"name": "Vue",
"bytes": "32043"
}
],
"symlink_target": ""
}
|
import mock
from oslo_config import cfg
from arsenal.director import scheduler
from arsenal.strategy import base as sb
from arsenal.tests.unit import base
CONF = cfg.CONF
def strat_directive_mock():
return [
sb.CacheNode('node-a', 'image-a', 'checksum-a'),
sb.CacheNode('node-b', 'image-b', 'checksum-b'),
sb.CacheNode('node-c', 'image-c', 'checksum-c'),
sb.CacheNode('node-d', 'image-d', 'checksum-d'),
sb.CacheNode('node-e', 'image-e', 'checksum-e'),
sb.EjectNode('node-f'),
sb.EjectNode('node-g'),
sb.EjectNode('node-h'),
sb.EjectNode('node-i'),
sb.EjectNode('node-J'),
]
FAKE_FLAVOR_DATA = [
sb.FlavorInput('io-flavor', lambda n: True),
sb.FlavorInput('memory-flavor', lambda n: True),
sb.FlavorInput('cpu-flavor', lambda n: True)
]
FAKE_IMAGE_DATA = [
sb.ImageInput('Ubuntu', 'aaaa', 'ubuntu-checksum'),
sb.ImageInput('CoreOS', 'aaaa', 'coreos-checksum'),
sb.ImageInput('ArchLinux', 'aaaa', 'archlinux-checksum')
]
FAKE_NODE_DATA = [
sb.NodeInput('abcd', 'io-flavor', False, False),
sb.NodeInput('hjkl', 'memory-flavor', False, False),
sb.NodeInput('asdf', 'compute-flavor', False, False)
]
class TestScheduler(base.TestCase):
@mock.patch.object(scheduler.DirectorScheduler, 'periodic_tasks')
@mock.patch('arsenal.director.onmetal_scout.OnMetalV1Scout')
def setUp(self, onmetal_scout_mock, periodic_task_mock):
super(TestScheduler, self).setUp()
CONF.set_override('scout', 'onmetal_scout.OnMetalV1Scout', 'director')
CONF.set_override('dry_run', False, 'director')
# Make sure both rate limiters are off at the beginning of the test.
CONF.set_override('cache_directive_rate_limit', 0, 'director')
CONF.set_override('eject_directive_rate_limit', 0, 'director')
onmetal_scout_mock.retrieve_node_data = mock.Mock()
onmetal_scout_mock.retrieve_node_data.return_value = FAKE_NODE_DATA
self.onmetal_scout_mock = onmetal_scout_mock
self.scheduler = scheduler.DirectorScheduler()
self.scheduler.scout = onmetal_scout_mock
self.scheduler.node_data = FAKE_NODE_DATA
self.scheduler.flavor_data = FAKE_FLAVOR_DATA
self.scheduler.image_data = FAKE_IMAGE_DATA
self.scheduler.strat.directives = strat_directive_mock
self.issue_action_mock = mock.MagicMock()
self.scheduler.scout.issue_action = self.issue_action_mock
def test_cache_rate_limit_on(self):
CONF.set_override('cache_directive_rate_limit', 2, 'director')
self.scheduler.cache_rate_limiter = (
scheduler.get_configured_cache_rate_limiter())
self.assertIsNotNone(self.scheduler.cache_rate_limiter)
self.scheduler.issue_directives(None)
# 2 cache node directives, plus 5 eject node directives
self.assertEqual(7, self.issue_action_mock.call_count)
def test_cache_rate_limit_off(self):
CONF.set_override('cache_directive_rate_limit', 0, 'director')
self.scheduler.cache_rate_limiter = (
scheduler.get_configured_cache_rate_limiter())
self.assertIsNone(self.scheduler.cache_rate_limiter)
self.scheduler.issue_directives(None)
# 5 cache node directives, plus 5 eject node directives
self.assertEqual(10, self.issue_action_mock.call_count)
def test_eject_rate_limit_on(self):
CONF.set_override('eject_directive_rate_limit', 2, 'director')
self.scheduler.eject_rate_limiter = (
scheduler.get_configured_ejection_rate_limiter())
self.assertIsNotNone(self.scheduler.eject_rate_limiter)
self.scheduler.issue_directives(None)
# 2 eject node directives, plus 5 cache node directives
self.assertEqual(7, self.issue_action_mock.call_count)
def test_eject_rate_limit_off(self):
CONF.set_override('eject_directive_rate_limit', 0, 'director')
self.scheduler.eject_rate_limiter = (
scheduler.get_configured_ejection_rate_limiter())
self.assertIsNone(self.scheduler.eject_rate_limiter)
self.scheduler.issue_directives(None)
# 5 eject node directives, plus 5 cache node directives
self.assertEqual(10, self.issue_action_mock.call_count)
def test_both_rate_limit_on(self):
CONF.set_override('cache_directive_rate_limit', 3, 'director')
self.scheduler.cache_rate_limiter = (
scheduler.get_configured_cache_rate_limiter())
self.assertIsNotNone(self.scheduler.cache_rate_limiter)
CONF.set_override('eject_directive_rate_limit', 3, 'director')
self.scheduler.eject_rate_limiter = (
scheduler.get_configured_ejection_rate_limiter())
self.assertIsNotNone(self.scheduler.eject_rate_limiter)
self.scheduler.issue_directives(None)
# 3 eject node directives, plus 3 cache node directives
self.assertEqual(6, self.issue_action_mock.call_count)
def test_dry_run_on(self):
# Dry-run enabled, so issue_action should not be called on the scout.
CONF.set_override('dry_run', True, 'director')
self.scheduler.issue_directives(None)
self.assertFalse(self.issue_action_mock.called)
def test_dry_run_off(self):
# Dry-run disabled, so issue_action will be called.
CONF.set_override('dry_run', False, 'director')
self.scheduler.issue_directives(None)
self.assertTrue(self.issue_action_mock.called)
@mock.patch('arsenal.strategy.base.log_overall_node_statistics')
def test_log_statistics_on(self, log_mock):
CONF.set_override('log_statistics', True, 'director')
self.scheduler.issue_directives(None)
self.assertTrue(log_mock.called)
@mock.patch('arsenal.strategy.base.log_overall_node_statistics')
def test_log_statistics_off(self, log_mock):
CONF.set_override('log_statistics', False, 'director')
self.scheduler.issue_directives(None)
self.assertFalse(log_mock.called)
def test_issue_directives_empty_data_causes_strategy_suspension(self):
self.scheduler.node_data = []
self.scheduler.flavor_data = FAKE_FLAVOR_DATA
self.scheduler.image_data = FAKE_IMAGE_DATA
suspension_test_cases = (([], FAKE_FLAVOR_DATA, FAKE_IMAGE_DATA),
(FAKE_NODE_DATA, [], FAKE_IMAGE_DATA),
(FAKE_NODE_DATA, FAKE_FLAVOR_DATA, []),
([], [], FAKE_IMAGE_DATA),
(FAKE_NODE_DATA, [], []))
for case in suspension_test_cases:
self.onmetal_scout_mock.retrieve_node_data.return_value = case[0]
self.scheduler.flavor_data = case[1]
self.scheduler.image_data = case[2]
self.scheduler.strat.directives = mock.Mock()
self.scheduler.issue_directives(None)
self.assertFalse(self.scheduler.strat.directives.called)
|
{
"content_hash": "56f8f3f83c0366e59a1bfbfc3ff00e64",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 43.06748466257669,
"alnum_prop": 0.653988603988604,
"repo_name": "rackerlabs/arsenal",
"id": "9c3c5e6e553a9f5a766f529510e8bc1c9cfda506",
"size": "7670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arsenal/tests/unit/director/test_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "190461"
},
{
"name": "Shell",
"bytes": "8033"
}
],
"symlink_target": ""
}
|
import unittest
from django import forms
from django.forms.utils import ErrorList
from django.core.exceptions import ValidationError
from wagtail.wagtailcore import blocks
class TestFieldBlock(unittest.TestCase):
def test_charfield_render(self):
block = blocks.CharBlock()
html = block.render("Hello world!")
self.assertEqual(html, "Hello world!")
def test_charfield_render_form(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!")
self.assertIn('<div class="field char_field widget-text_input">', html)
self.assertIn('<input id="" name="" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_prefix(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!", prefix='foo')
self.assertIn('<input id="foo" name="foo" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_error(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!",
errors=ErrorList([ValidationError("This field is required.")])
)
self.assertIn('This field is required.', html)
def test_charfield_searchable_content(self):
block = blocks.CharBlock()
content = block.get_searchable_content("Hello world!")
self.assertEqual(content, ["Hello world!"])
def test_choicefield_render(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render('choice-2')
self.assertEqual(html, "choice-2")
def test_choicefield_render_form(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render_form('choice-2')
self.assertIn('<div class="field choice_field widget-select">', html)
self.assertIn('<select id="" name="" placeholder="">', html)
self.assertIn('<option value="choice-1">Choice 1</option>', html)
self.assertIn('<option value="choice-2" selected="selected">Choice 2</option>', html)
@unittest.expectedFailure # Returning "choice-1" instead of "Choice 1"
def test_choicefield_searchable_content(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
content = block.get_searchable_content("choice-1")
self.assertEqual(content, ["Choice 1"])
class TestChoiceBlock(unittest.TestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
self.assertEqual(block.clean(''), '')
self.assertEqual(block.clean(None), '')
def test_render_choice_block_with_existing_blank_choice(self):
block = blocks.ChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="" selected="selected">No thanks</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageChoiceBlock(blocks.ChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.wagtailcore.blocks.ChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
class TestMeta(unittest.TestCase):
def test_set_template_with_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock()
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor(self):
block = blocks.CharBlock(template='heading.html')
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor_overrides_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock(template='subheading.html')
self.assertEqual(block.meta.template, 'subheading.html')
def test_meta_multiple_inheritance(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
test = 'Foo'
class SubHeadingBlock(HeadingBlock):
class Meta:
template = 'subheading.html'
block = SubHeadingBlock()
self.assertEqual(block.meta.template, 'subheading.html')
self.assertEqual(block.meta.test, 'Foo')
class TestStructBlock(unittest.TestCase):
def test_initialisation(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass_with_extra(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock([
('classname', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
    def test_initialisation_with_multiple_subclasses(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
@unittest.expectedFailure # Field order doesn't match inheritance order
def test_initialisation_with_mixins(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(LinkBlock, StylingMixin):
pass
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
})
self.assertIn('<dt>title</dt>', html)
self.assertIn('<dd>Wagtail site</dd>', html)
self.assertIn('<dt>link</dt>', html)
self.assertIn('<dd>http://www.wagtail.io</dd>', html)
@unittest.expectedFailure
def test_render_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
})
self.assertIn('<dt>title</dt>', html)
self.assertIn('<dd>Wagtail site</dd>', html)
self.assertIn('<dt>link</dt>', html)
self.assertIn('<dd>http://www.wagtail.io</dd>', html)
# Don't render the extra item
self.assertNotIn('<dt>image</dt>', html)
def test_render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}, prefix='mylink')
self.assertIn('<div class="struct-block">', html)
self.assertIn('<div class="field char_field widget-text_input fieldname-title">', html)
self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html)
self.assertIn('<div class="field url_field widget-url_input fieldname-link">', html)
self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html)
def test_render_form_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}, prefix='mylink')
self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html)
self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html)
# Don't render the extra field
self.assertNotIn('mylink-image', html)
def test_render_form_uses_default_value(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
html = block.render_form({}, prefix='mylink')
self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Torchbox" />', html)
self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.torchbox.com" />', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class LinkBlock(blocks.StructBlock):
title = ScriptedCharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class LinkBlock(blocks.StructBlock):
title = CharBlockWithDeclarations(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
content = block.get_searchable_content({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
})
self.assertEqual(content, ["Wagtail site"])
class TestListBlock(unittest.TestCase):
def test_initialise_with_class(self):
block = blocks.ListBlock(blocks.CharBlock)
# Child block should be initialised for us
self.assertIsInstance(block.child_block, blocks.CharBlock)
def test_initialise_with_instance(self):
child_block = blocks.CharBlock()
block = blocks.ListBlock(child_block)
self.assertEqual(block.child_block, child_block)
def render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
return block.render([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
def test_render_uses_ul(self):
html = self.render()
self.assertIn('<ul>', html)
self.assertIn('</ul>', html)
def test_render_uses_li(self):
html = self.render()
self.assertIn('<li>', html)
self.assertIn('</li>', html)
def render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
        ], prefix='links')
return html
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="sequence">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="links-count" id="links-count" value="2">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-deleted" name="links-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-order" name="links-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="links-1-order" name="links-1-order" value="1">', html)
def test_render_form_labels(self):
html = self.render_form()
self.assertIn('<label for=links-0-value-title>Title</label>', html)
self.assertIn('<label for=links-0-value-link>Link</label>', html)
def test_render_form_values(self):
html = self.render_form()
self.assertIn('<input id="links-0-value-title" name="links-0-value-title" placeholder="Title" type="text" value="Wagtail" />', html)
self.assertIn('<input id="links-0-value-link" name="links-0-value-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html)
self.assertIn('<input id="links-1-value-title" name="links-1-value-title" placeholder="Title" type="text" value="Django" />', html)
self.assertIn('<input id="links-1-value-link" name="links-1-value-link" placeholder="Link" type="url" value="http://www.djangoproject.com" />', html)
def test_html_declarations(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertIn('<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" />', html)
self.assertIn('<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" />', html)
def test_html_declarations_uses_default(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Github")
link = blocks.URLBlock(default="http://www.github.com")
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertIn('<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" value="Github" />', html)
self.assertIn('<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" value="http://www.github.com" />', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
block = blocks.ListBlock(ScriptedCharBlock())
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
block = blocks.ListBlock(CharBlockWithDeclarations())
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
content = block.get_searchable_content([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
self.assertEqual(content, ["Wagtail", "Django"])
class TestStreamBlock(unittest.TestCase):
def test_initialisation(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_with_binary_string_names(self):
# migrations will sometimes write out names as binary strings, just to keep us on our toes
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), [b'heading', b'paragraph'])
def test_initialisation_from_subclass(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_from_subclass_with_extra(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock([
('intro', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
    def test_initialisation_with_multiple_subclasses(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class ArticleWithIntroBlock(ArticleBlock):
intro = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
@unittest.expectedFailure # Field order doesn't match inheritance order
def test_initialisation_with_mixins(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class IntroMixin(blocks.StreamBlock):
intro = blocks.CharBlock()
class ArticleWithIntroBlock(ArticleBlock, IntroMixin):
pass
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
def render_article(self, data):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python(data)
return block.render(value)
def test_render(self):
html = self.render_article([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
self.assertIn('<div class="block-heading">My title</div>', html)
self.assertIn('<div class="block-paragraph">My first paragraph</div>', html)
self.assertIn('<div class="block-paragraph">My second paragraph</div>', html)
def test_render_unknown_type(self):
# This can happen if a developer removes a type from their StreamBlock
html = self.render_article([
{
'type': 'foo',
'value': "Hello",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
])
self.assertNotIn('foo', html)
self.assertNotIn('Hello', html)
self.assertIn('<div class="block-paragraph">My first paragraph</div>', html)
def render_form(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
return block.render_form(value, prefix='myarticle')
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="sequence">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="myarticle-count" id="myarticle-count" value="3">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-deleted" name="myarticle-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-order" name="myarticle-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="myarticle-1-order" name="myarticle-1-order" value="1">', html)
self.assertIn('<input type="hidden" id="myarticle-2-order" name="myarticle-2-order" value="2">', html)
def test_render_form_type_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-type" name="myarticle-0-type" value="heading">', html)
self.assertIn('<input type="hidden" id="myarticle-1-type" name="myarticle-1-type" value="paragraph">', html)
self.assertIn('<input type="hidden" id="myarticle-2-type" name="myarticle-2-type" value="paragraph">', html)
def test_render_form_value_fields(self):
html = self.render_form()
self.assertIn('<input id="myarticle-0-value" name="myarticle-0-value" placeholder="Heading" type="text" value="My title" />', html)
self.assertIn('<input id="myarticle-1-value" name="myarticle-1-value" placeholder="Paragraph" type="text" value="My first paragraph" />', html)
self.assertIn('<input id="myarticle-2-value" name="myarticle-2-value" placeholder="Paragraph" type="text" value="My second paragraph" />', html)
def test_html_declarations(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
html = block.html_declarations()
self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" />', html)
self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" />', html)
def test_html_declarations_uses_default(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock(default="Fish found on moon")
paragraph = blocks.CharBlock(default="Lorem ipsum dolor sit amet")
block = ArticleBlock()
html = block.html_declarations()
self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" value="Fish found on moon" />', html)
self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" value="Lorem ipsum dolor sit amet" />', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class ArticleBlock(blocks.StreamBlock):
heading = ScriptedCharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class ArticleBlock(blocks.StreamBlock):
heading = CharBlockWithDeclarations(default="Torchbox")
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_ordering_in_form_submission(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'article-count': '3'}
for i in range(0, 3):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(2 - i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 0")
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'article-count': '12'}
for i in range(0, 12):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 2")
def test_searchable_content(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
content = block.get_searchable_content(value)
self.assertEqual(content, [
"My title",
"My first paragraph",
"My second paragraph",
])
|
{
"content_hash": "f9d78b9fd5dc9d744a48832def3b88a0",
"timestamp": "",
"source": "github",
"line_count": 872,
"max_line_length": 157,
"avg_line_length": 38.518348623853214,
"alnum_prop": 0.5878885316184351,
"repo_name": "dresiu/wagtail",
"id": "f1a839af4229f8f73010453993f3d05aeff9d25f",
"size": "33588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailcore/tests/test_blocks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "148951"
},
{
"name": "HTML",
"bytes": "238001"
},
{
"name": "JavaScript",
"bytes": "78888"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1153570"
},
{
"name": "Ruby",
"bytes": "1275"
},
{
"name": "Shell",
"bytes": "11292"
}
],
"symlink_target": ""
}
|
"""Type classes for LIT inputs and outputs.
These are simple dataclasses used in model.input_spec() and model.output_spec()
to describe the semantics of the model outputs, while allowing clients to still
use flexible data structures.
These are used by the LIT framework to configure front-end components and to
enable different generation and visualization modules. For example, the input
spec allows LIT to automatically generate input forms for common types like text
segments or class labels, while the output spec describes how the model output
should be rendered.
"""
import abc
from typing import Any, Dict, List, NewType, Optional, Sequence, Text, Tuple, Union
import attr
JsonDict = Dict[Text, Any]
Input = JsonDict # TODO(lit-dev): stronger typing using NewType
IndexedInput = NewType("IndexedInput", JsonDict) # has keys: id, data, meta
ExampleId = Text
TokenTopKPredsList = List[List[Tuple[str, float]]]
##
# Base classes, for common functionality and type grouping.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class LitType(metaclass=abc.ABCMeta):
"""Base class for LIT Types."""
required: bool = True # for input fields, mark if required by the model.
annotated: bool = False # If this type is created from an Annotator.
# TODO(lit-dev): Add defaults for all LitTypes
default = None # an optional default value for a given type.
def is_compatible(self, other):
"""Check equality, ignoring some fields."""
# We allow this class to be a subclass of the other.
if not isinstance(self, type(other)):
return False
d1 = attr.asdict(self)
d1.pop("required", None)
d2 = attr.asdict(other)
d2.pop("required", None)
return d1 == d2
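  # e.g. Scalar(required=False).is_compatible(Scalar()) is True, since
  # "required" is dropped from both copies before comparison.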
def to_json(self) -> JsonDict:
"""Used by serialize.py."""
d = attr.asdict(self)
d["__class__"] = "LitType"
d["__name__"] = self.__class__.__name__
# All parent classes, from method resolution order (mro).
# Use this to check inheritance on the frontend.
d["__mro__"] = [a.__name__ for a in self.__class__.__mro__]
return d
@staticmethod
def from_json(d: JsonDict):
"""Used by serialize.py."""
cls = globals()[d.pop("__name__")] # class by name from this module
del d["__mro__"]
return cls(**d)
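# Round-trip sketch: TextSegment().to_json() adds "__class__", "__name__" and
# "__mro__" to the attr fields; from_json() looks the class up by "__name__"
# in this module's globals and rebuilds the instance.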
Spec = Dict[Text, LitType]
# Attributes that should be treated as a reference to other fields.
FIELD_REF_ATTRIBUTES = frozenset(
{"parent", "align", "align_in", "align_out", "grad_for"})
def _remap_leaf(leaf: LitType, keymap: Dict[str, str]) -> LitType:
"""Remap any field references on a LitType."""
d = attr.asdict(leaf) # mutable
d = {
k: (keymap.get(v, v) if k in FIELD_REF_ATTRIBUTES else v)
for k, v in d.items()
}
return leaf.__class__(**d)
def remap_spec(spec: Spec, keymap: Dict[str, str]) -> Spec:
"""Rename fields in a spec, with a best-effort to also remap field references."""
ret = {}
for k, v in spec.items():
new_key = keymap.get(k, k)
new_value = _remap_leaf(v, keymap)
ret[new_key] = new_value
return ret
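# Usage sketch (field names are illustrative):
#   remap_spec({"tokens": Tokens(), "tags": SequenceTags(align="tokens")},
#              {"tokens": "wordpieces"})
# returns {"wordpieces": Tokens(), "tags": SequenceTags(align="wordpieces")},
# because "align" is listed in FIELD_REF_ATTRIBUTES.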
##
# Concrete type classes
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class String(LitType):
"""User-editable text input.
All automated edits are disabled for this type.
Mainly used for string inputs that have special formatting, and should only
be edited manually.
"""
default: Text = ""
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class TextSegment(LitType):
"""Text input (untokenized), a single string."""
default: Text = ""
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class ImageBytes(LitType):
"""An image, an encoded base64 ascii string (starts with 'data:image...')."""
pass
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class GeneratedText(TextSegment):
"""Generated (untokenized) text."""
# Name of a TextSegment field to evaluate against
parent: Optional[Text] = None
ScoredTextCandidates = List[Tuple[str, Optional[float]]]
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class GeneratedTextCandidates(TextSegment):
"""Multiple candidates for GeneratedText; values are List[(text, score)]."""
# Name of a TextSegment field to evaluate against
parent: Optional[Text] = None
@staticmethod
def top_text(value: ScoredTextCandidates) -> str:
return value[0][0] if len(value) else ""
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class ReferenceTexts(LitType):
"""Multiple candidates for TextSegment; values are List[(text, score)]."""
pass
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class URL(TextSegment):
"""TextSegment that should be interpreted as a URL."""
pass
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SearchQuery(TextSegment):
"""TextSegment that should be interpreted as a search query."""
pass
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class Tokens(LitType):
"""Tokenized text, as List[str]."""
default: List[Text] = attr.Factory(list)
# Name of a TextSegment field from the input
# TODO(b/167617375): should we use 'align' here?
parent: Optional[Text] = None
mask_token: Optional[Text] = None # optional mask token for input
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class TokenTopKPreds(LitType):
"""Predicted tokens, as from a language model.
Data should be a List[List[Tuple[str, float]]], where the inner list contains
(word, probability) in descending order.
"""
  align: Optional[Text] = None  # name of a Tokens field in the model output
parent: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class Scalar(LitType):
"""Scalar value, a single float or int."""
min_val: float = 0
max_val: float = 1
default: float = 0
step: float = .01
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class RegressionScore(Scalar):
"""Regression score, a single float."""
# name of a Scalar or RegressionScore field in input
parent: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class ReferenceScores(LitType):
"""Score of one or more target sequences, as List[float]."""
# name of a TextSegment or ReferenceTexts field in the input
parent: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class CategoryLabel(LitType):
"""Category or class label, a single string."""
# Optional vocabulary to specify allowed values.
# If omitted, any value is accepted.
vocab: Optional[Sequence[Text]] = None # label names
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class MulticlassPreds(LitType):
"""Multiclass predicted probabilities, as <float>[num_labels]."""
# Vocabulary is required here for decoding model output.
# Usually this will match the vocabulary in the corresponding label field.
vocab: Sequence[Text] # label names
null_idx: Optional[int] = None # vocab index of negative (null) label
parent: Optional[Text] = None # CategoryLabel field in input
autosort: Optional[bool] = False # Enable automatic sorting
@property
def num_labels(self):
return len(self.vocab)
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SequenceTags(LitType):
"""Sequence tags, aligned to tokens.
The data should be a list of string labels, one for each token.
"""
align: Text # name of Tokens field
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SpanLabels(LitType):
"""Span labels, a List[dtypes.SpanLabel] aligned to tokens.
Span labels can cover more than one token, may not cover all tokens in the
sentence, and may overlap with each other.
"""
align: Text # name of Tokens field
parent: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class EdgeLabels(LitType):
"""Edge labels, a List[dtypes.EdgeLabel] between pairs of spans.
This is a general form for structured prediction output; each entry consists
of (span1, span2, label). See
https://arxiv.org/abs/1905.06316 (Tenney et al. 2019) and
https://github.com/nyu-mll/jiant/tree/master/probing#data-format for more
details.
"""
align: Text # name of Tokens field
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class MultiSegmentAnnotations(LitType):
"""Very general type for in-line text annotations, as List[AnnotationCluster].
This is a more general version of SpanLabel, EdgeLabel, and other annotation
types, designed to represent annotations that may span multiple segments.
The basic unit is dtypes.AnnotationCluster, which contains a label, optional
score, and one or more SpanLabel annotations, each of which points to a
specific segment from the input.
TODO(lit-dev): by default, spans are treated as bytes in this context.
Make this configurable, if some spans need to refer to tokens instead.
"""
exclusive: bool = False # if true, treat as candidate list
background: bool = False # if true, don't emphasize in visualization
##
# Model internals, for interpretation.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class Embeddings(LitType):
"""Embeddings or model activations, as fixed-length <float>[emb_dim]."""
pass
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class Gradients(LitType):
"""Gradients with respect to embeddings."""
grad_for: Optional[Text] = None # name of Embeddings field
# Name of the field in the input that can be used to specify the target class
# for the gradients.
grad_target_field_key: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class TokenEmbeddings(LitType):
"""Per-token embeddings, as <float>[num_tokens, emb_dim]."""
align: Optional[Text] = None # name of a Tokens field
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class TokenGradients(LitType):
"""Gradients with respect to per-token inputs, as <float>[num_tokens, emb_dim]."""
align: Optional[Text] = None # name of a Tokens field
grad_for: Optional[Text] = None # name of TokenEmbeddings field
# Name of the field in the input that can be used to specify the target class
# for the gradients.
grad_target_field_key: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class ImageGradients(LitType):
"""Gradients with respect to per-pixel inputs, as a multidimensional array."""
# Name of the field in the input for which the gradients are computed.
align: Optional[Text] = None
# Name of the field in the input that can be used to specify the target class
# for the gradients.
grad_target_field_key: Optional[Text] = None
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class AttentionHeads(LitType):
"""One or more attention heads, as <float>[num_heads, num_tokens, num_tokens]."""
# input and output Tokens fields; for self-attention these can be the same
align_in: Text
align_out: Text
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SubwordOffsets(LitType):
"""Offsets to align input tokens to wordpieces or characters, as List[int].
offsets[i] should be the index of the first wordpiece for input token i.
"""
align_in: Text # name of field in data spec
align_out: Text # name of field in model output spec
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SparseMultilabel(LitType):
"""Sparse multi-label represented as a list of strings, as List[str]."""
vocab: Optional[Sequence[Text]] = None # label names
default: Sequence[Text] = []
# TODO(b/162269499) Migrate non-comma separators to custom type.
separator: Text = "," # Used for display purposes.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SparseMultilabelPreds(LitType):
"""Sparse multi-label predictions represented as a list of tuples.
The tuples are of the label and the score. So as a List[(str, float)].
"""
vocab: Optional[Sequence[Text]] = None # label names
parent: Optional[Text] = None
default: Sequence[Text] = []
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class FieldMatcher(LitType):
"""For matching spec fields.
The front-end will perform spec matching and fill in the vocab field
  accordingly. UI will materialize this to a dropdown list.
  Use MultiFieldMatcher when you intend to select more than one field in the UI.
"""
spec: Text # which spec to check, 'dataset', 'input', or 'output'.
types: Union[Text, Sequence[Text]] # types of LitType to match in the spec.
vocab: Optional[Sequence[Text]] = None # names matched from the spec.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class MultiFieldMatcher(LitType):
"""For matching spec fields.
The front-end will perform spec matching and fill in the vocab field
accordingly. UI will materialize this to multiple checkboxes. Use this when
  the user needs to pick more than one field in the UI.
"""
spec: Text # which spec to check, 'dataset', 'input', or 'output'.
types: Union[Text, Sequence[Text]] # types of LitType to match in the spec.
vocab: Optional[Sequence[Text]] = None # names matched from the spec.
default: Sequence[Text] = [] # default names of selected items.
  select_all: bool = False  # Select all by default (overrides default).
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class TokenSalience(LitType):
"""Metadata about a returned token salience map, returned as dtypes.TokenSalience."""
autorun: bool = False # If the saliency technique is automatically run.
signed: bool # If the returned values are signed.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class FeatureSalience(LitType):
"""Metadata about a returned feature salience map, returned as dtypes.FeatureSalience."""
autorun: bool = True # If the saliency technique is automatically run.
signed: bool # If the returned values are signed.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class ImageSalience(LitType):
"""Metadata about a returned image saliency.
The data is returned as an image in the base64 URL encoded format, e.g.,
data:image/jpg;base64,w4J3k1Bfa...
"""
autorun: bool = False # If the saliency technique is automatically run.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class SequenceSalience(LitType):
"""Metadata about a returned sequence salience map, returned as dtypes.SequenceSalienceMap."""
autorun: bool = False # If the saliency technique is automatically run.
signed: bool # If the returned values are signed.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class Boolean(LitType):
"""Boolean value."""
default: bool = False
|
{
"content_hash": "33b32f3ef2e5e4b7b7127cf46c5cb099",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 96,
"avg_line_length": 34.5938242280285,
"alnum_prop": 0.7131969239219994,
"repo_name": "pair-code/lit",
"id": "af193f20f1989862867696e178f3eb9d2d56224f",
"size": "15238",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lit_nlp/api/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57958"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "1408"
},
{
"name": "JavaScript",
"bytes": "48969"
},
{
"name": "Liquid",
"bytes": "13294"
},
{
"name": "Python",
"bytes": "392936"
},
{
"name": "Shell",
"bytes": "1893"
},
{
"name": "TypeScript",
"bytes": "506076"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import airflow
from airflow.operators.python_operator import PythonOperator
from airflow.models import DAG
import os
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
dag = DAG(
dag_id='example_kubernetes_executor', default_args=args,
schedule_interval=None
)
affinity = {
'podAntiAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': [
{
'topologyKey': 'kubernetes.io/hostname',
'labelSelector': {
'matchExpressions': [
{
'key': 'app',
'operator': 'In',
'values': ['airflow']
}
]
}
}
]
}
}
tolerations = [{
'key': 'dedicated',
'operator': 'Equal',
'value': 'airflow'
}]
def print_stuff():
print("stuff!")
def use_zip_binary():
rc = os.system("zip")
assert rc == 0
# You don't have to use any special KubernetesExecutor configuration if you don't want to
start_task = PythonOperator(
task_id="start_task", python_callable=print_stuff, dag=dag
)
# But you can if you want to
one_task = PythonOperator(
task_id="one_task", python_callable=print_stuff, dag=dag,
executor_config={"KubernetesExecutor": {"image": "airflow/ci:latest"}}
)
# Use the zip binary, which is only found in this special docker image
two_task = PythonOperator(
task_id="two_task", python_callable=use_zip_binary, dag=dag,
executor_config={"KubernetesExecutor": {"image": "airflow/ci_zip:latest"}}
)
# Limit resources on this operator/task with node affinity & tolerations
three_task = PythonOperator(
task_id="three_task", python_callable=print_stuff, dag=dag,
executor_config={
"KubernetesExecutor": {"request_memory": "128Mi",
"limit_memory": "128Mi",
"tolerations": tolerations,
"affinity": affinity}}
)
start_task.set_downstream([one_task, two_task, three_task])
|
{
"content_hash": "d8ff7f5fe6b4259c3106072f9abc7ecb",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 89,
"avg_line_length": 27.354430379746834,
"alnum_prop": 0.5761221656640444,
"repo_name": "fenglu-g/incubator-airflow",
"id": "d03e255ab3287504083e0a038740f07da9db5d86",
"size": "2972",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/example_dags/example_kubernetes_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3634"
},
{
"name": "HTML",
"bytes": "129454"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5852162"
},
{
"name": "Shell",
"bytes": "41793"
}
],
"symlink_target": ""
}
|
from tempest.api.compute import base
from tempest import test
class AZAdminV3Test(base.BaseComputeAdminTest):
"""
Tests Availability Zone API List
"""
_api_version = 3
@classmethod
def setUpClass(cls):
super(AZAdminV3Test, cls).setUpClass()
cls.client = cls.os_adm.availability_zone_client
@test.attr(type='gate')
def test_get_availability_zone_list(self):
# List of availability zone
resp, availability_zone = self.client.get_availability_zone_list()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
@test.attr(type='gate')
def test_get_availability_zone_list_detail(self):
# List of availability zones and available services
resp, availability_zone = \
self.client.get_availability_zone_list_detail()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
class AZAdminV2TestJSON(AZAdminV3Test):
_api_version = 2
class AZAdminV2TestXML(AZAdminV2TestJSON):
_interface = 'xml'
|
{
"content_hash": "344bb7e925eb662d5054b28c0726726f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 29.18918918918919,
"alnum_prop": 0.6787037037037037,
"repo_name": "vmahuli/tempest",
"id": "9555367bbdc4be08a7b3dfc1ae2c2067d3165532",
"size": "1711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_availability_zone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3087389"
},
{
"name": "Shell",
"bytes": "17930"
}
],
"symlink_target": ""
}
|
"""
Configures MySQLdb driver-specific aspects of the db layer.
"""
def process_dsn(dsn):
"""
Take a standard DSN-dict and return the args and
kwargs that will be passed to the MySQLdb Connection
constructor.
We use the t.e.adbapi.ConnectionPool kwargs here to
provide a callback that fixes broken connections,
and set the default cursorclass.
"""
dsn['cp_openfun'] = fix_mysqldb
from MySQLdb import cursors
dsn['cursorclass'] = cursors.SSDictCursor
# I'm just not sure whether these make things worse or better
# dsn['use_unicode'] = True
# dsn['charset'] = 'utf8'
return [], dsn
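# Usage sketch (hypothetical DSN): args, kwargs = process_dsn(
#     {'db': 'modu', 'user': 'root'})
# kwargs now carries cp_openfun=fix_mysqldb and the SSDictCursor cursorclass.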
def fix_mysqldb(connection):
"""
This function takes a MySQLdb connection object and replaces
character_set_name() if the version of MySQLdb < 1.2.2.
It also enables NO_BACKSLASH_ESCAPES per session.
"""
cur = connection.cursor()
cur.execute("SET SESSION sql_mode='NO_BACKSLASH_ESCAPES'")
cur.close()
from distutils.version import LooseVersion
import MySQLdb
if(LooseVersion(MySQLdb.__version__) < LooseVersion('1.2.2')):
def _yes_utf8_really(self):
return 'utf8'
instancemethod = type(_DummyClass._dummy_method)
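        # Bind _yes_utf8_really to this connection instance so that
        # character_set_name() reports 'utf8' on old MySQLdb versions.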
connection.character_set_name = instancemethod(_yes_utf8_really, connection, connection.__class__)
class _DummyClass(object):
"""
Dummy class used to override an instance method on MySQLdb connection object.
"""
def _dummy_method(self):
"""
Sample instance method for hackery.
"""
pass
|
{
"content_hash": "bc4bbab5076d20edeed6ee57fd3d1643",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 100,
"avg_line_length": 26.272727272727273,
"alnum_prop": 0.7231833910034602,
"repo_name": "philchristensen/modu",
"id": "004506133184d3fbb0b774c6a7137e26890543ff",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/modu/persist/dbapi_MySQLdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "105"
},
{
"name": "CSS",
"bytes": "12009"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "56109"
},
{
"name": "Python",
"bytes": "468377"
}
],
"symlink_target": ""
}
|
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
self.assertMatch("'tuple' object does not support item assignment", ex[0])
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three.append("boom")
except Exception as ex:
self.assertEqual(AttributeError, type(ex))
# Note, assertMatch() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertMatch("'tuple' object has no attribute 'append'", ex[0])
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, 'boom'), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(int, (1).__class__)
self.assertEqual(tuple, (1,).__class__)
self.assertEqual(tuple, ("Hello comma!", ).__class__)
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual((), ())
self.assertEqual((), tuple()) # Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(place, place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append(
("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
)
self.assertEqual("Cthulhu", locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
|
{
"content_hash": "fa64d598baec24981fc377b8dc6f28d0",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 91,
"avg_line_length": 35.61194029850746,
"alnum_prop": 0.5670578373847444,
"repo_name": "pnichols104/python-koans",
"id": "b1c522a111ed313839a4313b3e64a36437af65bc",
"size": "2433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_tuples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317887"
},
{
"name": "Shell",
"bytes": "1599"
}
],
"symlink_target": ""
}
|
import json
import os
import socket
import httplib2
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class HTTPError(Exception):
"""
An error response from the API server. This should be an
HTTP error of some kind (404, 500, etc).
"""
def __init__(self, content, status=None, reason=None, path=None, body=None):
        # HTTP status code
self.status = status
# human readable HTTP status
self.reason = reason
self.path = path
self.body = body
        # Actual, useful reason for failure returned by RabbitMQ
        self.detail = None
if content and content.get('reason'):
self.detail = content['reason']
self.output = "%s - %s (%s) (%s) (%s)" % (self.status,
self.reason,
self.detail,
self.path,
repr(self.body))
def __str__(self):
return self.output
class NetworkError(Exception):
"""Denotes a failure to communicate with the REST API
"""
pass
class HTTPClient(object):
"""
A wrapper for (currently) httplib2. Abstracts away
things like path building, return value parsing, etc.,
so the api module code stays clean and easy to read/use.
"""
def __init__(self, server, uname, passwd, timeout=5):
"""
:param string server: 'host:port' string denoting the location of the
broker and the port for interfacing with its REST API.
:param string uname: Username credential used to authenticate.
:param string passwd: Password used to authenticate w/ REST API
:param int timeout: Integer number of seconds to wait for each call.
"""
self.client = httplib2.Http(timeout=timeout)
self.client.add_credentials(uname, passwd)
self.base_url = 'http://%s/api/' % server
def decode_json_content(self, content):
"""
Returns the JSON-decoded Python representation of 'content'.
:param json content: A Python JSON object.
"""
try:
py_ct = json.loads(content)
        except ValueError:
# If there's a 404 or other error, the response will not be JSON.
return None
except TypeError:
# in later Python 3.x versions, some calls return bytes objects.
py_ct = json.loads(content.decode())
return py_ct
def do_call(self, path, reqtype, body=None, headers=None):
"""
Send an HTTP request to the REST API.
:param string path: A URL
:param string reqtype: The HTTP method (GET, POST, etc.) to use
in the request.
:param string body: A string representing any data to be sent in the
body of the HTTP request.
:param dictionary headers:
"{header-name: header-value}" dictionary.
"""
url = urljoin(self.base_url, path)
try:
resp, content = self.client.request(url,
reqtype,
body,
headers)
except socket.timeout as out:
raise NetworkError("Timout while trying to connect to RabbitMQ")
except Exception as out:
# net-related exception types from httplib2 are unpredictable.
raise NetworkError("Error: %s %s" % (type(out), out))
# RabbitMQ will return content even on certain failures.
if content:
content = self.decode_json_content(content)
# 'success' HTTP status codes are 200-206
if resp.status < 200 or resp.status > 206:
raise HTTPError(content, resp.status, resp.reason, path, body)
else:
if content:
return content
else:
return None
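# Usage sketch (assumes RabbitMQ's default management port; adjust as needed):
#   client = HTTPClient('localhost:15672', 'guest', 'guest')
#   overview = client.do_call('overview', 'GET')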
|
{
"content_hash": "3451bf85acad50299dd49ac920355ccc",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 80,
"avg_line_length": 33.34426229508197,
"alnum_prop": 0.5599803343166175,
"repo_name": "vshn/pyrabbit-debian",
"id": "15630c3e5bd60bf5353d1796cdab339b389bbc46",
"size": "4069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrabbit/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48928"
}
],
"symlink_target": ""
}
|
"""Example DAG demonstrating the DummyOperator and a custom DummySkipOperator which skips by default."""
from airflow import DAG
from airflow.exceptions import AirflowSkipException
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
args = {
'owner': 'airflow',
}
# Create some placeholder operators
class DummySkipOperator(DummyOperator):
"""Dummy operator which always skips the task."""
ui_color = '#e8b7e4'
def execute(self, context):
raise AirflowSkipException
def create_test_pipeline(suffix, trigger_rule, dag_):
"""
Instantiate a number of operators for the given DAG.
:param str suffix: Suffix to append to the operator task_ids
:param str trigger_rule: TriggerRule for the join task
:param DAG dag_: The DAG to run the operators on
"""
skip_operator = DummySkipOperator(task_id=f'skip_operator_{suffix}', dag=dag_)
always_true = DummyOperator(task_id=f'always_true_{suffix}', dag=dag_)
join = DummyOperator(task_id=trigger_rule, dag=dag_, trigger_rule=trigger_rule)
final = DummyOperator(task_id=f'final_{suffix}', dag=dag_)
skip_operator >> join
always_true >> join
join >> final
dag = DAG(dag_id='example_skip_dag', default_args=args, start_date=days_ago(2), tags=['example'])
create_test_pipeline('1', 'all_success', dag)
create_test_pipeline('2', 'one_success', dag)
|
{
"content_hash": "1f24e0be65e7f422ac7cf68204aac260",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 104,
"avg_line_length": 32.95348837209303,
"alnum_prop": 0.7148906139731828,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "7a0cc675911c3eb14db82491555f4183545a58f6",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/example_dags/example_skip_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
}
|
try:
import astroquery
except ImportError:
print('-----------------------------------------------------------')
print('-----------------------------------------------------------')
print('WARNING: Not loading modules in xastropy.obs except radec. \n Install astroquery if you want them')
print('-----------------------------------------------------------')
else:
import finder
import keck
import lick
import x_getsdssimg
# Non-dependent modules
import radec
|
{
"content_hash": "296c411b8203e32d86e85a3944362ffe",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 111,
"avg_line_length": 33.06666666666667,
"alnum_prop": 0.43951612903225806,
"repo_name": "profxj/old_xastropy",
"id": "59b77ae12a7a197851a4932701fd58c388b6bb2e",
"size": "516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xastropy/obs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "144038"
},
{
"name": "Python",
"bytes": "826525"
}
],
"symlink_target": ""
}
|
"""
CIFAR-10 dataset.
This module will download dataset from
https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into
paddle reader creators.
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test images.
"""
from PIL import Image
from PIL import ImageOps
import numpy as np
import cPickle
import random
import utils
import paddle.fluid as fluid
import time
import os
import functools
import paddle.reader
__all__ = ['train10', 'test10']
image_size = 32
image_depth = 3
half_length = 8
CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
def generate_reshape_label(label, batch_size, CIFAR_CLASSES=10):
reshape_label = np.zeros((batch_size, 1), dtype='int32')
reshape_non_label = np.zeros(
(batch_size * (CIFAR_CLASSES - 1), 1), dtype='int32')
num = 0
for i in range(batch_size):
label_i = label[i]
reshape_label[i] = label_i + i * CIFAR_CLASSES
for j in range(CIFAR_CLASSES):
if label_i != j:
reshape_non_label[num] = \
j + i * CIFAR_CLASSES
num += 1
return reshape_label, reshape_non_label
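# Sketch: with batch_size=2 and label=[3, 7], reshape_label is [[3], [17]] and
# reshape_non_label flattens the remaining 9 class indices for each example.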
def generate_bernoulli_number(batch_size, CIFAR_CLASSES=10):
rcc_iters = 50
rad_var = np.zeros((rcc_iters, batch_size, CIFAR_CLASSES - 1))
for i in range(rcc_iters):
bernoulli_num = np.random.binomial(size=batch_size, n=1, p=0.5)
bernoulli_map = np.array([])
ones = np.ones((CIFAR_CLASSES - 1, 1))
for batch_id in range(batch_size):
num = bernoulli_num[batch_id]
var_id = 2 * ones * num - 1
bernoulli_map = np.append(bernoulli_map, var_id)
rad_var[i] = bernoulli_map.reshape((batch_size, CIFAR_CLASSES - 1))
return rad_var.astype('float32')
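# Note: the result is a (rcc_iters, batch_size, CIFAR_CLASSES - 1) array of
# Rademacher (+/-1) draws, one sign per example repeated across the classes.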
def preprocess(sample, is_training, args):
image_array = sample.reshape(3, image_size, image_size)
rgb_array = np.transpose(image_array, (1, 2, 0))
img = Image.fromarray(rgb_array, 'RGB')
if is_training:
        # pad and random crop
img = ImageOps.expand(img, (4, 4, 4, 4), fill=0) # pad to 40 * 40 * 3
left_top = np.random.randint(9, size=2) # rand 0 - 8
img = img.crop((left_top[0], left_top[1], left_top[0] + image_size,
left_top[1] + image_size))
if np.random.randint(2):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img = np.array(img).astype(np.float32)
# per_image_standardization
img_float = img / 255.0
img = (img_float - CIFAR_MEAN) / CIFAR_STD
if is_training and args.cutout:
center = np.random.randint(image_size, size=2)
offset_width = max(0, center[0] - half_length)
offset_height = max(0, center[1] - half_length)
target_width = min(center[0] + half_length, image_size)
target_height = min(center[1] + half_length, image_size)
for i in range(offset_height, target_height):
for j in range(offset_width, target_width):
img[i][j][:] = 0.0
img = np.transpose(img, (2, 0, 1))
return img
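# The returned image is CHW float32, standardized with CIFAR_MEAN / CIFAR_STD
# (plus optional random crop/flip and cutout during training).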
def reader_creator_filepath(filename, sub_name, is_training, args):
files = os.listdir(filename)
names = [each_item for each_item in files if sub_name in each_item]
names.sort()
datasets = []
for name in names:
print("Reading file " + name)
batch = cPickle.load(open(filename + name, 'rb'))
data = batch['data']
labels = batch.get('labels', batch.get('fine_labels', None))
assert labels is not None
dataset = zip(data, labels)
datasets.extend(dataset)
random.shuffle(datasets)
def read_batch(datasets, args):
for sample, label in datasets:
im = preprocess(sample, is_training, args)
yield im, [int(label)]
def reader():
batch_data = []
batch_label = []
for data, label in read_batch(datasets, args):
batch_data.append(data)
batch_label.append(label)
if len(batch_data) == args.batch_size:
batch_data = np.array(batch_data, dtype='float32')
batch_label = np.array(batch_label, dtype='int64')
if is_training:
flatten_label, flatten_non_label = \
generate_reshape_label(batch_label, args.batch_size)
rad_var = generate_bernoulli_number(args.batch_size)
mixed_x, y_a, y_b, lam = utils.mixup_data(
batch_data, batch_label, args.batch_size,
args.mix_alpha)
batch_out = [[mixed_x, y_a, y_b, lam, flatten_label, \
flatten_non_label, rad_var]]
yield batch_out
else:
batch_out = [[batch_data, batch_label]]
yield batch_out
batch_data = []
batch_label = []
return reader
def train10(args):
"""
CIFAR-10 training set creator.
It returns a reader creator, each sample in the reader is image pixels in
[0, 1] and label in [0, 9].
:return: Training reader creator
:rtype: callable
"""
return reader_creator_filepath(args.data, 'data_batch', True, args)
def test10(args):
"""
CIFAR-10 test set creator.
It returns a reader creator, each sample in the reader is image pixels in
[0, 1] and label in [0, 9].
:return: Test reader creator.
:rtype: callable
"""
return reader_creator_filepath(args.data, 'test_batch', False, args)
|
{
"content_hash": "f467335f821e848c1f42496f43e627e7",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 82,
"avg_line_length": 34.137724550898206,
"alnum_prop": 0.5884932467988072,
"repo_name": "kuke/models",
"id": "20b32b504e9245c4ff3892f08736d800080daab4",
"size": "6536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/AutoDL/LRC/reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2013, SMART Technologies ULC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Copyright holder (SMART Technologies ULC) nor
the names of its contributors (Joshua Henn) may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies
ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from entity import Entity
from org.sikuli.basics import OS
from org.sikuli.script import KeyModifier, Key, Location
import re
from java.awt.event import InputEvent
from region.exception import FindExhaustedException
from entity.exception import StateFailedException
from sikuli.Sikuli import sleep
from wrapper import Env
from sikuli.Region import Region
import time
import math
import string
import os
from entity.entities import ScrollBar, Button
class Canvas(Entity):
statusCascade = True
offsetY = None
virtualCanvasSize = None
drawingStrategy = None
HORIZONTAL_SCROLLBAR = [ScrollBar, lambda parent, statusCascade=True, **kargs: ScrollBar(parent, parentRegion=parent.parent.region, **kargs)]
EXTEND_PAGE = ['extendPage', lambda parent, **kargs: Button(parent, invalidate=True, statusCascade=False, **kargs)]
@classmethod
def setDefaultDrawingStrategy(cls, drawingStrategy):
cls.drawingStrategy = drawingStrategy
def __init__(self, parent, *args, **kargs):
super(Canvas, self).__init__(parent, *args, **kargs)
# Need to get region for setting virtual canvas size
self.validate()
self.offsetY = 0
self.virtualCanvasSize = self.region.getH()
self.drawingStrategy = self.drawingStrategy(self) # Initialize the drawing strategy
#self[self.HORIZONTAL_SCROLLBAR].gotoTop() Causing validation problems, move elsewhere?
def goto(self, x, y):
self.drawingStrategy.goto(x,y)
def line(self, x1, y1, x2, y2):
assert x1 >= 0
assert x2 >= 0
assert x1 <= self.getW()
assert x2 <= self.getW()
# make sure we can actually draw this (less than height of canvas)
assert math.fabs(y1 - y2) <= self.getH()
# Make sure we have the canvas centered in the right place
self.center(Location(x1,y1))
self.center(Location(x2,y2))
# Draw stuff
self.goto(x1,y1)
self.goto(x2,y2)
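    # Usage sketch: line(0, 0, 100, 400) first centers the viewport on each
    # endpoint (scrolling or extending the page as needed), then draws.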
def mouseMove(self, x, y):
# We should never be trying to draw outside the canvas area
assert x >= 0
assert y - self.offsetY >= 0
assert x <= self.getW()
assert y - self.offsetY <= self.region.getH()
self.region.mouseMove(Location(self.region.getX() + x, self.region.getY() + y - self.offsetY))
def startDrawing(self):
#self.logger.trace("Starting to draw..")
self.region.mouseDown(InputEvent.BUTTON1_MASK)
def stopDrawing(self):
#self.logger.trace("Stop drawing..")
self.region.mouseUp(InputEvent.BUTTON1_MASK)
def center(self, location):
if isinstance(location, Region):
self.center(Location(location.getX(), location.getY()))
self.center(Location(location.getX() + location.getW(), location.getY() + location.getH()))
return
else:
assert isinstance(location, Location)
# If offset is too high
if location.getY() < self.offsetY:
steps = math.ceil((self.offsetY - location.getY()) / 10)
for i in range(0, steps):
self.offsetY -= 10;
self[Canvas.HORIZONTAL_SCROLLBAR][ScrollBar.UP].click()
# If offset is too low
if location.getY() > (self.offsetY + self.region.getH()):
# Extend the page if it needs to be
if location.getY() > self.virtualCanvasSize:
# Get to the bottom of the page
self[Canvas.HORIZONTAL_SCROLLBAR].gotoBottom()
# Get number of times the page needs to be expanded
steps = math.ceil((location.getY() - self.virtualCanvasSize) / 200)
for j in range(0, steps):
self[Canvas.EXTEND_PAGE].click()
self.virtualCanvasSize += 200 # Think this is right?
# Start back at the top
self[Canvas.HORIZONTAL_SCROLLBAR].gotoTop()
self.offsetY = 0
            # Move the page down by the number of 10-pixel increments to the new Y offset
steps = math.ceil((location.getY() - (self.offsetY + self.region.getH())) / 10)
for i in range(0, steps):
self.offsetY += 10;
self[Canvas.HORIZONTAL_SCROLLBAR][ScrollBar.DOWN].click()
def on(self):
self.drawingStrategy.on()
def off(self):
self.drawingStrategy.off()
def setDrawingStrategy(self, drawingStrategy):
self.drawingStrategy = drawingStrategy(self)
|
{
"content_hash": "01741854d0d72fef9d11dad901c72357",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 150,
"avg_line_length": 38.993975903614455,
"alnum_prop": 0.6326278387146609,
"repo_name": "foo123/sikuli-framework",
"id": "adfa0c8f0fb7af1ba360015921b16b389697be18",
"size": "6473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/entity/entities/canvas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "692"
},
{
"name": "Python",
"bytes": "315442"
}
],
"symlink_target": ""
}
|
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_almath', [dirname(__file__)])
except ImportError:
import _almath
return _almath
if fp is not None:
try:
_mod = imp.load_module('_almath', fp, pathname, description)
finally:
fp.close()
return _mod
_almath = swig_import_helper()
del swig_import_helper
else:
import _almath
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
"""Proxy of C++ swig::SwigPyIterator class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _almath.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self):
"""value(self) -> PyObject"""
return _almath.SwigPyIterator_value(self)
def incr(self, n = 1):
"""
incr(self, size_t n = 1) -> SwigPyIterator
incr(self) -> SwigPyIterator
"""
return _almath.SwigPyIterator_incr(self, n)
def decr(self, n = 1):
"""
decr(self, size_t n = 1) -> SwigPyIterator
decr(self) -> SwigPyIterator
"""
return _almath.SwigPyIterator_decr(self, n)
def distance(self, *args):
"""distance(self, SwigPyIterator x) -> ptrdiff_t"""
return _almath.SwigPyIterator_distance(self, *args)
def equal(self, *args):
"""equal(self, SwigPyIterator x) -> bool"""
return _almath.SwigPyIterator_equal(self, *args)
def copy(self):
"""copy(self) -> SwigPyIterator"""
return _almath.SwigPyIterator_copy(self)
def next(self):
"""next(self) -> PyObject"""
return _almath.SwigPyIterator_next(self)
def __next__(self):
"""__next__(self) -> PyObject"""
return _almath.SwigPyIterator___next__(self)
def previous(self):
"""previous(self) -> PyObject"""
return _almath.SwigPyIterator_previous(self)
def advance(self, *args):
"""advance(self, ptrdiff_t n) -> SwigPyIterator"""
return _almath.SwigPyIterator_advance(self, *args)
def __eq__(self, *args):
"""__eq__(self, SwigPyIterator x) -> bool"""
return _almath.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, SwigPyIterator x) -> bool"""
return _almath.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args):
"""__iadd__(self, ptrdiff_t n) -> SwigPyIterator"""
return _almath.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, ptrdiff_t n) -> SwigPyIterator"""
return _almath.SwigPyIterator___isub__(self, *args)
def __add__(self, *args):
"""__add__(self, ptrdiff_t n) -> SwigPyIterator"""
return _almath.SwigPyIterator___add__(self, *args)
def __sub__(self, *args):
"""
__sub__(self, ptrdiff_t n) -> SwigPyIterator
__sub__(self, SwigPyIterator x) -> ptrdiff_t
"""
return _almath.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _almath.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class vectorFloat(_object):
"""Proxy of C++ std::vector<(float)> class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vectorFloat, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vectorFloat, name)
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _almath.vectorFloat_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _almath.vectorFloat___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _almath.vectorFloat___bool__(self)
def __len__(self):
"""__len__(self) -> size_type"""
return _almath.vectorFloat___len__(self)
def pop(self):
"""pop(self) -> value_type"""
return _almath.vectorFloat_pop(self)
def __getslice__(self, *args):
"""__getslice__(self, difference_type i, difference_type j) -> vectorFloat"""
return _almath.vectorFloat___getslice__(self, *args)
def __setslice__(self, *args):
"""__setslice__(self, difference_type i, difference_type j, vectorFloat v)"""
return _almath.vectorFloat___setslice__(self, *args)
def __delslice__(self, *args):
"""__delslice__(self, difference_type i, difference_type j)"""
return _almath.vectorFloat___delslice__(self, *args)
def __delitem__(self, *args):
"""
__delitem__(self, difference_type i)
__delitem__(self, PySliceObject slice)
"""
return _almath.vectorFloat___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, PySliceObject slice) -> vectorFloat
__getitem__(self, difference_type i) -> value_type
"""
return _almath.vectorFloat___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, PySliceObject slice, vectorFloat v)
__setitem__(self, difference_type i, value_type x)
"""
return _almath.vectorFloat___setitem__(self, *args)
def append(self, *args):
"""append(self, value_type x)"""
return _almath.vectorFloat_append(self, *args)
def empty(self):
"""empty(self) -> bool"""
return _almath.vectorFloat_empty(self)
def size(self):
"""size(self) -> size_type"""
return _almath.vectorFloat_size(self)
def clear(self):
"""clear(self)"""
return _almath.vectorFloat_clear(self)
def swap(self, *args):
"""swap(self, vectorFloat v)"""
return _almath.vectorFloat_swap(self, *args)
def get_allocator(self):
"""get_allocator(self) -> allocator_type"""
return _almath.vectorFloat_get_allocator(self)
def begin(self):
"""begin(self) -> const_iterator"""
return _almath.vectorFloat_begin(self)
def end(self):
"""end(self) -> const_iterator"""
return _almath.vectorFloat_end(self)
def rbegin(self):
"""rbegin(self) -> const_reverse_iterator"""
return _almath.vectorFloat_rbegin(self)
def rend(self):
"""rend(self) -> const_reverse_iterator"""
return _almath.vectorFloat_rend(self)
def pop_back(self):
"""pop_back(self)"""
return _almath.vectorFloat_pop_back(self)
def erase(self, *args):
"""
erase(self, iterator pos) -> iterator
erase(self, iterator first, iterator last) -> iterator
"""
return _almath.vectorFloat_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> vectorFloat
__init__(self, vectorFloat arg0) -> vectorFloat
__init__(self, size_type size) -> vectorFloat
__init__(self, size_type size, value_type value) -> vectorFloat
"""
this = _almath.new_vectorFloat(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args):
"""push_back(self, value_type x)"""
return _almath.vectorFloat_push_back(self, *args)
def front(self):
"""front(self) -> value_type"""
return _almath.vectorFloat_front(self)
def back(self):
"""back(self) -> value_type"""
return _almath.vectorFloat_back(self)
def assign(self, *args):
"""assign(self, size_type n, value_type x)"""
return _almath.vectorFloat_assign(self, *args)
def resize(self, *args):
"""
resize(self, size_type new_size)
resize(self, size_type new_size, value_type x)
"""
return _almath.vectorFloat_resize(self, *args)
def insert(self, *args):
"""
insert(self, iterator pos, value_type x) -> iterator
insert(self, iterator pos, size_type n, value_type x)
"""
return _almath.vectorFloat_insert(self, *args)
def reserve(self, *args):
"""reserve(self, size_type n)"""
return _almath.vectorFloat_reserve(self, *args)
def capacity(self):
"""capacity(self) -> size_type"""
return _almath.vectorFloat_capacity(self)
def __repr__(self):
"""__repr__(self) -> string"""
return _almath.vectorFloat___repr__(self)
__swig_destroy__ = _almath.delete_vectorFloat
__del__ = lambda self : None;
vectorFloat_swigregister = _almath.vectorFloat_swigregister
vectorFloat_swigregister(vectorFloat)
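# Usage sketch: v = vectorFloat(3, 0.0); v.append(1.5)  # len(v) == 4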
class vectorPosition2D(_object):
"""Proxy of C++ std::vector<(AL::Math::Position2D)> class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vectorPosition2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vectorPosition2D, name)
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _almath.vectorPosition2D_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _almath.vectorPosition2D___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _almath.vectorPosition2D___bool__(self)
def __len__(self):
"""__len__(self) -> size_type"""
return _almath.vectorPosition2D___len__(self)
def pop(self):
"""pop(self) -> value_type"""
return _almath.vectorPosition2D_pop(self)
def __getslice__(self, *args):
"""__getslice__(self, difference_type i, difference_type j) -> vectorPosition2D"""
return _almath.vectorPosition2D___getslice__(self, *args)
def __setslice__(self, *args):
"""__setslice__(self, difference_type i, difference_type j, vectorPosition2D v)"""
return _almath.vectorPosition2D___setslice__(self, *args)
def __delslice__(self, *args):
"""__delslice__(self, difference_type i, difference_type j)"""
return _almath.vectorPosition2D___delslice__(self, *args)
def __delitem__(self, *args):
"""
__delitem__(self, difference_type i)
__delitem__(self, PySliceObject slice)
"""
return _almath.vectorPosition2D___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, PySliceObject slice) -> vectorPosition2D
__getitem__(self, difference_type i) -> value_type
"""
return _almath.vectorPosition2D___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, PySliceObject slice, vectorPosition2D v)
__setitem__(self, difference_type i, value_type x)
"""
return _almath.vectorPosition2D___setitem__(self, *args)
def append(self, *args):
"""append(self, value_type x)"""
return _almath.vectorPosition2D_append(self, *args)
def empty(self):
"""empty(self) -> bool"""
return _almath.vectorPosition2D_empty(self)
def size(self):
"""size(self) -> size_type"""
return _almath.vectorPosition2D_size(self)
def clear(self):
"""clear(self)"""
return _almath.vectorPosition2D_clear(self)
def swap(self, *args):
"""swap(self, vectorPosition2D v)"""
return _almath.vectorPosition2D_swap(self, *args)
def get_allocator(self):
"""get_allocator(self) -> allocator_type"""
return _almath.vectorPosition2D_get_allocator(self)
def begin(self):
"""begin(self) -> const_iterator"""
return _almath.vectorPosition2D_begin(self)
def end(self):
"""end(self) -> const_iterator"""
return _almath.vectorPosition2D_end(self)
def rbegin(self):
"""rbegin(self) -> const_reverse_iterator"""
return _almath.vectorPosition2D_rbegin(self)
def rend(self):
"""rend(self) -> const_reverse_iterator"""
return _almath.vectorPosition2D_rend(self)
def pop_back(self):
"""pop_back(self)"""
return _almath.vectorPosition2D_pop_back(self)
def erase(self, *args):
"""
erase(self, iterator pos) -> iterator
erase(self, iterator first, iterator last) -> iterator
"""
return _almath.vectorPosition2D_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> vectorPosition2D
__init__(self, vectorPosition2D arg0) -> vectorPosition2D
__init__(self, size_type size) -> vectorPosition2D
__init__(self, size_type size, value_type value) -> vectorPosition2D
"""
this = _almath.new_vectorPosition2D(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args):
"""push_back(self, value_type x)"""
return _almath.vectorPosition2D_push_back(self, *args)
def front(self):
"""front(self) -> value_type"""
return _almath.vectorPosition2D_front(self)
def back(self):
"""back(self) -> value_type"""
return _almath.vectorPosition2D_back(self)
def assign(self, *args):
"""assign(self, size_type n, value_type x)"""
return _almath.vectorPosition2D_assign(self, *args)
def resize(self, *args):
"""
resize(self, size_type new_size)
resize(self, size_type new_size, value_type x)
"""
return _almath.vectorPosition2D_resize(self, *args)
def insert(self, *args):
"""
insert(self, iterator pos, value_type x) -> iterator
insert(self, iterator pos, size_type n, value_type x)
"""
return _almath.vectorPosition2D_insert(self, *args)
def reserve(self, *args):
"""reserve(self, size_type n)"""
return _almath.vectorPosition2D_reserve(self, *args)
def capacity(self):
"""capacity(self) -> size_type"""
return _almath.vectorPosition2D_capacity(self)
def __repr__(self):
"""__repr__(self) -> string"""
return _almath.vectorPosition2D___repr__(self)
__swig_destroy__ = _almath.delete_vectorPosition2D
__del__ = lambda self : None;
vectorPosition2D_swigregister = _almath.vectorPosition2D_swigregister
vectorPosition2D_swigregister(vectorPosition2D)
class vectorPose2D(_object):
"""Proxy of C++ std::vector<(AL::Math::Pose2D)> class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vectorPose2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vectorPose2D, name)
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _almath.vectorPose2D_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _almath.vectorPose2D___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _almath.vectorPose2D___bool__(self)
def __len__(self):
"""__len__(self) -> size_type"""
return _almath.vectorPose2D___len__(self)
def pop(self):
"""pop(self) -> value_type"""
return _almath.vectorPose2D_pop(self)
def __getslice__(self, *args):
"""__getslice__(self, difference_type i, difference_type j) -> vectorPose2D"""
return _almath.vectorPose2D___getslice__(self, *args)
def __setslice__(self, *args):
"""__setslice__(self, difference_type i, difference_type j, vectorPose2D v)"""
return _almath.vectorPose2D___setslice__(self, *args)
def __delslice__(self, *args):
"""__delslice__(self, difference_type i, difference_type j)"""
return _almath.vectorPose2D___delslice__(self, *args)
def __delitem__(self, *args):
"""
__delitem__(self, difference_type i)
__delitem__(self, PySliceObject slice)
"""
return _almath.vectorPose2D___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, PySliceObject slice) -> vectorPose2D
__getitem__(self, difference_type i) -> value_type
"""
return _almath.vectorPose2D___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, PySliceObject slice, vectorPose2D v)
__setitem__(self, difference_type i, value_type x)
"""
return _almath.vectorPose2D___setitem__(self, *args)
def append(self, *args):
"""append(self, value_type x)"""
return _almath.vectorPose2D_append(self, *args)
def empty(self):
"""empty(self) -> bool"""
return _almath.vectorPose2D_empty(self)
def size(self):
"""size(self) -> size_type"""
return _almath.vectorPose2D_size(self)
def clear(self):
"""clear(self)"""
return _almath.vectorPose2D_clear(self)
def swap(self, *args):
"""swap(self, vectorPose2D v)"""
return _almath.vectorPose2D_swap(self, *args)
def get_allocator(self):
"""get_allocator(self) -> allocator_type"""
return _almath.vectorPose2D_get_allocator(self)
def begin(self):
"""begin(self) -> const_iterator"""
return _almath.vectorPose2D_begin(self)
def end(self):
"""end(self) -> const_iterator"""
return _almath.vectorPose2D_end(self)
def rbegin(self):
"""rbegin(self) -> const_reverse_iterator"""
return _almath.vectorPose2D_rbegin(self)
def rend(self):
"""rend(self) -> const_reverse_iterator"""
return _almath.vectorPose2D_rend(self)
def pop_back(self):
"""pop_back(self)"""
return _almath.vectorPose2D_pop_back(self)
def erase(self, *args):
"""
erase(self, iterator pos) -> iterator
erase(self, iterator first, iterator last) -> iterator
"""
return _almath.vectorPose2D_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> vectorPose2D
__init__(self, vectorPose2D arg0) -> vectorPose2D
__init__(self, size_type size) -> vectorPose2D
__init__(self, size_type size, value_type value) -> vectorPose2D
"""
this = _almath.new_vectorPose2D(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args):
"""push_back(self, value_type x)"""
return _almath.vectorPose2D_push_back(self, *args)
def front(self):
"""front(self) -> value_type"""
return _almath.vectorPose2D_front(self)
def back(self):
"""back(self) -> value_type"""
return _almath.vectorPose2D_back(self)
def assign(self, *args):
"""assign(self, size_type n, value_type x)"""
return _almath.vectorPose2D_assign(self, *args)
def resize(self, *args):
"""
resize(self, size_type new_size)
resize(self, size_type new_size, value_type x)
"""
return _almath.vectorPose2D_resize(self, *args)
def insert(self, *args):
"""
insert(self, iterator pos, value_type x) -> iterator
insert(self, iterator pos, size_type n, value_type x)
"""
return _almath.vectorPose2D_insert(self, *args)
def reserve(self, *args):
"""reserve(self, size_type n)"""
return _almath.vectorPose2D_reserve(self, *args)
def capacity(self):
"""capacity(self) -> size_type"""
return _almath.vectorPose2D_capacity(self)
def __repr__(self):
"""__repr__(self) -> string"""
return _almath.vectorPose2D___repr__(self)
__swig_destroy__ = _almath.delete_vectorPose2D
__del__ = lambda self : None;
vectorPose2D_swigregister = _almath.vectorPose2D_swigregister
vectorPose2D_swigregister(vectorPose2D)
class vectorPosition6D(_object):
"""Proxy of C++ std::vector<(AL::Math::Position6D)> class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vectorPosition6D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vectorPosition6D, name)
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _almath.vectorPosition6D_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _almath.vectorPosition6D___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _almath.vectorPosition6D___bool__(self)
def __len__(self):
"""__len__(self) -> size_type"""
return _almath.vectorPosition6D___len__(self)
def pop(self):
"""pop(self) -> value_type"""
return _almath.vectorPosition6D_pop(self)
def __getslice__(self, *args):
"""__getslice__(self, difference_type i, difference_type j) -> vectorPosition6D"""
return _almath.vectorPosition6D___getslice__(self, *args)
def __setslice__(self, *args):
"""__setslice__(self, difference_type i, difference_type j, vectorPosition6D v)"""
return _almath.vectorPosition6D___setslice__(self, *args)
def __delslice__(self, *args):
"""__delslice__(self, difference_type i, difference_type j)"""
return _almath.vectorPosition6D___delslice__(self, *args)
def __delitem__(self, *args):
"""
__delitem__(self, difference_type i)
__delitem__(self, PySliceObject slice)
"""
return _almath.vectorPosition6D___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, PySliceObject slice) -> vectorPosition6D
__getitem__(self, difference_type i) -> value_type
"""
return _almath.vectorPosition6D___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, PySliceObject slice, vectorPosition6D v)
__setitem__(self, difference_type i, value_type x)
"""
return _almath.vectorPosition6D___setitem__(self, *args)
def append(self, *args):
"""append(self, value_type x)"""
return _almath.vectorPosition6D_append(self, *args)
def empty(self):
"""empty(self) -> bool"""
return _almath.vectorPosition6D_empty(self)
def size(self):
"""size(self) -> size_type"""
return _almath.vectorPosition6D_size(self)
def clear(self):
"""clear(self)"""
return _almath.vectorPosition6D_clear(self)
def swap(self, *args):
"""swap(self, vectorPosition6D v)"""
return _almath.vectorPosition6D_swap(self, *args)
def get_allocator(self):
"""get_allocator(self) -> allocator_type"""
return _almath.vectorPosition6D_get_allocator(self)
def begin(self):
"""begin(self) -> const_iterator"""
return _almath.vectorPosition6D_begin(self)
def end(self):
"""end(self) -> const_iterator"""
return _almath.vectorPosition6D_end(self)
def rbegin(self):
"""rbegin(self) -> const_reverse_iterator"""
return _almath.vectorPosition6D_rbegin(self)
def rend(self):
"""rend(self) -> const_reverse_iterator"""
return _almath.vectorPosition6D_rend(self)
def pop_back(self):
"""pop_back(self)"""
return _almath.vectorPosition6D_pop_back(self)
def erase(self, *args):
"""
erase(self, iterator pos) -> iterator
erase(self, iterator first, iterator last) -> iterator
"""
return _almath.vectorPosition6D_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> vectorPosition6D
__init__(self, vectorPosition6D arg0) -> vectorPosition6D
__init__(self, size_type size) -> vectorPosition6D
__init__(self, size_type size, value_type value) -> vectorPosition6D
"""
this = _almath.new_vectorPosition6D(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args):
"""push_back(self, value_type x)"""
return _almath.vectorPosition6D_push_back(self, *args)
def front(self):
"""front(self) -> value_type"""
return _almath.vectorPosition6D_front(self)
def back(self):
"""back(self) -> value_type"""
return _almath.vectorPosition6D_back(self)
def assign(self, *args):
"""assign(self, size_type n, value_type x)"""
return _almath.vectorPosition6D_assign(self, *args)
def resize(self, *args):
"""
resize(self, size_type new_size)
resize(self, size_type new_size, value_type x)
"""
return _almath.vectorPosition6D_resize(self, *args)
def insert(self, *args):
"""
insert(self, iterator pos, value_type x) -> iterator
insert(self, iterator pos, size_type n, value_type x)
"""
return _almath.vectorPosition6D_insert(self, *args)
def reserve(self, *args):
"""reserve(self, size_type n)"""
return _almath.vectorPosition6D_reserve(self, *args)
def capacity(self):
"""capacity(self) -> size_type"""
return _almath.vectorPosition6D_capacity(self)
def __repr__(self):
"""__repr__(self) -> string"""
return _almath.vectorPosition6D___repr__(self)
__swig_destroy__ = _almath.delete_vectorPosition6D
__del__ = lambda self : None;
vectorPosition6D_swigregister = _almath.vectorPosition6D_swigregister
vectorPosition6D_swigregister(vectorPosition6D)
class Pose2D(_object):
"""Proxy of C++ AL::Math::Pose2D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Pose2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Pose2D, name)
__swig_setmethods__["x"] = _almath.Pose2D_x_set
__swig_getmethods__["x"] = _almath.Pose2D_x_get
if _newclass:x = _swig_property(_almath.Pose2D_x_get, _almath.Pose2D_x_set)
__swig_setmethods__["y"] = _almath.Pose2D_y_set
__swig_getmethods__["y"] = _almath.Pose2D_y_get
if _newclass:y = _swig_property(_almath.Pose2D_y_get, _almath.Pose2D_y_set)
__swig_setmethods__["theta"] = _almath.Pose2D_theta_set
__swig_getmethods__["theta"] = _almath.Pose2D_theta_get
if _newclass:theta = _swig_property(_almath.Pose2D_theta_get, _almath.Pose2D_theta_set)
def __init__(self, *args):
"""
__init__(self) -> Pose2D
__init__(self, float pInit) -> Pose2D
__init__(self, float pX, float pY, float pTheta) -> Pose2D
__init__(self, vectorFloat pFloats) -> Pose2D
"""
this = _almath.new_Pose2D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Pose2D pPos2) -> Pose2D"""
return _almath.Pose2D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Pose2D pPos2) -> Pose2D"""
return _almath.Pose2D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Pose2D"""
return _almath.Pose2D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Pose2D"""
return _almath.Pose2D___neg__(self)
def __iadd__(self, *args):
"""__iadd__(self, Pose2D pPos2) -> Pose2D"""
return _almath.Pose2D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Pose2D pPos2) -> Pose2D"""
return _almath.Pose2D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Pose2D pPos2) -> bool"""
return _almath.Pose2D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Pose2D pPos2) -> bool"""
return _almath.Pose2D___ne__(self, *args)
def __mul__(self, *args):
"""
__mul__(self, Pose2D pPos2) -> Pose2D
__mul__(self, float pVal) -> Pose2D
"""
return _almath.Pose2D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Pose2D"""
return _almath.Pose2D___div__(self, *args)
def __imul__(self, *args):
"""
__imul__(self, Pose2D pPos2) -> Pose2D
__imul__(self, float pVal) -> Pose2D
"""
return _almath.Pose2D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Pose2D"""
return _almath.Pose2D___idiv__(self, *args)
def distanceSquared(self, *args):
"""distanceSquared(self, Pose2D pPos2) -> float"""
return _almath.Pose2D_distanceSquared(self, *args)
def distance(self, *args):
"""distance(self, Pose2D pPos2) -> float"""
return _almath.Pose2D_distance(self, *args)
def inverse(self):
"""inverse(self) -> Pose2D"""
return _almath.Pose2D_inverse(self)
def isNear(self, *args):
"""
isNear(self, Pose2D pPos2, float pEpsilon = 0.0001) -> bool
isNear(self, Pose2D pPos2) -> bool
"""
return _almath.Pose2D_isNear(self, *args)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Pose2D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Pose2D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Pose2D"""
return _almath.Pose2D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Pose2D
__del__ = lambda self : None;
Pose2D_swigregister = _almath.Pose2D_swigregister
Pose2D_swigregister(Pose2D)
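# Usage sketch for Pose2D (assuming the module is importable as ``almath``):
#
#     import almath
#     p = almath.Pose2D(0.2, 0.1, 0.0)
#     q = almath.Pose2D(0.0, 0.0, 1.5707963)    # quarter-turn in place
#     r = p * q                      # pose composition (Pose2D * Pose2D)
#     ident = p * p.inverse()        # composing with the inverse ...
#     assert ident.isNear(almath.Pose2D())      # ... is near the identity
#     x, y, theta = p.toVector()     # three floats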
cvar = _almath.cvar
AXIS_MASK_X = cvar.AXIS_MASK_X
AXIS_MASK_Y = cvar.AXIS_MASK_Y
AXIS_MASK_XY = cvar.AXIS_MASK_XY
AXIS_MASK_Z = cvar.AXIS_MASK_Z
AXIS_MASK_WX = cvar.AXIS_MASK_WX
AXIS_MASK_WY = cvar.AXIS_MASK_WY
AXIS_MASK_WZ = cvar.AXIS_MASK_WZ
AXIS_MASK_WYWZ = cvar.AXIS_MASK_WYWZ
AXIS_MASK_ALL = cvar.AXIS_MASK_ALL
AXIS_MASK_VEL = cvar.AXIS_MASK_VEL
AXIS_MASK_ROT = cvar.AXIS_MASK_ROT
AXIS_MASK_NONE = cvar.AXIS_MASK_NONE
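# The AXIS_MASK_* values above are integer bitmasks naming Cartesian degrees
# of freedom (translations x/y/z, rotations wx/wy/wz) for Cartesian-control
# APIs. Under the usual NAOqi convention (stated here as an assumption, not
# guaranteed by this wrapper) masks combine bitwise:
#
#     import almath
#     mask = almath.AXIS_MASK_X | almath.AXIS_MASK_WZ   # assumed convention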
class Position2D(_object):
"""Proxy of C++ AL::Math::Position2D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Position2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Position2D, name)
__swig_setmethods__["x"] = _almath.Position2D_x_set
__swig_getmethods__["x"] = _almath.Position2D_x_get
if _newclass:x = _swig_property(_almath.Position2D_x_get, _almath.Position2D_x_set)
__swig_setmethods__["y"] = _almath.Position2D_y_set
__swig_getmethods__["y"] = _almath.Position2D_y_get
if _newclass:y = _swig_property(_almath.Position2D_y_get, _almath.Position2D_y_set)
def __init__(self, *args):
"""
__init__(self) -> Position2D
__init__(self, float pInit) -> Position2D
__init__(self, float pX, float pY) -> Position2D
__init__(self, vectorFloat pFloats) -> Position2D
"""
this = _almath.new_Position2D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Position2D pPos2) -> Position2D"""
return _almath.Position2D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Position2D pPos2) -> Position2D"""
return _almath.Position2D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Position2D"""
return _almath.Position2D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Position2D"""
return _almath.Position2D___neg__(self)
def __iadd__(self, *args):
"""__iadd__(self, Position2D pPos2) -> Position2D"""
return _almath.Position2D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Position2D pPos2) -> Position2D"""
return _almath.Position2D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Position2D pPos2) -> bool"""
return _almath.Position2D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Position2D pPos2) -> bool"""
return _almath.Position2D___ne__(self, *args)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Position2D"""
return _almath.Position2D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Position2D"""
return _almath.Position2D___div__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Position2D"""
return _almath.Position2D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Position2D"""
return _almath.Position2D___idiv__(self, *args)
def distanceSquared(self, *args):
"""distanceSquared(self, Position2D pPos2) -> float"""
return _almath.Position2D_distanceSquared(self, *args)
def distance(self, *args):
"""distance(self, Position2D pPos2) -> float"""
return _almath.Position2D_distance(self, *args)
def isNear(self, *args):
"""
isNear(self, Position2D pPos2, float pEpsilon = 0.0001) -> bool
isNear(self, Position2D pPos2) -> bool
"""
return _almath.Position2D_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Position2D_norm(self)
def normalize(self):
"""normalize(self) -> Position2D"""
return _almath.Position2D_normalize(self)
def crossProduct(self, *args):
"""crossProduct(self, Position2D pPos2) -> float"""
return _almath.Position2D_crossProduct(self, *args)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Position2D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Position2D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Position2D"""
return _almath.Position2D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Position2D
__del__ = lambda self : None;
Position2D_swigregister = _almath.Position2D_swigregister
Position2D_swigregister(Position2D)
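# Usage sketch for Position2D (assuming the module is importable as
# ``almath``):
#
#     import almath
#     a = almath.Position2D(3.0, 4.0)
#     assert abs(a.norm() - 5.0) < 1e-6
#     u = a.normalize()              # unit-length result
#     b = almath.Position2D(1.0, 0.0)
#     z = a.crossProduct(b)          # scalar z-component of the 2-D cross product
#     d = a.distance(b)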
def pose2DInverse(*args):
"""
pose2DInverse(Pose2D pPos) -> Pose2D
pose2DInverse(Pose2D pPos, Pose2D pRes)
"""
return _almath.pose2DInverse(*args)
class Position3D(_object):
"""Proxy of C++ AL::Math::Position3D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Position3D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Position3D, name)
__swig_setmethods__["x"] = _almath.Position3D_x_set
__swig_getmethods__["x"] = _almath.Position3D_x_get
if _newclass:x = _swig_property(_almath.Position3D_x_get, _almath.Position3D_x_set)
__swig_setmethods__["y"] = _almath.Position3D_y_set
__swig_getmethods__["y"] = _almath.Position3D_y_get
if _newclass:y = _swig_property(_almath.Position3D_y_get, _almath.Position3D_y_set)
__swig_setmethods__["z"] = _almath.Position3D_z_set
__swig_getmethods__["z"] = _almath.Position3D_z_get
if _newclass:z = _swig_property(_almath.Position3D_z_get, _almath.Position3D_z_set)
def __init__(self, *args):
"""
__init__(self) -> Position3D
__init__(self, float pInit) -> Position3D
__init__(self, float pX, float pY, float pZ) -> Position3D
__init__(self, vectorFloat pFloats) -> Position3D
"""
this = _almath.new_Position3D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Position3D pPos2) -> Position3D"""
return _almath.Position3D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Position3D pPos2) -> Position3D"""
return _almath.Position3D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Position3D"""
return _almath.Position3D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Position3D"""
return _almath.Position3D___neg__(self)
def __iadd__(self, *args):
"""__iadd__(self, Position3D pPos2) -> Position3D"""
return _almath.Position3D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Position3D pPos2) -> Position3D"""
return _almath.Position3D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Position3D pPos2) -> bool"""
return _almath.Position3D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Position3D pPos2) -> bool"""
return _almath.Position3D___ne__(self, *args)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Position3D"""
return _almath.Position3D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Position3D"""
return _almath.Position3D___div__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Position3D"""
return _almath.Position3D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Position3D"""
return _almath.Position3D___idiv__(self, *args)
def distanceSquared(self, *args):
"""distanceSquared(self, Position3D pPos2) -> float"""
return _almath.Position3D_distanceSquared(self, *args)
def distance(self, *args):
"""distance(self, Position3D pPos2) -> float"""
return _almath.Position3D_distance(self, *args)
def isNear(self, *args):
"""
isNear(self, Position3D pPos2, float pEpsilon = 0.0001) -> bool
isNear(self, Position3D pPos2) -> bool
"""
return _almath.Position3D_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Position3D_norm(self)
def normalize(self):
"""normalize(self) -> Position3D"""
return _almath.Position3D_normalize(self)
def dotProduct(self, *args):
"""dotProduct(self, Position3D pPos2) -> float"""
return _almath.Position3D_dotProduct(self, *args)
def crossProduct(self, *args):
"""crossProduct(self, Position3D pPos2) -> Position3D"""
return _almath.Position3D_crossProduct(self, *args)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Position3D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Position3D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Position3D"""
return _almath.Position3D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Position3D
__del__ = lambda self : None;
Position3D_swigregister = _almath.Position3D_swigregister
Position3D_swigregister(Position3D)
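# Usage sketch for Position3D (assuming the module is importable as
# ``almath``):
#
#     import almath
#     ex = almath.Position3D(1.0, 0.0, 0.0)
#     ey = almath.Position3D(0.0, 1.0, 0.0)
#     ez = ex.crossProduct(ey)                  # right-handed cross product
#     assert ez.isNear(almath.Position3D(0.0, 0.0, 1.0))
#     assert abs(ex.dotProduct(ey)) < 1e-6      # orthogonal axes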
def __div__(*args):
"""__div__(float pM, Position3D pPos1) -> Position3D"""
return _almath.__div__(*args)
def dotProduct(*args):
"""dotProduct(Position3D pPos1, Position3D pPos2) -> float"""
return _almath.dotProduct(*args)
class Position6D(_object):
"""Proxy of C++ AL::Math::Position6D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Position6D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Position6D, name)
__swig_setmethods__["x"] = _almath.Position6D_x_set
__swig_getmethods__["x"] = _almath.Position6D_x_get
if _newclass:x = _swig_property(_almath.Position6D_x_get, _almath.Position6D_x_set)
__swig_setmethods__["y"] = _almath.Position6D_y_set
__swig_getmethods__["y"] = _almath.Position6D_y_get
if _newclass:y = _swig_property(_almath.Position6D_y_get, _almath.Position6D_y_set)
__swig_setmethods__["z"] = _almath.Position6D_z_set
__swig_getmethods__["z"] = _almath.Position6D_z_get
if _newclass:z = _swig_property(_almath.Position6D_z_get, _almath.Position6D_z_set)
__swig_setmethods__["wx"] = _almath.Position6D_wx_set
__swig_getmethods__["wx"] = _almath.Position6D_wx_get
if _newclass:wx = _swig_property(_almath.Position6D_wx_get, _almath.Position6D_wx_set)
__swig_setmethods__["wy"] = _almath.Position6D_wy_set
__swig_getmethods__["wy"] = _almath.Position6D_wy_get
if _newclass:wy = _swig_property(_almath.Position6D_wy_get, _almath.Position6D_wy_set)
__swig_setmethods__["wz"] = _almath.Position6D_wz_set
__swig_getmethods__["wz"] = _almath.Position6D_wz_get
if _newclass:wz = _swig_property(_almath.Position6D_wz_get, _almath.Position6D_wz_set)
def __init__(self, *args):
"""
__init__(self) -> Position6D
__init__(self, float pInit) -> Position6D
__init__(self, float pX, float pY, float pZ, float pWx, float pWy,
float pWz) -> Position6D
__init__(self, vectorFloat pFloats) -> Position6D
"""
this = _almath.new_Position6D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Position6D pPos2) -> Position6D"""
return _almath.Position6D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Position6D pPos2) -> Position6D"""
return _almath.Position6D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Position6D"""
return _almath.Position6D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Position6D"""
return _almath.Position6D___neg__(self)
def __iadd__(self, *args):
"""__iadd__(self, Position6D pPos2) -> Position6D"""
return _almath.Position6D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Position6D pPos2) -> Position6D"""
return _almath.Position6D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Position6D pPos2) -> bool"""
return _almath.Position6D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Position6D pPos2) -> bool"""
return _almath.Position6D___ne__(self, *args)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Position6D"""
return _almath.Position6D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Position6D"""
return _almath.Position6D___div__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Position6D"""
return _almath.Position6D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Position6D"""
return _almath.Position6D___idiv__(self, *args)
def isNear(self, *args):
"""
isNear(self, Position6D pPos2, float pEpsilon = 0.0001) -> bool
isNear(self, Position6D pPos2) -> bool
"""
return _almath.Position6D_isNear(self, *args)
def distanceSquared(self, *args):
"""distanceSquared(self, Position6D pPos2) -> float"""
return _almath.Position6D_distanceSquared(self, *args)
def distance(self, *args):
"""distance(self, Position6D pPos2) -> float"""
return _almath.Position6D_distance(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Position6D_norm(self)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Position6D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Position6D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Position6D"""
return _almath.Position6D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Position6D
__del__ = lambda self : None;
Position6D_swigregister = _almath.Position6D_swigregister
Position6D_swigregister(Position6D)
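# Usage sketch for Position6D (assuming the module is importable as
# ``almath``):
#
#     import almath
#     p = almath.Position6D(0.1, 0.2, 0.3, 0.0, 0.0, 1.57)
#     n = p.norm()
#     vals = list(p.toVector())     # six floats: x, y, z, wx, wy, wz
#     assert p.isNear(p)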
def crossProduct(*args):
"""
crossProduct(Position2D pPos1, Position2D pPos2) -> float
crossProduct(Position2D pPos1, Position2D pPos2, float pRes)
crossProduct(Position3D pPos1, Position3D pPos2) -> Position3D
crossProduct(Position3D pPos1, Position3D pPos2, Position3D pRes)
"""
return _almath.crossProduct(*args)
class PositionAndVelocity(_object):
"""Proxy of C++ AL::Math::PositionAndVelocity class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PositionAndVelocity, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PositionAndVelocity, name)
__swig_setmethods__["q"] = _almath.PositionAndVelocity_q_set
__swig_getmethods__["q"] = _almath.PositionAndVelocity_q_get
if _newclass:q = _swig_property(_almath.PositionAndVelocity_q_get, _almath.PositionAndVelocity_q_set)
__swig_setmethods__["dq"] = _almath.PositionAndVelocity_dq_set
__swig_getmethods__["dq"] = _almath.PositionAndVelocity_dq_get
if _newclass:dq = _swig_property(_almath.PositionAndVelocity_dq_get, _almath.PositionAndVelocity_dq_set)
def __init__(self, pq = 0.0, pdq = 0.0):
"""
__init__(self, float pq = 0.0, float pdq = 0.0) -> PositionAndVelocity
__init__(self, float pq = 0.0) -> PositionAndVelocity
__init__(self) -> PositionAndVelocity
"""
this = _almath.new_PositionAndVelocity(pq, pdq)
try: self.this.append(this)
except: self.this = this
def isNear(self, *args):
"""
isNear(self, PositionAndVelocity pDat2, float pEpsilon = 0.0001) -> bool
isNear(self, PositionAndVelocity pDat2) -> bool
"""
return _almath.PositionAndVelocity_isNear(self, *args)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.PositionAndVelocity___repr__(self)
__swig_destroy__ = _almath.delete_PositionAndVelocity
__del__ = lambda self : None;
PositionAndVelocity_swigregister = _almath.PositionAndVelocity_swigregister
PositionAndVelocity_swigregister(PositionAndVelocity)
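# Usage sketch for PositionAndVelocity (assuming the module is importable as
# ``almath``):
#
#     import almath
#     pv = almath.PositionAndVelocity(0.5, 0.1)   # q = 0.5, dq = 0.1
#     pv.dq = 0.0                                 # fields are writable
#     assert pv.isNear(almath.PositionAndVelocity(0.5, 0.0))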
def distanceSquared(*args):
"""
distanceSquared(Pose2D pPos1, Pose2D pPos2) -> float
distanceSquared(Position2D pPos1, Position2D pPos2) -> float
distanceSquared(Position3D pPos1, Position3D pPos2) -> float
distanceSquared(Position6D pPos1, Position6D pPos2) -> float
"""
return _almath.distanceSquared(*args)
def distance(*args):
"""
distance(Pose2D pPos1, Pose2D pPos2) -> float
distance(Position2D pPos1, Position2D pPos2) -> float
distance(Position3D pPos1, Position3D pPos2) -> float
distance(Position6D pPos1, Position6D pPos2) -> float
"""
return _almath.distance(*args)
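# The module-level distance/distanceSquared overloads dispatch on argument
# type and match the methods of the same name, e.g. (assuming ``almath``):
#
#     import almath
#     a = almath.Position2D(0.0, 0.0)
#     b = almath.Position2D(3.0, 4.0)
#     assert abs(almath.distance(a, b) - a.distance(b)) < 1e-6
#     assert abs(almath.distanceSquared(a, b) - 25.0) < 1e-6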
class Quaternion(_object):
"""Proxy of C++ AL::Math::Quaternion class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Quaternion, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Quaternion, name)
__swig_setmethods__["w"] = _almath.Quaternion_w_set
__swig_getmethods__["w"] = _almath.Quaternion_w_get
if _newclass:w = _swig_property(_almath.Quaternion_w_get, _almath.Quaternion_w_set)
__swig_setmethods__["x"] = _almath.Quaternion_x_set
__swig_getmethods__["x"] = _almath.Quaternion_x_get
if _newclass:x = _swig_property(_almath.Quaternion_x_get, _almath.Quaternion_x_set)
__swig_setmethods__["y"] = _almath.Quaternion_y_set
__swig_getmethods__["y"] = _almath.Quaternion_y_get
if _newclass:y = _swig_property(_almath.Quaternion_y_get, _almath.Quaternion_y_set)
__swig_setmethods__["z"] = _almath.Quaternion_z_set
__swig_getmethods__["z"] = _almath.Quaternion_z_get
if _newclass:z = _swig_property(_almath.Quaternion_z_get, _almath.Quaternion_z_set)
def __init__(self, *args):
"""
__init__(self) -> Quaternion
__init__(self, float pW, float pX, float pY, float pZ) -> Quaternion
__init__(self, vectorFloat pFloats) -> Quaternion
"""
this = _almath.new_Quaternion(*args)
try: self.this.append(this)
except: self.this = this
def __mul__(self, *args):
"""__mul__(self, Quaternion pQua2) -> Quaternion"""
return _almath.Quaternion___mul__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Quaternion pQua2) -> bool"""
return _almath.Quaternion___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Quaternion pQua2) -> bool"""
return _almath.Quaternion___ne__(self, *args)
def __imul__(self, *args):
"""
__imul__(self, Quaternion pQu2) -> Quaternion
__imul__(self, float pVal) -> Quaternion
"""
return _almath.Quaternion___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Quaternion"""
return _almath.Quaternion___idiv__(self, *args)
def isNear(self, *args):
"""
isNear(self, Quaternion pQua2, float pEpsilon = 0.0001) -> bool
isNear(self, Quaternion pQua2) -> bool
"""
return _almath.Quaternion_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Quaternion_norm(self)
def normalize(self):
"""normalize(self) -> Quaternion"""
return _almath.Quaternion_normalize(self)
def inverse(self):
"""inverse(self) -> Quaternion"""
return _almath.Quaternion_inverse(self)
def fromAngleAndAxisRotation(*args):
"""fromAngleAndAxisRotation(float pAngle, float pAxisX, float pAxisY, float pAxisZ) -> Quaternion"""
return _almath.Quaternion_fromAngleAndAxisRotation(*args)
if _newclass:fromAngleAndAxisRotation = staticmethod(fromAngleAndAxisRotation)
__swig_getmethods__["fromAngleAndAxisRotation"] = lambda x: fromAngleAndAxisRotation
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Quaternion_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Quaternion___repr__(self)
__swig_destroy__ = _almath.delete_Quaternion
__del__ = lambda self : None;
Quaternion_swigregister = _almath.Quaternion_swigregister
Quaternion_swigregister(Quaternion)
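# Usage sketch for Quaternion (assuming the module is importable as
# ``almath``; constructor order is (w, x, y, z) per the docstring above):
#
#     import almath
#     qz = almath.Quaternion.fromAngleAndAxisRotation(1.5707963, 0.0, 0.0, 1.0)
#     assert abs(qz.norm() - 1.0) < 1e-6        # unit quaternion
#     half_turn = qz * qz                       # composition about z
#     ident = qz * qz.inverse()
#     assert ident.isNear(almath.Quaternion(1.0, 0.0, 0.0, 0.0))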
def Quaternion_fromAngleAndAxisRotation(*args):
"""Quaternion_fromAngleAndAxisRotation(float pAngle, float pAxisX, float pAxisY, float pAxisZ) -> Quaternion"""
return _almath.Quaternion_fromAngleAndAxisRotation(*args)
def quaternionFromAngleAndAxisRotation(*args):
"""quaternionFromAngleAndAxisRotation(float pAngle, float pAxisX, float pAxisY, float pAxisZ) -> Quaternion"""
return _almath.quaternionFromAngleAndAxisRotation(*args)
def angleAndAxisRotationFromQuaternion(*args):
"""
angleAndAxisRotationFromQuaternion(Quaternion pQuaternion, float pAngle, float pAxisX,
float pAxisY, float pAxisZ)
"""
return _almath.angleAndAxisRotationFromQuaternion(*args)
class Rotation(_object):
"""Proxy of C++ AL::Math::Rotation class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Rotation, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Rotation, name)
__swig_setmethods__["r1_c1"] = _almath.Rotation_r1_c1_set
__swig_getmethods__["r1_c1"] = _almath.Rotation_r1_c1_get
if _newclass:r1_c1 = _swig_property(_almath.Rotation_r1_c1_get, _almath.Rotation_r1_c1_set)
__swig_setmethods__["r1_c2"] = _almath.Rotation_r1_c2_set
__swig_getmethods__["r1_c2"] = _almath.Rotation_r1_c2_get
if _newclass:r1_c2 = _swig_property(_almath.Rotation_r1_c2_get, _almath.Rotation_r1_c2_set)
__swig_setmethods__["r1_c3"] = _almath.Rotation_r1_c3_set
__swig_getmethods__["r1_c3"] = _almath.Rotation_r1_c3_get
if _newclass:r1_c3 = _swig_property(_almath.Rotation_r1_c3_get, _almath.Rotation_r1_c3_set)
__swig_setmethods__["r2_c1"] = _almath.Rotation_r2_c1_set
__swig_getmethods__["r2_c1"] = _almath.Rotation_r2_c1_get
if _newclass:r2_c1 = _swig_property(_almath.Rotation_r2_c1_get, _almath.Rotation_r2_c1_set)
__swig_setmethods__["r2_c2"] = _almath.Rotation_r2_c2_set
__swig_getmethods__["r2_c2"] = _almath.Rotation_r2_c2_get
if _newclass:r2_c2 = _swig_property(_almath.Rotation_r2_c2_get, _almath.Rotation_r2_c2_set)
__swig_setmethods__["r2_c3"] = _almath.Rotation_r2_c3_set
__swig_getmethods__["r2_c3"] = _almath.Rotation_r2_c3_get
if _newclass:r2_c3 = _swig_property(_almath.Rotation_r2_c3_get, _almath.Rotation_r2_c3_set)
__swig_setmethods__["r3_c1"] = _almath.Rotation_r3_c1_set
__swig_getmethods__["r3_c1"] = _almath.Rotation_r3_c1_get
if _newclass:r3_c1 = _swig_property(_almath.Rotation_r3_c1_get, _almath.Rotation_r3_c1_set)
__swig_setmethods__["r3_c2"] = _almath.Rotation_r3_c2_set
__swig_getmethods__["r3_c2"] = _almath.Rotation_r3_c2_get
if _newclass:r3_c2 = _swig_property(_almath.Rotation_r3_c2_get, _almath.Rotation_r3_c2_set)
__swig_setmethods__["r3_c3"] = _almath.Rotation_r3_c3_set
__swig_getmethods__["r3_c3"] = _almath.Rotation_r3_c3_get
if _newclass:r3_c3 = _swig_property(_almath.Rotation_r3_c3_get, _almath.Rotation_r3_c3_set)
def __init__(self, *args):
"""
__init__(self) -> Rotation
__init__(self, vectorFloat pFloats) -> Rotation
"""
this = _almath.new_Rotation(*args)
try: self.this.append(this)
except: self.this = this
def __imul__(self, *args):
"""__imul__(self, Rotation pRot2) -> Rotation"""
return _almath.Rotation___imul__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Rotation pRot2) -> bool"""
return _almath.Rotation___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Rotation pRot2) -> bool"""
return _almath.Rotation___ne__(self, *args)
def isNear(self, *args):
"""
isNear(self, Rotation pRot2, float pEpsilon = 0.0001) -> bool
isNear(self, Rotation pRot2) -> bool
"""
return _almath.Rotation_isNear(self, *args)
def transpose(self):
"""transpose(self) -> Rotation"""
return _almath.Rotation_transpose(self)
def determinant(self):
"""determinant(self) -> float"""
return _almath.Rotation_determinant(self)
def fromQuaternion(*args):
"""fromQuaternion(float pA, float pB, float pC, float pD) -> Rotation"""
return _almath.Rotation_fromQuaternion(*args)
if _newclass:fromQuaternion = staticmethod(fromQuaternion)
__swig_getmethods__["fromQuaternion"] = lambda x: fromQuaternion
def fromAngleDirection(*args):
"""fromAngleDirection(float pAngle, float pX, float pY, float pZ) -> Rotation"""
return _almath.Rotation_fromAngleDirection(*args)
if _newclass:fromAngleDirection = staticmethod(fromAngleDirection)
__swig_getmethods__["fromAngleDirection"] = lambda x: fromAngleDirection
def fromRotX(*args):
"""fromRotX(float pRotX) -> Rotation"""
return _almath.Rotation_fromRotX(*args)
if _newclass:fromRotX = staticmethod(fromRotX)
__swig_getmethods__["fromRotX"] = lambda x: fromRotX
def fromRotY(*args):
"""fromRotY(float pRotY) -> Rotation"""
return _almath.Rotation_fromRotY(*args)
if _newclass:fromRotY = staticmethod(fromRotY)
__swig_getmethods__["fromRotY"] = lambda x: fromRotY
def fromRotZ(*args):
"""fromRotZ(float pRotZ) -> Rotation"""
return _almath.Rotation_fromRotZ(*args)
if _newclass:fromRotZ = staticmethod(fromRotZ)
__swig_getmethods__["fromRotZ"] = lambda x: fromRotZ
def from3DRotation(*args):
"""from3DRotation(float pWX, float pWY, float pWZ) -> Rotation"""
return _almath.Rotation_from3DRotation(*args)
if _newclass:from3DRotation = staticmethod(from3DRotation)
__swig_getmethods__["from3DRotation"] = lambda x: from3DRotation
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Rotation_toVector(self)
def __str__(self):
"""__str__(self) -> char"""
return _almath.Rotation___str__(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Rotation___repr__(self)
def __mul__(self, *args):
"""
__mul__(self, Rotation pRot2) -> Rotation
__mul__(self, Position3D rhs) -> Position3D
"""
return _almath.Rotation___mul__(self, *args)
__swig_destroy__ = _almath.delete_Rotation
__del__ = lambda self : None;
Rotation_swigregister = _almath.Rotation_swigregister
Rotation_swigregister(Rotation)
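# Usage sketch for Rotation, a 3x3 rotation matrix (assuming the module is
# importable as ``almath``):
#
#     import almath
#     Rz = almath.Rotation.fromRotZ(1.5707963)       # quarter-turn about z
#     p = Rz * almath.Position3D(1.0, 0.0, 0.0)      # Rotation * Position3D
#     assert p.isNear(almath.Position3D(0.0, 1.0, 0.0))
#     assert abs(Rz.determinant() - 1.0) < 1e-6      # proper rotation
#     Rt = Rz.transpose()         # for rotations, transpose equals inverse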
def quaternionInverse(*args):
"""
quaternionInverse(Quaternion pQua, Quaternion pQuaOut)
quaternionInverse(Quaternion pQua) -> Quaternion
"""
return _almath.quaternionInverse(*args)
def Rotation_fromQuaternion(*args):
"""Rotation_fromQuaternion(float pA, float pB, float pC, float pD) -> Rotation"""
return _almath.Rotation_fromQuaternion(*args)
def Rotation_fromAngleDirection(*args):
"""Rotation_fromAngleDirection(float pAngle, float pX, float pY, float pZ) -> Rotation"""
return _almath.Rotation_fromAngleDirection(*args)
def Rotation_fromRotX(*args):
"""Rotation_fromRotX(float pRotX) -> Rotation"""
return _almath.Rotation_fromRotX(*args)
def Rotation_fromRotY(*args):
"""Rotation_fromRotY(float pRotY) -> Rotation"""
return _almath.Rotation_fromRotY(*args)
def Rotation_fromRotZ(*args):
"""Rotation_fromRotZ(float pRotZ) -> Rotation"""
return _almath.Rotation_fromRotZ(*args)
def Rotation_from3DRotation(*args):
"""Rotation_from3DRotation(float pWX, float pWY, float pWZ) -> Rotation"""
return _almath.Rotation_from3DRotation(*args)
def transpose(*args):
"""transpose(Rotation pRot) -> Rotation"""
return _almath.transpose(*args)
def rotationFromQuaternion(*args):
"""rotationFromQuaternion(float pA, float pB, float pC, float pD) -> Rotation"""
return _almath.rotationFromQuaternion(*args)
def applyRotation(*args):
"""applyRotation(Rotation pRot, float pX, float pY, float pZ)"""
return _almath.applyRotation(*args)
def rotationFromRotX(*args):
"""rotationFromRotX(float pRotX) -> Rotation"""
return _almath.rotationFromRotX(*args)
def rotationFromRotY(*args):
"""rotationFromRotY(float pRotY) -> Rotation"""
return _almath.rotationFromRotY(*args)
def rotationFromRotZ(*args):
"""rotationFromRotZ(float pRotZ) -> Rotation"""
return _almath.rotationFromRotZ(*args)
def rotationFrom3DRotation(*args):
"""rotationFrom3DRotation(float pWX, float pWY, float pWZ) -> Rotation"""
return _almath.rotationFrom3DRotation(*args)
class Rotation3D(_object):
"""Proxy of C++ AL::Math::Rotation3D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Rotation3D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Rotation3D, name)
__swig_setmethods__["wx"] = _almath.Rotation3D_wx_set
__swig_getmethods__["wx"] = _almath.Rotation3D_wx_get
if _newclass:wx = _swig_property(_almath.Rotation3D_wx_get, _almath.Rotation3D_wx_set)
__swig_setmethods__["wy"] = _almath.Rotation3D_wy_set
__swig_getmethods__["wy"] = _almath.Rotation3D_wy_get
if _newclass:wy = _swig_property(_almath.Rotation3D_wy_get, _almath.Rotation3D_wy_set)
__swig_setmethods__["wz"] = _almath.Rotation3D_wz_set
__swig_getmethods__["wz"] = _almath.Rotation3D_wz_get
if _newclass:wz = _swig_property(_almath.Rotation3D_wz_get, _almath.Rotation3D_wz_set)
def __init__(self, *args):
"""
__init__(self) -> Rotation3D
__init__(self, float pInit) -> Rotation3D
__init__(self, float pWx, float pWy, float pWz) -> Rotation3D
__init__(self, vectorFloat pFloats) -> Rotation3D
"""
this = _almath.new_Rotation3D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Rotation3D pRot2) -> Rotation3D"""
return _almath.Rotation3D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Rotation3D pRot2) -> Rotation3D"""
return _almath.Rotation3D___sub__(self, *args)
def __iadd__(self, *args):
"""__iadd__(self, Rotation3D pRot2) -> Rotation3D"""
return _almath.Rotation3D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Rotation3D pRot2) -> Rotation3D"""
return _almath.Rotation3D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Rotation3D pRot2) -> bool"""
return _almath.Rotation3D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Rotation3D pRot2) -> bool"""
return _almath.Rotation3D___ne__(self, *args)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Rotation3D"""
return _almath.Rotation3D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Rotation3D"""
return _almath.Rotation3D___div__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Rotation3D"""
return _almath.Rotation3D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Rotation3D"""
return _almath.Rotation3D___idiv__(self, *args)
def isNear(self, *args):
"""
isNear(self, Rotation3D pRot2, float pEpsilon = 0.0001) -> bool
isNear(self, Rotation3D pRot2) -> bool
"""
return _almath.Rotation3D_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Rotation3D_norm(self)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Rotation3D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Rotation3D___repr__(self)
__swig_destroy__ = _almath.delete_Rotation3D
__del__ = lambda self : None;
Rotation3D_swigregister = _almath.Rotation3D_swigregister
Rotation3D_swigregister(Rotation3D)
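# Usage sketch for Rotation3D, a (wx, wy, wz) rotation triple (assuming the
# module is importable as ``almath``):
#
#     import almath
#     r = almath.Rotation3D(0.1, 0.0, 0.3)
#     r += almath.Rotation3D(0.0, 0.2, 0.0)     # component-wise arithmetic
#     assert r.isNear(almath.Rotation3D(0.1, 0.2, 0.3))
#     n = r.norm()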
class Transform(_object):
"""Proxy of C++ AL::Math::Transform class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Transform, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Transform, name)
__swig_setmethods__["r1_c1"] = _almath.Transform_r1_c1_set
__swig_getmethods__["r1_c1"] = _almath.Transform_r1_c1_get
if _newclass:r1_c1 = _swig_property(_almath.Transform_r1_c1_get, _almath.Transform_r1_c1_set)
__swig_setmethods__["r1_c2"] = _almath.Transform_r1_c2_set
__swig_getmethods__["r1_c2"] = _almath.Transform_r1_c2_get
if _newclass:r1_c2 = _swig_property(_almath.Transform_r1_c2_get, _almath.Transform_r1_c2_set)
__swig_setmethods__["r1_c3"] = _almath.Transform_r1_c3_set
__swig_getmethods__["r1_c3"] = _almath.Transform_r1_c3_get
if _newclass:r1_c3 = _swig_property(_almath.Transform_r1_c3_get, _almath.Transform_r1_c3_set)
__swig_setmethods__["r1_c4"] = _almath.Transform_r1_c4_set
__swig_getmethods__["r1_c4"] = _almath.Transform_r1_c4_get
if _newclass:r1_c4 = _swig_property(_almath.Transform_r1_c4_get, _almath.Transform_r1_c4_set)
__swig_setmethods__["r2_c1"] = _almath.Transform_r2_c1_set
__swig_getmethods__["r2_c1"] = _almath.Transform_r2_c1_get
if _newclass:r2_c1 = _swig_property(_almath.Transform_r2_c1_get, _almath.Transform_r2_c1_set)
__swig_setmethods__["r2_c2"] = _almath.Transform_r2_c2_set
__swig_getmethods__["r2_c2"] = _almath.Transform_r2_c2_get
if _newclass:r2_c2 = _swig_property(_almath.Transform_r2_c2_get, _almath.Transform_r2_c2_set)
__swig_setmethods__["r2_c3"] = _almath.Transform_r2_c3_set
__swig_getmethods__["r2_c3"] = _almath.Transform_r2_c3_get
if _newclass:r2_c3 = _swig_property(_almath.Transform_r2_c3_get, _almath.Transform_r2_c3_set)
__swig_setmethods__["r2_c4"] = _almath.Transform_r2_c4_set
__swig_getmethods__["r2_c4"] = _almath.Transform_r2_c4_get
if _newclass:r2_c4 = _swig_property(_almath.Transform_r2_c4_get, _almath.Transform_r2_c4_set)
__swig_setmethods__["r3_c1"] = _almath.Transform_r3_c1_set
__swig_getmethods__["r3_c1"] = _almath.Transform_r3_c1_get
if _newclass:r3_c1 = _swig_property(_almath.Transform_r3_c1_get, _almath.Transform_r3_c1_set)
__swig_setmethods__["r3_c2"] = _almath.Transform_r3_c2_set
__swig_getmethods__["r3_c2"] = _almath.Transform_r3_c2_get
if _newclass:r3_c2 = _swig_property(_almath.Transform_r3_c2_get, _almath.Transform_r3_c2_set)
__swig_setmethods__["r3_c3"] = _almath.Transform_r3_c3_set
__swig_getmethods__["r3_c3"] = _almath.Transform_r3_c3_get
if _newclass:r3_c3 = _swig_property(_almath.Transform_r3_c3_get, _almath.Transform_r3_c3_set)
__swig_setmethods__["r3_c4"] = _almath.Transform_r3_c4_set
__swig_getmethods__["r3_c4"] = _almath.Transform_r3_c4_get
if _newclass:r3_c4 = _swig_property(_almath.Transform_r3_c4_get, _almath.Transform_r3_c4_set)
def __init__(self, *args):
"""
__init__(self) -> Transform
__init__(self, vectorFloat pFloats) -> Transform
__init__(self, float pPosX, float pPosY, float pPosZ) -> Transform
"""
this = _almath.new_Transform(*args)
try: self.this.append(this)
except: self.this = this
def __imul__(self, *args):
"""__imul__(self, Transform pT2) -> Transform"""
return _almath.Transform___imul__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Transform pT2) -> bool"""
return _almath.Transform___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Transform pT2) -> bool"""
return _almath.Transform___ne__(self, *args)
def isNear(self, *args):
"""
isNear(self, Transform pT2, float pEpsilon = 0.0001) -> bool
isNear(self, Transform pT2) -> bool
"""
return _almath.Transform_isNear(self, *args)
def isTransform(self, pEpsilon = 0.0001):
"""
isTransform(self, float pEpsilon = 0.0001) -> bool
isTransform(self) -> bool
"""
return _almath.Transform_isTransform(self, pEpsilon)
def norm(self):
"""norm(self) -> float"""
return _almath.Transform_norm(self)
def determinant(self):
"""determinant(self) -> float"""
return _almath.Transform_determinant(self)
def inverse(self):
"""inverse(self) -> Transform"""
return _almath.Transform_inverse(self)
def fromRotX(*args):
"""fromRotX(float pRotX) -> Transform"""
return _almath.Transform_fromRotX(*args)
if _newclass:fromRotX = staticmethod(fromRotX)
__swig_getmethods__["fromRotX"] = lambda x: fromRotX
def fromRotY(*args):
"""fromRotY(float pRotY) -> Transform"""
return _almath.Transform_fromRotY(*args)
if _newclass:fromRotY = staticmethod(fromRotY)
__swig_getmethods__["fromRotY"] = lambda x: fromRotY
def fromRotZ(*args):
"""fromRotZ(float pRotZ) -> Transform"""
return _almath.Transform_fromRotZ(*args)
if _newclass:fromRotZ = staticmethod(fromRotZ)
__swig_getmethods__["fromRotZ"] = lambda x: fromRotZ
def from3DRotation(*args):
"""from3DRotation(float pWX, float pWY, float pWZ) -> Transform"""
return _almath.Transform_from3DRotation(*args)
if _newclass:from3DRotation = staticmethod(from3DRotation)
__swig_getmethods__["from3DRotation"] = lambda x: from3DRotation
def fromPosition(*args):
"""
fromPosition(float pX, float pY, float pZ) -> Transform
fromPosition(float pX, float pY, float pZ, float pWX, float pWY,
float pWZ) -> Transform
"""
return _almath.Transform_fromPosition(*args)
if _newclass:fromPosition = staticmethod(fromPosition)
__swig_getmethods__["fromPosition"] = lambda x: fromPosition
def diff(self, *args):
"""diff(self, Transform pT2) -> Transform"""
return _almath.Transform_diff(self, *args)
def distanceSquared(self, *args):
"""distanceSquared(self, Transform pT2) -> float"""
return _almath.Transform_distanceSquared(self, *args)
def distance(self, *args):
"""distance(self, Transform pT2) -> float"""
return _almath.Transform_distance(self, *args)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Transform_toVector(self)
def __str__(self):
"""__str__(self) -> char"""
return _almath.Transform___str__(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Transform___repr__(self)
def __mul__(self, *args):
"""
__mul__(self, Transform pT2) -> Transform
__mul__(self, Position3D rhs) -> Position3D
"""
return _almath.Transform___mul__(self, *args)
__swig_destroy__ = _almath.delete_Transform
__del__ = lambda self : None;
Transform_swigregister = _almath.Transform_swigregister
Transform_swigregister(Transform)
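# Usage sketch for Transform, a 3x4 homogeneous transform (assuming the
# module is importable as ``almath``):
#
#     import almath
#     t = almath.Transform.fromPosition(0.1, 0.0, 0.0)   # pure translation
#     rz = almath.Transform.fromRotZ(1.5707963)
#     chained = t * rz                   # composition (Transform * Transform)
#     p = chained * almath.Position3D(1.0, 0.0, 0.0)     # apply to a point
#     assert chained.isTransform()       # rotation block is orthonormal
#     roundtrip = chained * chained.inverse()
#     assert roundtrip.isNear(almath.Transform())        # near identity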
def Transform_fromRotX(*args):
"""Transform_fromRotX(float pRotX) -> Transform"""
return _almath.Transform_fromRotX(*args)
def Transform_fromRotY(*args):
"""Transform_fromRotY(float pRotY) -> Transform"""
return _almath.Transform_fromRotY(*args)
def Transform_fromRotZ(*args):
"""Transform_fromRotZ(float pRotZ) -> Transform"""
return _almath.Transform_fromRotZ(*args)
def Transform_from3DRotation(*args):
"""Transform_from3DRotation(float pWX, float pWY, float pWZ) -> Transform"""
return _almath.Transform_from3DRotation(*args)
def Transform_fromPosition(*args):
"""
Transform_fromPosition(float pX, float pY, float pZ) -> Transform
Transform_fromPosition(float pX, float pY, float pZ, float pWX, float pWY,
float pWZ) -> Transform
"""
return _almath.Transform_fromPosition(*args)
def transformPreMultiply(*args):
"""transformPreMultiply(Transform pT, Transform pTOut)"""
return _almath.transformPreMultiply(*args)
def transformFromRotX(*args):
"""transformFromRotX(float pRotX) -> Transform"""
return _almath.transformFromRotX(*args)
def transformFromRotY(*args):
"""transformFromRotY(float pRotY) -> Transform"""
return _almath.transformFromRotY(*args)
def transformFromRotZ(*args):
"""transformFromRotZ(float pRotZ) -> Transform"""
return _almath.transformFromRotZ(*args)
def transformFrom3DRotation(*args):
"""transformFrom3DRotation(float pWX, float pWY, float pWZ) -> Transform"""
return _almath.transformFrom3DRotation(*args)
def transformInvertInPlace(*args):
"""transformInvertInPlace(Transform pT)"""
return _almath.transformInvertInPlace(*args)
def pinv(*args):
"""pinv(Transform pT) -> Transform"""
return _almath.pinv(*args)
def transformDiff(*args):
"""transformDiff(Transform pT1, Transform pT2) -> Transform"""
return _almath.transformDiff(*args)
def transformDistanceSquared(*args):
"""transformDistanceSquared(Transform pT1, Transform pT2) -> float"""
return _almath.transformDistanceSquared(*args)
def transformDistance(*args):
"""transformDistance(Transform pT1, Transform pT2) -> float"""
return _almath.transformDistance(*args)
class Velocity3D(_object):
"""Proxy of C++ AL::Math::Velocity3D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Velocity3D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Velocity3D, name)
__swig_setmethods__["xd"] = _almath.Velocity3D_xd_set
__swig_getmethods__["xd"] = _almath.Velocity3D_xd_get
if _newclass:xd = _swig_property(_almath.Velocity3D_xd_get, _almath.Velocity3D_xd_set)
__swig_setmethods__["yd"] = _almath.Velocity3D_yd_set
__swig_getmethods__["yd"] = _almath.Velocity3D_yd_get
if _newclass:yd = _swig_property(_almath.Velocity3D_yd_get, _almath.Velocity3D_yd_set)
__swig_setmethods__["zd"] = _almath.Velocity3D_zd_set
__swig_getmethods__["zd"] = _almath.Velocity3D_zd_get
if _newclass:zd = _swig_property(_almath.Velocity3D_zd_get, _almath.Velocity3D_zd_set)
def __init__(self, *args):
"""
__init__(self) -> Velocity3D
__init__(self, float pInit) -> Velocity3D
__init__(self, float pXd, float pYd, float pZd) -> Velocity3D
__init__(self, vectorFloat pFloats) -> Velocity3D
"""
this = _almath.new_Velocity3D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Velocity3D pVel2) -> Velocity3D"""
return _almath.Velocity3D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Velocity3D pVel2) -> Velocity3D"""
return _almath.Velocity3D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Velocity3D"""
return _almath.Velocity3D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Velocity3D"""
return _almath.Velocity3D___neg__(self)
def __iadd__(self, *args):
"""__iadd__(self, Velocity3D pVel2) -> Velocity3D"""
return _almath.Velocity3D___iadd__(self, *args)
def __isub__(self, *args):
"""__isub__(self, Velocity3D pVel2) -> Velocity3D"""
return _almath.Velocity3D___isub__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Velocity3D pVel2) -> bool"""
return _almath.Velocity3D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Velocity3D pVel2) -> bool"""
return _almath.Velocity3D___ne__(self, *args)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Velocity3D"""
return _almath.Velocity3D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Velocity3D"""
return _almath.Velocity3D___div__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Velocity3D"""
return _almath.Velocity3D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Velocity3D"""
return _almath.Velocity3D___idiv__(self, *args)
def isNear(self, *args):
"""
isNear(self, Velocity3D pVel2, float pEpsilon = 0.0001) -> bool
isNear(self, Velocity3D pVel2) -> bool
"""
return _almath.Velocity3D_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Velocity3D_norm(self)
def normalize(self):
"""normalize(self) -> Velocity3D"""
return _almath.Velocity3D_normalize(self)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Velocity3D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Velocity3D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Velocity3D"""
return _almath.Velocity3D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Velocity3D
__del__ = lambda self : None;
Velocity3D_swigregister = _almath.Velocity3D_swigregister
Velocity3D_swigregister(Velocity3D)
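# Usage sketch for Velocity3D (assuming the module is importable as
# ``almath``):
#
#     import almath
#     v = almath.Velocity3D(0.3, 0.4, 0.0)
#     assert abs(v.norm() - 0.5) < 1e-6
#     half = 0.5 * v                # scalar multiplication works on either side
#     unit = v.normalize()          # unit-length result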
def transformToFloatVector(*args):
"""
transformToFloatVector(Transform pT, vectorFloat pTOut)
transformToFloatVector(Transform pT) -> vectorFloat
"""
return _almath.transformToFloatVector(*args)
def determinant(*args):
"""
determinant(Rotation pRot) -> float
determinant(Transform pT) -> float
determinant(vectorFloat pFloats) -> float
"""
return _almath.determinant(*args)
def transformInverse(*args):
"""
transformInverse(Transform pT, Transform pTOut)
transformInverse(Transform pT) -> Transform
"""
return _almath.transformInverse(*args)
def transformFromPosition(*args):
"""
transformFromPosition(float pX, float pY, float pZ) -> Transform
transformFromPosition(float pX, float pY, float pZ, float pWX, float pWY,
float pWZ) -> Transform
"""
return _almath.transformFromPosition(*args)
class Velocity6D(_object):
"""Proxy of C++ AL::Math::Velocity6D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Velocity6D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Velocity6D, name)
__swig_setmethods__["xd"] = _almath.Velocity6D_xd_set
__swig_getmethods__["xd"] = _almath.Velocity6D_xd_get
if _newclass:xd = _swig_property(_almath.Velocity6D_xd_get, _almath.Velocity6D_xd_set)
__swig_setmethods__["yd"] = _almath.Velocity6D_yd_set
__swig_getmethods__["yd"] = _almath.Velocity6D_yd_get
if _newclass:yd = _swig_property(_almath.Velocity6D_yd_get, _almath.Velocity6D_yd_set)
__swig_setmethods__["zd"] = _almath.Velocity6D_zd_set
__swig_getmethods__["zd"] = _almath.Velocity6D_zd_get
if _newclass:zd = _swig_property(_almath.Velocity6D_zd_get, _almath.Velocity6D_zd_set)
__swig_setmethods__["wxd"] = _almath.Velocity6D_wxd_set
__swig_getmethods__["wxd"] = _almath.Velocity6D_wxd_get
if _newclass:wxd = _swig_property(_almath.Velocity6D_wxd_get, _almath.Velocity6D_wxd_set)
__swig_setmethods__["wyd"] = _almath.Velocity6D_wyd_set
__swig_getmethods__["wyd"] = _almath.Velocity6D_wyd_get
if _newclass:wyd = _swig_property(_almath.Velocity6D_wyd_get, _almath.Velocity6D_wyd_set)
__swig_setmethods__["wzd"] = _almath.Velocity6D_wzd_set
__swig_getmethods__["wzd"] = _almath.Velocity6D_wzd_get
if _newclass:wzd = _swig_property(_almath.Velocity6D_wzd_get, _almath.Velocity6D_wzd_set)
def __init__(self, *args):
"""
__init__(self) -> Velocity6D
__init__(self, float pInit) -> Velocity6D
__init__(self, float pXd, float pYd, float pZd, float pWxd, float pWyd,
float pWzd) -> Velocity6D
__init__(self, vectorFloat pFloats) -> Velocity6D
"""
this = _almath.new_Velocity6D(*args)
try: self.this.append(this)
except: self.this = this
def __add__(self, *args):
"""__add__(self, Velocity6D pVel2) -> Velocity6D"""
return _almath.Velocity6D___add__(self, *args)
def __sub__(self, *args):
"""__sub__(self, Velocity6D pVel2) -> Velocity6D"""
return _almath.Velocity6D___sub__(self, *args)
def __pos__(self):
"""__pos__(self) -> Velocity6D"""
return _almath.Velocity6D___pos__(self)
def __neg__(self):
"""__neg__(self) -> Velocity6D"""
return _almath.Velocity6D___neg__(self)
def __mul__(self, *args):
"""__mul__(self, float pVal) -> Velocity6D"""
return _almath.Velocity6D___mul__(self, *args)
def __div__(self, *args):
"""__div__(self, float pVal) -> Velocity6D"""
return _almath.Velocity6D___div__(self, *args)
def __eq__(self, *args):
"""__eq__(self, Velocity6D pVel2) -> bool"""
return _almath.Velocity6D___eq__(self, *args)
def __ne__(self, *args):
"""__ne__(self, Velocity6D pVel2) -> bool"""
return _almath.Velocity6D___ne__(self, *args)
def __imul__(self, *args):
"""__imul__(self, float pVal) -> Velocity6D"""
return _almath.Velocity6D___imul__(self, *args)
def __idiv__(self, *args):
"""__idiv__(self, float pVal) -> Velocity6D"""
return _almath.Velocity6D___idiv__(self, *args)
def isNear(self, *args):
"""
isNear(self, Velocity6D pVel2, float pEpsilon = 0.0001) -> bool
isNear(self, Velocity6D pVel2) -> bool
"""
return _almath.Velocity6D_isNear(self, *args)
def norm(self):
"""norm(self) -> float"""
return _almath.Velocity6D_norm(self)
def normalize(self):
"""normalize(self) -> Velocity6D"""
return _almath.Velocity6D_normalize(self)
def toVector(self):
"""toVector(self) -> vectorFloat"""
return _almath.Velocity6D_toVector(self)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.Velocity6D___repr__(self)
def __rmul__(self, *args):
"""__rmul__(self, float lhs) -> Velocity6D"""
return _almath.Velocity6D___rmul__(self, *args)
__swig_destroy__ = _almath.delete_Velocity6D
__del__ = lambda self : None;
Velocity6D_swigregister = _almath.Velocity6D_swigregister
Velocity6D_swigregister(Velocity6D)
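# Usage sketch for Velocity6D (assuming the module is importable as
# ``almath``):
#
#     import almath
#     v = almath.Velocity6D(0.1, 0.0, 0.0, 0.0, 0.0, 0.5)   # xd..wzd
#     v2 = 2.0 * v                       # scales all six components
#     assert v2.isNear(almath.Velocity6D(0.2, 0.0, 0.0, 0.0, 0.0, 1.0))
#     vals = list(v.toVector())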
class TransformAndVelocity6D(_object):
"""Proxy of C++ AL::Math::TransformAndVelocity6D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TransformAndVelocity6D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TransformAndVelocity6D, name)
__swig_setmethods__["T"] = _almath.TransformAndVelocity6D_T_set
__swig_getmethods__["T"] = _almath.TransformAndVelocity6D_T_get
if _newclass:T = _swig_property(_almath.TransformAndVelocity6D_T_get, _almath.TransformAndVelocity6D_T_set)
__swig_setmethods__["V"] = _almath.TransformAndVelocity6D_V_set
__swig_getmethods__["V"] = _almath.TransformAndVelocity6D_V_get
if _newclass:V = _swig_property(_almath.TransformAndVelocity6D_V_get, _almath.TransformAndVelocity6D_V_set)
def isNear(self, *args):
"""
isNear(self, TransformAndVelocity6D pTV2, float pEpsilon = 0.0001) -> bool
isNear(self, TransformAndVelocity6D pTV2) -> bool
"""
return _almath.TransformAndVelocity6D_isNear(self, *args)
def __repr__(self):
"""__repr__(self) -> char"""
return _almath.TransformAndVelocity6D___repr__(self)
def __init__(self):
"""__init__(self) -> TransformAndVelocity6D"""
this = _almath.new_TransformAndVelocity6D()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _almath.delete_TransformAndVelocity6D
__del__ = lambda self : None;
TransformAndVelocity6D_swigregister = _almath.TransformAndVelocity6D_swigregister
TransformAndVelocity6D_swigregister(TransformAndVelocity6D)
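# Usage sketch for TransformAndVelocity6D, a plain pair of a pose and a
# twist (assuming the module is importable as ``almath``):
#
#     import almath
#     tv = almath.TransformAndVelocity6D()
#     tv.T = almath.Transform.fromPosition(0.0, 0.1, 0.0)      # pose part
#     tv.V = almath.Velocity6D(0.1, 0.0, 0.0, 0.0, 0.0, 0.0)   # twist part
#     assert tv.isNear(tv)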
def norm(*args):
"""
norm(Position2D pPos) -> float
norm(Position3D pPos) -> float
norm(Position6D pPos) -> float
norm(Quaternion pQua) -> float
norm(Rotation3D pRot) -> float
norm(Transform pT) -> float
norm(Velocity3D pVel) -> float
norm(Velocity6D pVel) -> float
"""
return _almath.norm(*args)
def normalize(*args):
"""
normalize(Position2D pPos) -> Position2D
normalize(Position3D pPos) -> Position3D
normalize(Position6D pPos) -> Position6D
normalize(Quaternion pQua) -> Quaternion
normalize(Velocity3D pVel) -> Velocity3D
normalize(Velocity6D pVel) -> Velocity6D
"""
return _almath.normalize(*args)
def getDubinsSolutions(*args):
"""getDubinsSolutions(Pose2D pTargetPose, float pCircleRadius) -> vectorPose2D"""
return _almath.getDubinsSolutions(*args)
def avoidFootCollision(*args):
"""
avoidFootCollision(vectorPose2D pLFootBoundingBox, vectorPose2D pRFootBoundingBox,
bool pIsLeftSupport, Pose2D pMove) -> bool
"""
return _almath.avoidFootCollision(*args)
def clipFootWithEllipse(*args):
"""clipFootWithEllipse(float pMaxFootX, float pMaxFootY, Pose2D pMove) -> bool"""
return _almath.clipFootWithEllipse(*args)
def transformLogarithmInPlace(*args):
"""transformLogarithmInPlace(Transform pT, Velocity6D pVel)"""
return _almath.transformLogarithmInPlace(*args)
def transformLogarithm(*args):
"""transformLogarithm(Transform pT) -> Velocity6D"""
return _almath.transformLogarithm(*args)
def velocityExponential(*args):
"""velocityExponential(Velocity6D pVel) -> Transform"""
return _almath.velocityExponential(*args)
def velocityExponentialInPlace(*args):
"""velocityExponentialInPlace(Velocity6D pVel, Transform pT)"""
return _almath.velocityExponentialInPlace(*args)
def changeReferenceVelocity6D(*args):
"""changeReferenceVelocity6D(Transform pT, Velocity6D pVelIn, Velocity6D pVelOut)"""
return _almath.changeReferenceVelocity6D(*args)
def changeReferencePosition6D(*args):
"""changeReferencePosition6D(Transform pT, Position6D pPosIn, Position6D pPosOut)"""
return _almath.changeReferencePosition6D(*args)
def changeReferencePosition3DInPlace(*args):
"""changeReferencePosition3DInPlace(Transform pT, Position3D pPosOut)"""
return _almath.changeReferencePosition3DInPlace(*args)
def changeReferenceTransposePosition3DInPlace(*args):
"""changeReferenceTransposePosition3DInPlace(Transform pT, Position3D pPosOut)"""
return _almath.changeReferenceTransposePosition3DInPlace(*args)
def changeReferencePosition3D(*args):
"""changeReferencePosition3D(Transform pT, Position3D pPosIn, Position3D pPosOut)"""
return _almath.changeReferencePosition3D(*args)
def changeReferenceTransposePosition3D(*args):
"""changeReferenceTransposePosition3D(Transform pT, Position3D pPosIn, Position3D pPosOut)"""
return _almath.changeReferenceTransposePosition3D(*args)
def changeReferenceTransform(*args):
"""changeReferenceTransform(Transform pT, Transform pTIn, Transform pTOut)"""
return _almath.changeReferenceTransform(*args)
def changeReferenceTransposeTransform(*args):
"""changeReferenceTransposeTransform(Transform pT, Transform pTIn, Transform pTOut)"""
return _almath.changeReferenceTransposeTransform(*args)
def changeReferenceTransposeVelocity6D(*args):
"""changeReferenceTransposeVelocity6D(Transform pT, Velocity6D pVelIn, Velocity6D pVelOut)"""
return _almath.changeReferenceTransposeVelocity6D(*args)
def changeReferenceTransposePosition6D(*args):
"""changeReferenceTransposePosition6D(Transform pT, Position6D pPosIn, Position6D pPosOut)"""
return _almath.changeReferenceTransposePosition6D(*args)
def transformMeanInPlace(*args):
"""transformMeanInPlace(Transform pTIn1, Transform pTIn2, float pVal, Transform pTOut)"""
return _almath.transformMeanInPlace(*args)
def transformFromPosition3DInPlace(*args):
"""transformFromPosition3DInPlace(Position3D pPosition, Transform pTransform)"""
return _almath.transformFromPosition3DInPlace(*args)
def transformFromPosition3D(*args):
"""transformFromPosition3D(Position3D pPosition) -> Transform"""
return _almath.transformFromPosition3D(*args)
def transformFromRotationInPlace(*args):
"""transformFromRotationInPlace(Rotation pRotation, Transform pTransform)"""
return _almath.transformFromRotationInPlace(*args)
def transformFromRotation(*args):
"""transformFromRotation(Rotation pRotation) -> Transform"""
return _almath.transformFromRotation(*args)
def rotationFromTransformInPlace(*args):
"""rotationFromTransformInPlace(Transform pTransform, Rotation pRotation)"""
return _almath.rotationFromTransformInPlace(*args)
def rotationFromTransform(*args):
"""rotationFromTransform(Transform pTransform) -> Rotation"""
return _almath.rotationFromTransform(*args)
def rotation3DFromRotation(*args):
"""rotation3DFromRotation(Rotation pRotation) -> Rotation3D"""
return _almath.rotation3DFromRotation(*args)
def position6DFromTransformInPlace(*args):
"""position6DFromTransformInPlace(Transform pT, Position6D pPos)"""
return _almath.position6DFromTransformInPlace(*args)
def position6DFromTransform(*args):
"""position6DFromTransform(Transform pT) -> Position6D"""
return _almath.position6DFromTransform(*args)
def transformFromPose2DInPlace(*args):
"""transformFromPose2DInPlace(Pose2D pPose, Transform pT)"""
return _almath.transformFromPose2DInPlace(*args)
def transformFromPose2D(*args):
"""transformFromPose2D(Pose2D pPose) -> Transform"""
return _almath.transformFromPose2D(*args)
def pose2DFromTransformInPlace(*args):
"""pose2DFromTransformInPlace(Transform pT, Pose2D pPos)"""
return _almath.pose2DFromTransformInPlace(*args)
def pose2DFromTransform(*args):
"""pose2DFromTransform(Transform pT) -> Pose2D"""
return _almath.pose2DFromTransform(*args)
def transformFromRotation3D(*args):
"""transformFromRotation3D(Rotation3D pRotation) -> Transform"""
return _almath.transformFromRotation3D(*args)
def transformFromPosition6D(*args):
"""transformFromPosition6D(Position6D pPosition6D) -> Transform"""
return _almath.transformFromPosition6D(*args)
def position6DFromTransformDiffInPlace(*args):
"""position6DFromTransformDiffInPlace(Transform pCurrent, Transform pTarget, Position6D result)"""
return _almath.position6DFromTransformDiffInPlace(*args)
def position6DFromTransformDiff(*args):
"""position6DFromTransformDiff(Transform pCurrent, Transform pTarget) -> Position6D"""
return _almath.position6DFromTransformDiff(*args)
def position3DFromTransformInPlace(*args):
"""position3DFromTransformInPlace(Transform pT, Position3D pPos)"""
return _almath.position3DFromTransformInPlace(*args)
def position3DFromTransform(*args):
"""position3DFromTransform(Transform pT) -> Position3D"""
return _almath.position3DFromTransform(*args)
def rotation3DFromTransform(*args):
"""rotation3DFromTransform(Transform pT) -> Rotation3D"""
return _almath.rotation3DFromTransform(*args)
def transformFromQuaternion(*args):
"""transformFromQuaternion(Quaternion pQua) -> Transform"""
return _almath.transformFromQuaternion(*args)
def quaternionFromTransform(*args):
"""quaternionFromTransform(Transform pT) -> Quaternion"""
return _almath.quaternionFromTransform(*args)
def clipData(*args):
"""clipData(float pMin, float pMax, float pData) -> bool"""
return _almath.clipData(*args)
def position6DFromVelocity6D(*args):
"""position6DFromVelocity6D(Velocity6D pVel) -> Position6D"""
return _almath.position6DFromVelocity6D(*args)
_4_PI_ = cvar._4_PI_
_2_PI_ = cvar._2_PI_
PI = cvar.PI
PI_2 = cvar.PI_2
PI_4 = cvar.PI_4
TO_RAD = cvar.TO_RAD
TO_DEG = cvar.TO_DEG
def transformMean(*args):
"""
transformMean(Transform pTIn1, Transform pTIn2, float pVal = 0.5) -> Transform
transformMean(Transform pTIn1, Transform pTIn2) -> Transform
"""
return _almath.transformMean(*args)
def transformFromRotationPosition3D(*args):
"""
transformFromRotationPosition3D(Rotation pRot, float pX, float pY, float pZ) -> Transform
transformFromRotationPosition3D(Rotation pRot, Position3D pPos) -> Transform
"""
return _almath.transformFromRotationPosition3D(*args)
def transformFromRotVecInPlace(*args):
"""
transformFromRotVecInPlace(int pAxis, float pTheta, Position3D pPos, Transform pT)
transformFromRotVecInPlace(Position3D pPos, Transform pT)
"""
return _almath.transformFromRotVecInPlace(*args)
def transformFromRotVec(*args):
"""
transformFromRotVec(int pAxis, float pTheta, Position3D pPos) -> Transform
transformFromRotVec(Position3D pPos) -> Transform
transformFromRotVec(int pAxis, float pTheta) -> Transform
"""
return _almath.transformFromRotVec(*args)
def axisRotationProjection(*args):
"""
axisRotationProjection(Position3D pPos, Transform pT) -> Transform
axisRotationProjection(Position3D pAxis, Rotation pRot) -> Rotation
"""
return _almath.axisRotationProjection(*args)
def axisRotationProjectionInPlace(*args):
"""
axisRotationProjectionInPlace(Position3D pPos, Transform pT)
axisRotationProjectionInPlace(Position3D pPos, Rotation pRot)
"""
return _almath.axisRotationProjectionInPlace(*args)
def orthogonalSpace(*args):
"""
orthogonalSpace(Position3D pPos, Transform pTOut)
orthogonalSpace(Position3D pPos) -> Transform
"""
return _almath.orthogonalSpace(*args)
def __mul__(*args):
"""
__mul__(float pM, Position2D pPos1) -> Position2D
__mul__(float pM, Position3D pPos1) -> Position3D
__mul__(float pM, Velocity3D pVel1) -> Velocity3D
__mul__(float pVal, Velocity6D pVel) -> Velocity6D
__mul__(Transform pT, Position3D pPos) -> Position3D
__mul__(Rotation pRot, Position3D pPos) -> Position3D
__mul__(float pVal, Position6D pPos) -> Velocity6D
"""
return _almath.__mul__(*args)
def rotationFromAngleDirection(*args):
"""
rotationFromAngleDirection(float pAngle, float pX, float pY, float pZ) -> Rotation
rotationFromAngleDirection(float pTheta, Position3D pPos) -> Rotation
"""
return _almath.rotationFromAngleDirection(*args)
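# --- Hedged usage sketch (added for illustration; not part of the SWIG
# output).  Assuming the compiled _almath extension is importable and that
# Position6D.isNear has the same signature as Velocity6D.isNear above, the
# conversion helpers round-trip between the 6-DOF vector form and the
# homogeneous transform:
if __name__ == '__main__':
    pos = Position6D(0.1, 0.2, 0.3, 0.0, 0.0, 1.57)  # x, y, z, wx, wy, wz
    t = transformFromPosition6D(pos)                 # Position6D -> Transform
    back = position6DFromTransform(t)                # Transform -> Position6D
    assert pos.isNear(back, 1e-4)                    # round trip agrees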
|
{
"content_hash": "d4c2ec8ac0c04a7b0705393427b97026",
"timestamp": "",
"source": "github",
"line_count": 2531,
"max_line_length": 113,
"avg_line_length": 37.22797313314895,
"alnum_prop": 0.6240872813720496,
"repo_name": "AliGhahraei/nao-classroom",
"id": "0766d8b81378f443b539a8990e16513e6cb43a82",
"size": "94495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "naoqi/almath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165140"
}
],
"symlink_target": ""
}
|
"""Self-test suite for Crypto.PublicKey.RSA"""
__revision__ = "$Id$"
import os
import pickle
from pickle import PicklingError
from Crypto.Util.py3compat import *
import unittest
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
class RSATest(unittest.TestCase):
# Test vectors from "RSA-OAEP and RSA-PSS test vectors (.zip file)"
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
# See RSADSI's PKCS#1 page at
# http://www.rsa.com/rsalabs/node.asp?id=2125
# from oaep-int.txt
# TODO: PyCrypto treats the message as starting *after* the leading "00"
# TODO: That behaviour should probably be changed in the future.
plaintext = """
eb 7a 19 ac e9 e3 00 63 50 e3 29 50 4b 45 e2
ca 82 31 0b 26 dc d8 7d 5c 68 f1 ee a8 f5 52 67
c3 1b 2e 8b b4 25 1f 84 d7 e0 b2 c0 46 26 f5 af
f9 3e dc fb 25 c9 c2 b3 ff 8a e1 0e 83 9a 2d db
4c dc fe 4f f4 77 28 b4 a1 b7 c1 36 2b aa d2 9a
b4 8d 28 69 d5 02 41 21 43 58 11 59 1b e3 92 f9
82 fb 3e 87 d0 95 ae b4 04 48 db 97 2f 3a c1 4f
7b c2 75 19 52 81 ce 32 d2 f1 b7 6d 4d 35 3e 2d
"""
ciphertext = """
12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0
39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7
63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6
53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb
6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0
24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48
da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d
51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55
"""
modulus = """
bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7
36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f
b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48
76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f
af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84
ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e
e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f
e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb
"""
e = 0x11 # public exponent
prime_factor = """
c9 7f b1 f0 27 f4 53 f6 34 12 33 ea aa d1 d9 35
3f 6c 42 d0 88 66 b1 d0 5a 0f 20 35 02 8b 9d 86
98 40 b4 16 66 b4 2e 92 ea 0d a3 b4 32 04 b5 cf
ce 33 52 52 4d 04 16 a5 a4 41 e7 00 af 46 15 03
"""
def setUp(self):
global RSA, Random, bytes_to_long
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Util.number import bytes_to_long, inverse
self.n = bytes_to_long(a2b_hex(self.modulus))
self.p = bytes_to_long(a2b_hex(self.prime_factor))
# Compute q, d, and u from n, e, and p
self.q = self.n // self.p
self.d = inverse(self.e, (self.p-1)*(self.q-1))
        self.u = inverse(self.p, self.q) # u = p**-1 (mod q)
self.rsa = RSA
def test_generate_1arg(self):
"""RSA (default implementation) generated key (1 argument)"""
rsaObj = self.rsa.generate(1024)
self._check_private_key(rsaObj)
self._exercise_primitive(rsaObj)
pub = rsaObj.publickey()
self._check_public_key(pub)
self._exercise_public_primitive(rsaObj)
def test_generate_2arg(self):
"""RSA (default implementation) generated key (2 arguments)"""
rsaObj = self.rsa.generate(1024, Random.new().read)
self._check_private_key(rsaObj)
self._exercise_primitive(rsaObj)
pub = rsaObj.publickey()
self._check_public_key(pub)
self._exercise_public_primitive(rsaObj)
def test_generate_3args(self):
rsaObj = self.rsa.generate(1024, Random.new().read,e=65537)
self._check_private_key(rsaObj)
self._exercise_primitive(rsaObj)
pub = rsaObj.publickey()
self._check_public_key(pub)
self._exercise_public_primitive(rsaObj)
self.assertEqual(65537,rsaObj.e)
def test_construct_2tuple(self):
"""RSA (default implementation) constructed key (2-tuple)"""
pub = self.rsa.construct((self.n, self.e))
self._check_public_key(pub)
self._check_encryption(pub)
def test_construct_3tuple(self):
"""RSA (default implementation) constructed key (3-tuple)"""
rsaObj = self.rsa.construct((self.n, self.e, self.d))
self._check_encryption(rsaObj)
self._check_decryption(rsaObj)
def test_construct_4tuple(self):
"""RSA (default implementation) constructed key (4-tuple)"""
rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p))
self._check_encryption(rsaObj)
self._check_decryption(rsaObj)
def test_construct_5tuple(self):
"""RSA (default implementation) constructed key (5-tuple)"""
rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
self._check_private_key(rsaObj)
self._check_encryption(rsaObj)
self._check_decryption(rsaObj)
def test_construct_6tuple(self):
"""RSA (default implementation) constructed key (6-tuple)"""
rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q, self.u))
self._check_private_key(rsaObj)
self._check_encryption(rsaObj)
self._check_decryption(rsaObj)
def test_construct_bad_key2(self):
tup = (self.n, 1)
self.assertRaises(ValueError, self.rsa.construct, tup)
# An even modulus is wrong
tup = (self.n+1, self.e)
self.assertRaises(ValueError, self.rsa.construct, tup)
def test_construct_bad_key3(self):
tup = (self.n, self.e, self.d+1)
self.assertRaises(ValueError, self.rsa.construct, tup)
def test_construct_bad_key5(self):
tup = (self.n, self.e, self.d, self.p, self.p)
self.assertRaises(ValueError, self.rsa.construct, tup)
tup = (self.p*self.p, self.e, self.p, self.p)
self.assertRaises(ValueError, self.rsa.construct, tup)
tup = (self.p*self.p, 3, self.p, self.q)
self.assertRaises(ValueError, self.rsa.construct, tup)
def test_construct_bad_key6(self):
tup = (self.n, self.e, self.d, self.p, self.q, 10)
self.assertRaises(ValueError, self.rsa.construct, tup)
from Crypto.Util.number import inverse
tup = (self.n, self.e, self.d, self.p, self.q, inverse(self.q, self.p))
self.assertRaises(ValueError, self.rsa.construct, tup)
def test_factoring(self):
rsaObj = self.rsa.construct([self.n, self.e, self.d])
self.assertTrue(rsaObj.p==self.p or rsaObj.p==self.q)
self.assertTrue(rsaObj.q==self.p or rsaObj.q==self.q)
self.assertTrue(rsaObj.q*rsaObj.p == self.n)
self.assertRaises(ValueError, self.rsa.construct, [self.n, self.e, self.n-1])
def test_repr(self):
rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
repr(rsaObj)
def test_serialization(self):
"""RSA keys are unpickable"""
rsa_key = self.rsa.generate(1024)
self.assertRaises(PicklingError, pickle.dumps, rsa_key)
def test_raw_rsa_boundary(self):
        # The argument of every RSA raw operation (encrypt/decrypt) must be
        # positive and strictly smaller than the modulus
rsa_obj = self.rsa.generate(1024)
self.assertRaises(ValueError, rsa_obj._decrypt, rsa_obj.n)
self.assertRaises(ValueError, rsa_obj._encrypt, rsa_obj.n)
self.assertRaises(ValueError, rsa_obj._decrypt, 0)
self.assertRaises(ValueError, rsa_obj._encrypt, 0)
def test_size(self):
pub = self.rsa.construct((self.n, self.e))
self.assertEqual(pub.size_in_bits(), 1024)
self.assertEqual(pub.size_in_bytes(), 128)
def _check_private_key(self, rsaObj):
from Crypto.Math.Numbers import Integer
# Check capabilities
self.assertEqual(1, rsaObj.has_private())
# Sanity check key data
self.assertEqual(rsaObj.n, rsaObj.p * rsaObj.q) # n = pq
lcm = int(Integer(rsaObj.p-1).lcm(rsaObj.q-1))
self.assertEqual(1, rsaObj.d * rsaObj.e % lcm) # ed = 1 (mod LCM(p-1, q-1))
self.assertEqual(1, rsaObj.p * rsaObj.u % rsaObj.q) # pu = 1 (mod q)
self.assertEqual(1, rsaObj.p > 1) # p > 1
self.assertEqual(1, rsaObj.q > 1) # q > 1
self.assertEqual(1, rsaObj.e > 1) # e > 1
self.assertEqual(1, rsaObj.d > 1) # d > 1
def _check_public_key(self, rsaObj):
ciphertext = a2b_hex(self.ciphertext)
# Check capabilities
self.assertEqual(0, rsaObj.has_private())
# Check rsaObj.[ne] -> rsaObj.[ne] mapping
self.assertEqual(rsaObj.n, rsaObj.n)
self.assertEqual(rsaObj.e, rsaObj.e)
# Check that private parameters are all missing
self.assertEqual(0, hasattr(rsaObj, 'd'))
self.assertEqual(0, hasattr(rsaObj, 'p'))
self.assertEqual(0, hasattr(rsaObj, 'q'))
self.assertEqual(0, hasattr(rsaObj, 'u'))
# Sanity check key data
self.assertEqual(1, rsaObj.e > 1) # e > 1
# Public keys should not be able to sign or decrypt
self.assertRaises(TypeError, rsaObj._decrypt,
bytes_to_long(ciphertext))
# Check __eq__ and __ne__
self.assertEqual(rsaObj.publickey() == rsaObj.publickey(),True) # assert_
self.assertEqual(rsaObj.publickey() != rsaObj.publickey(),False) # failIf
def _exercise_primitive(self, rsaObj):
# Since we're using a randomly-generated key, we can't check the test
# vector, but we can make sure encryption and decryption are inverse
# operations.
ciphertext = bytes_to_long(a2b_hex(self.ciphertext))
# Test decryption
plaintext = rsaObj._decrypt(ciphertext)
# Test encryption (2 arguments)
new_ciphertext2 = rsaObj._encrypt(plaintext)
self.assertEqual(ciphertext, new_ciphertext2)
def _exercise_public_primitive(self, rsaObj):
plaintext = a2b_hex(self.plaintext)
# Test encryption (2 arguments)
new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))
def _check_encryption(self, rsaObj):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
# Test encryption
new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))
self.assertEqual(bytes_to_long(ciphertext), new_ciphertext2)
def _check_decryption(self, rsaObj):
plaintext = bytes_to_long(a2b_hex(self.plaintext))
ciphertext = bytes_to_long(a2b_hex(self.ciphertext))
# Test plain decryption
new_plaintext = rsaObj._decrypt(ciphertext)
self.assertEqual(plaintext, new_plaintext)
def get_tests(config={}):
tests = []
tests += list_test_cases(RSATest)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
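# --- Hedged worked example (illustration only; not part of the test suite).
# The invariants exercised by _check_private_key above, on a toy key:
#
#     p, q, e = 61, 53, 17
#     n = p * q                             # n = pq = 3233
#     lcm = 780                             # lcm(p-1, q-1) = lcm(60, 52)
#     d = pow(e, -1, lcm)                   # 413, since 17*413 = 9*780 + 1
#     assert (e * d) % lcm == 1             # ed = 1 (mod lcm(p-1, q-1))
#     m = 65
#     assert pow(pow(m, e, n), d, n) == m   # raw decrypt inverts raw encrypt
#
# (pow(e, -1, lcm) needs Python 3.8+; earlier versions can use
# Crypto.Util.number.inverse(e, lcm) as in setUp above.)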
|
{
"content_hash": "5c5305eff75e7000be69515977ac3b3f",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 85,
"avg_line_length": 37.99312714776632,
"alnum_prop": 0.6192112879884226,
"repo_name": "marcuskelly/recover",
"id": "839ab25ce2a35f88dd6f7f102a66be3a8aa1bc88",
"size": "12176",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/Crypto/SelfTest/PublicKey/test_RSA.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "C",
"bytes": "443857"
},
{
"name": "C++",
"bytes": "134770"
},
{
"name": "CSS",
"bytes": "21746"
},
{
"name": "HTML",
"bytes": "47112"
},
{
"name": "JavaScript",
"bytes": "29289"
},
{
"name": "Mako",
"bytes": "9381"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "16837827"
},
{
"name": "Shell",
"bytes": "2069"
},
{
"name": "Tcl",
"bytes": "1285363"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from ..internal import IntentsSchema
class AlexaBaseCommand(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
"args",
metavar="app_label",
nargs="*",
help="Restricts data to the specified app_label",
)
parser.add_argument(
"-a",
"--all",
action="store_true",
dest="do_all_apps",
default=False,
help="If specified will return all apps schema's",
)
def handle(self, *app_labels, **options):
do_all_apps = options.get("do_all_apps")
if len(app_labels) == 0:
if do_all_apps:
app_labels = IntentsSchema.apps.keys()
else:
app_labels = ["base"]
for app in app_labels:
self.do_work(app)
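# --- Hedged illustration (not shipped with django-alexa): a minimal concrete
# command.  Subclasses are expected to implement do_work(app); the sketch
# below only echoes the selected app labels via Django's BaseCommand.stdout.
class EchoAppsCommand(AlexaBaseCommand):
    help = "Example only: prints each app label selected by the base command."

    def do_work(self, app):
        self.stdout.write("would process alexa schema for app: %s" % app)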
|
{
"content_hash": "fab4bde6970043bea2893036a47194fb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 62,
"avg_line_length": 30.419354838709676,
"alnum_prop": 0.5334040296924708,
"repo_name": "rocktavious/django-alexa",
"id": "4e4a52da1ede506d9c6ceb99866022eab425b471",
"size": "943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_alexa/management/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30122"
}
],
"symlink_target": ""
}
|
import contextlib
import mock
from neutron.api.v2 import attributes
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests.unit.ryu import fake_ryu
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
PLUGIN_NAME = ('neutron.plugins.ryu.'
'ryu_neutron_plugin.RyuNeutronPluginV2')
NOTIFIER = ('neutron.plugins.ryu.'
'ryu_neutron_plugin.AgentNotifierApi')
class RyuSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
self.addCleanup(mock.patch.stopall)
self.fake_ryu = fake_ryu.patch_fake_ryu_client().start()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
self._attribute_map_bk_ = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk_[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
super(RyuSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
def tearDown(self):
super(RyuSecurityGroupsTestCase, self).tearDown()
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
class TestRyuSecurityGroups(RyuSecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
def test_security_group_get_port_from_device(self):
with contextlib.nested(self.network(),
self.security_group()) as (n, sg):
with self.subnet(n):
security_group_id = sg['security_group']['id']
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
fixed_ips = port['port']['fixed_ips']
data = {'port': {'fixed_ips': fixed_ips,
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[security_group_id]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
port_id = res['port']['id']
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin.callbacks[0].get_port_from_device(port_id)
self.assertEqual(port_id, port_dict['id'])
self.assertEqual([security_group_id],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([fixed_ips[0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', port_id)
def test_security_group_get_port_from_device_with_no_port(self):
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin.callbacks[0].get_port_from_device('bad_device_id')
self.assertIsNone(port_dict)
class TestRyuSecurityGroupsXML(TestRyuSecurityGroups):
fmt = 'xml'
|
{
"content_hash": "59c9b0b4f70245d30bb00c08176ebb23",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 44.49367088607595,
"alnum_prop": 0.5738264580369844,
"repo_name": "beagles/neutron_hacking",
"id": "3293508ca472012fdd0c8d6414df5d4798c0ea70",
"size": "4203",
"binary": false,
"copies": "1",
"ref": "refs/heads/neutron_oslo_messaging",
"path": "neutron/tests/unit/ryu/test_ryu_security_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8125263"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('person', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='email',
name='primary',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
{
"content_hash": "a8a57b69e751be60be7a5d97726c2252",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 21.157894736842106,
"alnum_prop": 0.582089552238806,
"repo_name": "wpjesus/codematch",
"id": "dca735bd21f0cd8b90f0fbb520fc1ea5dbf51627",
"size": "426",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "ietf/person/migrations/0002_email_primary.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "139492"
},
{
"name": "CSS",
"bytes": "733662"
},
{
"name": "Groff",
"bytes": "2349"
},
{
"name": "HTML",
"bytes": "2149789"
},
{
"name": "JavaScript",
"bytes": "1003699"
},
{
"name": "Makefile",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "17323"
},
{
"name": "PostScript",
"bytes": "35"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "4536908"
},
{
"name": "Shell",
"bytes": "74113"
},
{
"name": "TeX",
"bytes": "2556"
}
],
"symlink_target": ""
}
|
"""
Demo of XML format translation, converting thesaurus information from one
format to another: specifically, a Zthes-in-XML [1] file to SKOS [2]
[1] http://zthes.z3950.org/schema/index.html
[2] http://www.w3.org/2004/02/skos/
Sample usage:
zthes2skos.py http://zthes.z3950.org/schema/sample-05.xml
"""
MORE_DOC = """
Zthes relation types explained at http://www.loc.gov/z3950/agency/profiles/zthes-02.html
``NT'' Narrower term: that is, the related term is more specific than the current one. -> skos:narrower
``BT'' Broader term: that is, the related term is more general than the current one. -> skos:broader
``USE'' Use instead: that is, the related term should be used in preference to the current one. -> z:useInstead
``UF'' Use for: that is, the current term should be used in preference to the related one -> z:useFor
``RT'' Related term. -> skos:related
See also:
* http://www.w3.org/2001/sw/Europe/reports/thes/1.0/migrate/
"""
import sys
import amara
from amara.writers.struct import *
from amara.namespaces import *
ZTHES_NAMESPACE = u"http://zthes.z3950.org/model/index.html"
#http://www.loc.gov/z3950/agency/profiles/zthes-02.html
RELATION_LOOKUP = {
u'RT': (SKOS_NAMESPACE, u'skos:related'),
u'NT': (SKOS_NAMESPACE, u'skos:narrower'),
u'BT': (SKOS_NAMESPACE, u'skos:broader'),
u'USE': (ZTHES_NAMESPACE, u'z:useInstead'),
u'UF': (ZTHES_NAMESPACE, u'z:useFor'),
}
doc = amara.parse(sys.argv[1])
w = structwriter(indent=u"yes").feed(
ROOT(
E((RDF_NAMESPACE, u'rdf:RDF'),
NS(u'skos', SKOS_NAMESPACE),
NS(u'z', ZTHES_NAMESPACE),
(
E(
(SKOS_NAMESPACE, u'skos:Concept'),
{(RDF_NAMESPACE, u'rdf:ID'): term.xml_select(u'string(termId)')},
E((SKOS_NAMESPACE, u'skos:prefLabel'), term.xml_select(u'string(termName)')),
(E((SKOS_NAMESPACE, u'skos:note'),
E((SKOS_NAMESPACE, u'skos:Note'),
E((RDF_NAMESPACE, u'rdf:label'), note.xml_select(u'string(@label)')),
E((RDF_NAMESPACE, u'rdf:value'), note.xml_select(u'string(.)'))
)
) for note in term.xml_select(u'termNote') ),
(E(RELATION_LOOKUP.get(rel.xml_select(u'string(relationType)'), (ZTHES_NAMESPACE, u'z:'+rel.xml_local)),
{(RDF_NAMESPACE, u'rdf:resource'): rel.xml_select(u'concat("#", termId)')}
) for rel in term.xml_select(u'relation') )
)
#E((SKOS_NAMESPACE, u'skos:note'), term.xml_select(u'string(termName)')),
for term in doc.xml_select(u'/Zthes/term'))
)
))
print
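# --- Hedged input sketch (illustration only; element names are taken from the
# XPath expressions above, not from the official Zthes sample file):
#
#   <Zthes>
#     <term>
#       <termId>1</termId>
#       <termName>Dog</termName>
#       <termNote label="scope">Domestic canines</termNote>
#       <relation>
#         <relationType>BT</relationType>
#         <termId>2</termId>
#       </relation>
#     </term>
#   </Zthes>
#
# Run as:  python zthes2skos.py sample.xml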
|
{
"content_hash": "5a2c2c43d5b2f82e45e3905388a2bd0d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 112,
"avg_line_length": 35.140845070422536,
"alnum_prop": 0.6589178356713427,
"repo_name": "zepheira/amara",
"id": "008fe93367160e5392093ad6ac6fe935cf19e1f6",
"size": "2495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/zthes2skos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1830216"
},
{
"name": "C++",
"bytes": "82201"
},
{
"name": "GLSL",
"bytes": "5081"
},
{
"name": "HTML",
"bytes": "578831"
},
{
"name": "JavaScript",
"bytes": "18734"
},
{
"name": "Logos",
"bytes": "175"
},
{
"name": "Objective-C",
"bytes": "26041"
},
{
"name": "Python",
"bytes": "1507578"
},
{
"name": "Shell",
"bytes": "2497"
},
{
"name": "XSLT",
"bytes": "398316"
}
],
"symlink_target": ""
}
|
import textwrap
from contextlib import contextmanager
class TextWrapper(textwrap.TextWrapper):
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
space_left = max(width - cur_len, 1)
if self.break_long_words:
last = reversed_chunks[-1]
cut = last[:space_left]
res = last[space_left:]
cur_line.append(cut)
reversed_chunks[-1] = res
elif not cur_line:
cur_line.append(reversed_chunks.pop())
@contextmanager
def extra_indent(self, indent):
old_initial_indent = self.initial_indent
old_subsequent_indent = self.subsequent_indent
self.initial_indent += indent
self.subsequent_indent += indent
try:
yield
finally:
self.initial_indent = old_initial_indent
self.subsequent_indent = old_subsequent_indent
def indent_only(self, text):
rv = []
for idx, line in enumerate(text.splitlines()):
indent = self.initial_indent
if idx > 0:
indent = self.subsequent_indent
rv.append(f"{indent}{line}")
return "\n".join(rv)
|
{
"content_hash": "e4ad98d080d37f204c6bdb3c5d87c11d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 32.45945945945946,
"alnum_prop": 0.5828476269775187,
"repo_name": "rochacbruno/dynaconf",
"id": "7a052b70df328776293d0b5a3be0f2f53564b44b",
"size": "1201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dynaconf/vendor_src/click/_textwrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2867"
},
{
"name": "Makefile",
"bytes": "11505"
},
{
"name": "Python",
"bytes": "1438471"
},
{
"name": "Shell",
"bytes": "14740"
}
],
"symlink_target": ""
}
|
from skybase import schemas
schemas.set_indicators()
#Directory schemas: layout of the artiball directories
artiball_schema = [
[[''],['skybase.yaml']],
# [['app'], []],
[['app_config'], ['main_app_config.yaml']],
[['installation'], []],
[['installation', 'chef', 'cookbooks'], []],
[['installation', 'chef', 'databags'], []],
[['installation', 'chef', 'encrypted_databags'], []],
[['deployment'], []],
[['deployment'], ['main_deployment.yaml']],
]
# TODO: move/derive these from artiball schema module
ARTIBALL_SCHEMA_ARGS = {
'code_dir': 'app',
'config_dir': 'app_config',
'config_file': 'main_app_config.yaml',
'deploy_dir': 'deployment',
'deploy_file': 'main_deployment.yaml',
'cookbooks_dir': 'installation/chef/cookbooks',
'cookbooks_order_file': 'installation/chef/cookbook-order.yaml',
'encrypted_data_bag_secret': 'chef/COMMON/encrypted_data_bag_secret',
'databags_dir': 'installation/chef/databags',
'enc_databags_dir': 'installation/chef/encrypted_databags',
'manifest_file': 'manifest.yaml',
}
#File Schema sets
#skybase.yaml schema set
skybase_yaml_schema = [
    '''#Configuration file to inform the SkyBase client about your application layout.''',
[['packing'], []],
[['packing', 'application'], []],
[['packing', 'application', 'source_location'], [schemas.STR_TYPE_OPTIONAL_INDICATOR]],
[['packing', 'installations'], [schemas.LIST_TYPE]],
]
skybase_installations_yaml_schema = [
[['chef', 'repository_url'], [schemas.STR_TYPE_OPTIONAL_INDICATOR]],
[['chef', 'repository_branch'], [schemas.STR_TYPE_OPTIONAL_INDICATOR]],
[['chef', 'databags'], [schemas.LIST_TYPE_INDICATOR]],
[['chef', 'encrypted_databags'], [schemas.LIST_TYPE_INDICATOR]],
[['chef', 'cookbooks', 'dependencies_from_berkshelf'], [schemas.BOOL_TYPE_INDICATOR]],
]
#main_deployment.yaml schema set
main_deployment_yaml_schema = [
'''#SkyBase Template (SBT) to describe resources for the service and dependencies.''',
[['definition'], []],
[['definition', 'service_name'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'version'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'keyname'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'chef_type'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'tags'], []],
[['definition', 'tags', 'TeamID'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'tags', 'ServiceID'], [schemas.STR_TYPE_INDICATOR]],
[['definition', 'tags', 'Email'], [schemas.STR_TYPE_INDICATOR]],
[['stacks'], [schemas.LIST_TYPE]],
]
main_deployment_stacks_yaml_schema = [
[['name'], [schemas.STR_TYPE_INDICATOR]],
[['type'], [schemas.STR_TYPE_INDICATOR]],
[['cloud_template_name'], [schemas.STR_TYPE_INDICATOR]],
[['roles'], [schemas.LIST_TYPE]],
]
main_deployment_stacks_roles_yaml_schema = [
[['name'], [schemas.STR_TYPE_INDICATOR]],
[['userdata_template_name'], [schemas.STR_TYPE_INDICATOR]],
[['type'], [schemas.STR_TYPE_INDICATOR]],
[['ami'], [schemas.STR_TYPE_INDICATOR]],
[['subnet'], [schemas.STR_TYPE_INDICATOR]],
[['instance_type'], [schemas.STR_TYPE_INDICATOR]],
[['root_volume_size'], [schemas.INT_TYPE_INDICATOR]],
[['chef_role_runlist'], [schemas.LIST_TYPE_INDICATOR]],
[['autoscaling'], [schemas.INT_TYPE_INDICATOR]],
[['vpc_zone_identifier'], [schemas.STR_TYPE_INDICATOR]],
[['initial_capacity'], [schemas.INT_TYPE_INDICATOR]],
[['max_capacity'], [schemas.INT_TYPE_INDICATOR]],
]
#main_app_config.yaml schema set
main_app_config_yaml_schema = [
'''#SkyBase Template (SBT) to configure your application.''',
[['common', schemas.DICT_KEY_TYPE_INDICATOR], [schemas.DICT_VALUE_TYPE_INDICATOR]],
[['stacks'], [schemas.LIST_TYPE]],
]
main_app_config_stacks_yaml_schema = [
[['name'], [schemas.STR_TYPE_INDICATOR]],
[['roles'], [schemas.LIST_TYPE]],
]
main_app_config_stacks_roles_yaml_schema = [
[['name'], [schemas.STR_TYPE_INDICATOR]],
[['universes', 'dev', schemas.DICT_KEY_TYPE_INDICATOR], [schemas.DICT_VALUE_TYPE_INDICATOR]],
[['universes', 'qa', schemas.DICT_KEY_TYPE_INDICATOR], [schemas.DICT_VALUE_TYPE_INDICATOR]],
[['universes', 'prod', schemas.DICT_KEY_TYPE_INDICATOR], [schemas.DICT_VALUE_TYPE_INDICATOR]]
]
#TODO: discuss flattening manifest, removing metadata; any changes here must be coupled with changes to Artiball().manifest property
#manifest.yaml schema
manifest_yaml_schema = [
[['metadata', 'app_name'], [schemas.STR_TYPE_INDICATOR]],
[['metadata', 'app_version'], [schemas.STR_TYPE_INDICATOR]],
[['metadata', 'build_id'], [schemas.STR_TYPE_INDICATOR]],
[['chef_cookbook_source'], [schemas.STR_TYPE_INDICATOR]]
]
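# --- Hedged illustration (assumed shape, not taken from skybase docs): a
# main_deployment.yaml fragment matching main_deployment_yaml_schema above.
# All values are placeholders.
#
#   definition:
#     service_name: myservice
#     version: "1.0"
#     keyname: my-keypair
#     chef_type: solo
#     tags:
#       TeamID: team-a
#       ServiceID: svc-1
#       Email: owner@example.com
#   stacks:
#     - name: web
#       type: standard
#       cloud_template_name: web_template
#       roles:
#         - name: app
#           userdata_template_name: app_userdata
#           ...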
|
{
"content_hash": "72df8251ceaa3ec62bebc17090bc181e",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 131,
"avg_line_length": 38.74590163934426,
"alnum_prop": 0.6458641844721811,
"repo_name": "lithiumtech/skybase.io",
"id": "e4c4475768e40f44e0f75d09f4d6594fb3f22543",
"size": "4727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skybase/schemas/artiball.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "394577"
},
{
"name": "Ruby",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "3105"
}
],
"symlink_target": ""
}
|
from lemonyellow.models import Slot
class Daycare(Slot):
"""Right now, I'll try to add the ability to host one pokemon in the
daycare, but it's not a big priority. Eventually I'll add the ability to
have two so they can make eggs, but that is very much not a priority."""
original_level = 0
|
{
"content_hash": "392b8fc09d238e7b3d7146ba57cfa1fc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 34.44444444444444,
"alnum_prop": 0.7096774193548387,
"repo_name": "itsthejoker/Pokemon-Homage",
"id": "faa8aacb1054f4caef128ad923903450f12bfcd9",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemonyellow/models/daycare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "485031"
}
],
"symlink_target": ""
}
|
"""
ex20161128_tax_cut_comparison.py
Extend the SIM model (Godley and Lavoie, Chapter 3) with a capitalist sub-sector,
and then look at the effect of a tax cut.
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sfc_models.utils import register_standard_logs
from sfc_models.examples.Quick2DPlot import Quick2DPlot
from sfc_models.models import *
from sfc_models.sector import Market
from sfc_models.sector_definitions import Household, ConsolidatedGovernment, TaxFlow, FixedMarginBusiness, Capitalists
def CreateCountry(mod, name, code):
# Create the country.
cntry = Country(mod, code, name)
# Create sectors
ConsolidatedGovernment(cntry, 'GOV', 'Government')
hou = Household(cntry, 'HH', 'Household')
hou.AddVariable('TaxRate', 'Tax rate for workers', '.2')
cap = Capitalists(cntry, 'CAP', 'Capitalists')
cap.AddVariable('TaxRate', 'Tax rate for capitalists', '0.3')
FixedMarginBusiness(cntry, 'BUS', 'Business Sector')
# Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
TaxFlow(cntry, 'TF', 'TaxFlow', .2)
Market(cntry, 'LAB', 'Labour market')
Market(cntry, 'GOOD', 'Goods market')
return cntry
def main():
register_standard_logs('output', __file__)
# Create model, which holds all entities
mod = Model()
can = CreateCountry(mod, 'Scenario 1', 'SCEN1')
us = CreateCountry(mod, 'Scenario 2', 'SCEN2')
# Need to set the exogenous variable - Government demand for Goods ("G" in economist symbology)
mod.AddExogenous('SCEN1_GOV', 'DEM_GOOD', '[20.,] * 105')
mod.AddExogenous('SCEN2_GOV', 'DEM_GOOD', '[20.,] * 105')
sfc_models.Parameters.SolveInitialEquilibrium = True
# Generate a $1 tax cut based on steady state values.
# Scenario #1: Tax cut for workers at t=5
    # Initial tax rate is 20%, and workers pay $14.545 in tax, so cut the tax rate to 18.6%.
    # (tax rate = [.2, .2, .2, .2, .2, .186, .186, ...])
mod.AddExogenous('SCEN1_HH', 'TaxRate', '[0.2,]*5 + [0.186,]*100')
# Scenario #2: Tax cut for capitalists at t=5
    # Initial tax rate is 30%, and capitalists pay $5.455 in tax, so cut the tax rate to 24.5%.
    # (tax rate = [.3, .3, .3, .3, .3, .245, .245, ...])
mod.AddExogenous('SCEN2_CAP', 'TaxRate', '[0.3,]*5 + [0.245,]*100')
mod.main()
t = mod.GetTimeSeries('t', cutoff=30)
scen1 = mod.GetTimeSeries('SCEN1_GOOD__SUP_GOOD', cutoff=30)
scen2 = mod.GetTimeSeries('SCEN2_GOOD__SUP_GOOD', cutoff=30)
p = Quick2DPlot([t,t], [scen1, scen2], 'Output - Y',run_now=False)
p.Legend = ['Scenario #1 - Worker Tax Cut', 'Scenario #2 - Capitalist Tax Cut']
p.LegendPos = 'center right'
p.DoPlot()
if __name__ == '__main__':
main()
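# --- Hedged check of the calibration arithmetic in the comments above
# (illustration only; the steady-state tax takes of $14.545 and $5.455 come
# from those comments, not from a fresh solve).  Each scenario rescales the
# rate so the same tax base pays exactly $1 less:
#
#     worker_rate = 0.2 * (14.545 - 1.0) / 14.545       # ~= 0.186
#     capitalist_rate = 0.3 * (5.455 - 1.0) / 5.455     # ~= 0.245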
|
{
"content_hash": "c8779d4fed8151133b3a45611b5783ce",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 118,
"avg_line_length": 41.857142857142854,
"alnum_prop": 0.6773192677629538,
"repo_name": "brianr747/SFC_models",
"id": "33c36e3b1626e7c61375113dd556205922fee3fc",
"size": "3223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfc_models/examples/scripts/ex20161128_tax_cut_comparison.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "433125"
}
],
"symlink_target": ""
}
|
"""DenseNet, implemented in Gluon."""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ...contrib.nn import HybridConcurrent, Identity
# Helpers
def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index):
out = nn.HybridSequential(prefix='stage%d_'%stage_index)
with out.name_scope():
for _ in range(num_layers):
out.add(_make_dense_layer(growth_rate, bn_size, dropout))
return out
def _make_dense_layer(growth_rate, bn_size, dropout):
new_features = nn.HybridSequential(prefix='')
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
if dropout:
new_features.add(nn.Dropout(dropout))
out = HybridConcurrent(axis=1, prefix='')
out.add(Identity())
out.add(new_features)
return out
def _make_transition(num_output_features):
out = nn.HybridSequential(prefix='')
out.add(nn.BatchNorm())
out.add(nn.Activation('relu'))
out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
out.add(nn.AvgPool2D(pool_size=2, strides=2))
return out
# Net
class DenseNet(HybridBlock):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_init_features : int
Number of filters to learn in the first convolution layer.
growth_rate : int
Number of filters to add each layer (`k` in the paper).
block_config : list of int
List of integers for numbers of layers in each pooling block.
bn_size : int, default 4
        Multiplicative factor for the number of bottleneck layers.
(i.e. bn_size * k features in the bottleneck layer)
dropout : float, default 0
Rate of dropout after each dense layer.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self, num_init_features, growth_rate, block_config,
bn_size=4, dropout=0, classes=1000, **kwargs):
super(DenseNet, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
strides=2, padding=3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
# Add dense blocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
self.features.add(_make_transition(num_features // 2))
num_features = num_features // 2
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.AvgPool2D(pool_size=7))
self.features.add(nn.Flatten())
self.output = nn.Dense(classes)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32])}
# Constructor
def get_densenet(num_layers, pretrained=False, ctx=cpu(),
root=os.path.join('~', '.mxnet', 'models'), **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
def densenet121(**kwargs):
r"""Densenet-BC 121-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(121, **kwargs)
def densenet161(**kwargs):
r"""Densenet-BC 161-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(161, **kwargs)
def densenet169(**kwargs):
r"""Densenet-BC 169-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(169, **kwargs)
def densenet201(**kwargs):
r"""Densenet-BC 201-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(201, **kwargs)
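# --- Hedged usage sketch (illustration only; assumes an MXNet installation
# where this module is importable as mxnet.gluon.model_zoo.vision):
#
#     import mxnet as mx
#     from mxnet.gluon.model_zoo.vision import densenet121
#
#     net = densenet121()                               # random weights
#     net.initialize(ctx=mx.cpu())
#     x = mx.nd.random.uniform(shape=(1, 3, 224, 224))  # NCHW image batch
#     y = net(x)                                        # shape (1, 1000) logits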
|
{
"content_hash": "39eb9141603eceb6794a3c64e4e9bbe3",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 100,
"avg_line_length": 37.924731182795696,
"alnum_prop": 0.6338247802665155,
"repo_name": "precedenceguo/mxnet",
"id": "b03f5ce8d52ac71b502d3bcc919ae87a825a3b1c",
"size": "7892",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/mxnet/gluon/model_zoo/vision/densenet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "122218"
},
{
"name": "C++",
"bytes": "5364005"
},
{
"name": "CMake",
"bytes": "83993"
},
{
"name": "Cuda",
"bytes": "925835"
},
{
"name": "Groovy",
"bytes": "2673"
},
{
"name": "Java",
"bytes": "122297"
},
{
"name": "Jupyter Notebook",
"bytes": "1275293"
},
{
"name": "Makefile",
"bytes": "62361"
},
{
"name": "Matlab",
"bytes": "34903"
},
{
"name": "Perl",
"bytes": "1275063"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "Python",
"bytes": "5583154"
},
{
"name": "R",
"bytes": "311543"
},
{
"name": "Scala",
"bytes": "1012427"
},
{
"name": "Shell",
"bytes": "263635"
},
{
"name": "Smalltalk",
"bytes": "43774"
}
],
"symlink_target": ""
}
|
import unittest
from two_to_the import two_to_the
class TwoToTheTest(unittest.TestCase):
def test0(self):
self.assertEqual(1, two_to_the(0))
def test1(self):
self.assertEqual(2, two_to_the(1))
def test2(self):
self.assertEqual(4, two_to_the(2))
def test3(self):
self.assertEqual(8, two_to_the(3))
if __name__ == '__main__':
unittest.main()
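# --- Hedged sketch (not from the repository): an implementation consistent
# with the four assertions above, using the left shift the package name
# (bit_manipulation) suggests:
#
#     def two_to_the(n):
#         return 1 << n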
|
{
"content_hash": "6e6f8d0027d535864a110833483455fd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 42,
"avg_line_length": 19,
"alnum_prop": 0.6090225563909775,
"repo_name": "keithio/practice-data-structures-and-algorithms",
"id": "1e78bdd022fc2d4255576fd6327772c438a20d70",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bit_manipulation/two_to_the/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56595"
}
],
"symlink_target": ""
}
|
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import xml
from xml.dom.minidom import parse, parseString
def send_email(to, server, subj, body, attachments):
"""Send an email with the given information.
Args:
to: a String, the email address to send the email to
server: a String, the mail server to send from
subj: a String, the subject line of the message
body: a String, the body of the message
attachments: a listof_pathto_File, the attachements to include
"""
msg = MIMEMultipart()
msg['Subject'] = subj
# me == the sender's email address
# family = the list of all recipients' email addresses
msg['From'] = 'AutopsyTest'
msg['To'] = to
msg.preamble = 'This is a test'
container = MIMEText(body, 'plain')
msg.attach(container)
Build_email(msg, attachments)
s = smtplib.SMTP(server)
try:
print('Sending Email')
s.sendmail(msg['From'], msg['To'], msg.as_string())
except Exception as e:
print(str(e))
s.quit()
def Build_email(msg, attachments):
    for file_path in attachments:
        part = MIMEBase('application', "octet-stream")
        # Read the attachment bytes, closing the handle promptly
        with open(file_path, "rb") as attachment_file:
            part.set_payload(attachment_file.read())
        # Paths are Windows-style; keep only the base file name
        file_name = file_path.split("\\")[-1]
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="' + file_name + '"')
        msg.attach(part)
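# --- Hedged usage sketch (illustration only; address, host and path are
# placeholders, and the Windows-style path matches the backslash split in
# Build_email):
#
#     send_email('qa@example.com', 'smtp.example.com',
#                'Autopsy regression results', 'See attached logs.',
#                [r'C:\autopsy\results\log.txt'])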
|
{
"content_hash": "cbb10890bea8e5a0852a3806db673da6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 84,
"avg_line_length": 34.204081632653065,
"alnum_prop": 0.6270883054892601,
"repo_name": "raman-bt/autopsy",
"id": "5d12e6afa3213fec3ccdb4adfe3ed383edb33caf",
"size": "1676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/script/Emailer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4505"
},
{
"name": "Java",
"bytes": "3165917"
},
{
"name": "JavaScript",
"bytes": "568"
},
{
"name": "PHP",
"bytes": "660"
},
{
"name": "Perl",
"bytes": "1032751"
},
{
"name": "Python",
"bytes": "138243"
},
{
"name": "Ruby",
"bytes": "1285"
},
{
"name": "Shell",
"bytes": "890"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
def home(request):
return render(request, '{{ project_name }}/home.html', {})
|
{
"content_hash": "67d9c517dae0c64d0d25a82d89f12b39",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 60,
"avg_line_length": 29.25,
"alnum_prop": 0.7094017094017094,
"repo_name": "theduke/django-kickstart",
"id": "dd9f116e8ddfadc3eb34b061f84db9bb23e894da",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_kickstart/template/app/apps/project_name/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "329473"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "30611"
},
{
"name": "Ruby",
"bytes": "2115"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.short_description'
db.add_column('dnd_skill', 'short_description', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.short_description'
db.delete_column('dnd_skill', 'short_description')
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.characterclassvariant': {
'Meta': {'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.featrequiresfeat': {
'Meta': {'object_name': 'FeatRequiresFeat'},
'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
},
'dnd.featrequiresskill': {
'Meta': {'object_name': 'FeatRequiresSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'dnd.race': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'racial_traits': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'dnd.racefavoredcharacterclass': {
'Meta': {'object_name': 'RaceFavoredCharacterClass'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
},
'dnd.racesize': {
'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'dnd.racespeed': {
'Meta': {'object_name': 'RaceSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.racespeedtype': {
'Meta': {'ordering': "['name', 'extra']", 'object_name': 'RaceSpeedType'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.skillvariant': {
'Meta': {'unique_together': "(('skill', 'rulebook'),)", 'object_name': 'SkillVariant'},
'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.textfeatprerequisite': {
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
|
{
"content_hash": "7e1c2a33746ea057f67d6bc5330b1124",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 220,
"avg_line_length": 80.0719696969697,
"alnum_prop": 0.545484649226548,
"repo_name": "gregpechiro/dndtools",
"id": "394c43cdd993a72db2444c2f8c09e313190f8bf1",
"size": "21157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dndtools/dnd/migrations/0050_auto__add_field_skill_short_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91256"
},
{
"name": "HTML",
"bytes": "190011"
},
{
"name": "JavaScript",
"bytes": "80639"
},
{
"name": "Python",
"bytes": "2775119"
}
],
"symlink_target": ""
}
|
import sys
import clr
clr.AddReference('System')
import System
def get_system_information():
    # sys.version_info is a tuple, so stringify it before handing it to .NET;
    # System.String has no constructor that accepts a tuple.
    info = System.String.Format('{0}', str(sys.version_info))
    return info
class SystemInfo(object):
    # @property only binds as a descriptor on a class; the original
    # module-level property object could never be read, so it is wrapped
    # in a small class here.
    @property
    def system_information(self):
        return System.String.Format('{0}', str(sys.version_info))
def echo(something):
    return something
if __name__=="__main__":
    print(get_system_information())
    print(SystemInfo().system_information)
    System.Console.ReadLine()
|
{
"content_hash": "ca35732162571d0a1359b2e1d8caba17",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 42,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7046004842615012,
"repo_name": "MaciekTalaska/IronPythonCSIntegration",
"id": "da19f672e8583c878ea6fe5e8318dc598bb7627c",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IronPythonApplication/IronPythonApplication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "3637"
},
{
"name": "Python",
"bytes": "1808"
}
],
"symlink_target": ""
}
|
import glob, itertools, os, sys, time
try:
import _thread
except ImportError:
    import _dummy_thread as _thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading
except ImportError:
pass
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
def code_files():
return \
[os.path.abspath('conf.py')] + \
[v for v in [getattr(m, "__file__", None) for m in list(sys.modules.values())] if v]
def matching_files(patterns):
for pattern in patterns:
if os.path.isfile(pattern):
yield pattern
elif os.path.isdir(pattern):
for dirpath, dirnames, filenames in os.walk(pattern):
for filename in filenames:
yield os.path.join(dirpath, filename)
else:
for filename in glob.glob(pattern):
yield filename
def code_and_files(filepatterns):
return itertools.chain(code_files(), matching_files(filepatterns))
def files_changed(files):
global _mtimes, _win
for filename in files:
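        # Compiled files shadow their sources; stat the .py file instead so
        # edits to the source are picked up.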
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if not os.path.exists(filename):
continue # File might be in an egg, so it can't be reloaded.
stat = os.stat(filename)
mtime = stat.st_mtime
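        # On Windows, subtracting st_ctime keeps the comparison sensitive to
        # file replacement (the apparent intent: editors there often recreate
        # a file, changing st_ctime while preserving st_mtime).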
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
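            # Drop the whole cache so every file is re-stat'ed after the
            # process restarts.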
_mtimes = {}
return True
return False
def reloader_thread(filepatterns):
while RUN_RELOADER:
if files_changed(code_and_files(filepatterns)):
sys.exit(3) # force reload
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, filepatterns, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
_thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread(filepatterns)
except KeyboardInterrupt:
pass
else:
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def jython_reloader(main_func, filepatterns, args, kwargs):
from _systemrestart import SystemRestart
_thread.start_new_thread(main_func, args)
while True:
        if files_changed(code_and_files(filepatterns)):
raise SystemRestart
time.sleep(1)
def main(main_func, filepatterns=[], args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
reloader(main_func, filepatterns, args, kwargs)
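# A minimal usage sketch (hypothetical entry point, not part of this module):
#
#   def run():
#       ...  # start the long-running process to be auto-reloaded
#
#   if __name__ == '__main__':
#       main(run, filepatterns=['templates/*.html'])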
|
{
"content_hash": "a9d56477aa31e776b7071c0cf4711eb5",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 92,
"avg_line_length": 29.40909090909091,
"alnum_prop": 0.6009273570324575,
"repo_name": "akheron/stango",
"id": "30585684c6502b98eaf851069e3d25ab71bda783",
"size": "6915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stango/autoreload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53518"
}
],
"symlink_target": ""
}
|
import os
import sys
PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__))
)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj.settings")
from django.core.management import execute_from_command_line
sys.path.append(os.path.join(PROJECT_DIR, 'dj', 'apps'))
execute_from_command_line(sys.argv)
|
{
"content_hash": "6f4767238eff2342db96285b05ab5763",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 24.2,
"alnum_prop": 0.6776859504132231,
"repo_name": "matllubos/django-is-core",
"id": "fdd12bff118ae05d204379ed8973c87bc41932e5",
"size": "385",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "277306"
},
{
"name": "CoffeeScript",
"bytes": "1797"
},
{
"name": "HTML",
"bytes": "46407"
},
{
"name": "Makefile",
"bytes": "3870"
},
{
"name": "Python",
"bytes": "367840"
}
],
"symlink_target": ""
}
|
"""Etcd2 specific configuration."""
import os
import shutil
import server
class Etcd2Cluster(object):
"""Sets up a global or cell-local etcd cluster."""
def __init__(self, name):
import environment # pylint: disable=g-import-not-at-top
self.port_base = environment.reserve_ports(2)
self.name = name
self.hostname = 'localhost'
self.client_port = self.port_base
self.peer_port = self.port_base + 1
self.client_addr = 'http://%s:%d' % (self.hostname, self.client_port)
self.peer_addr = 'http://%s:%d' % (self.hostname, self.peer_port)
self.api_url = self.client_addr + '/v2'
dirname = 'etcd_' + self.name
self.data_dir = os.path.join(environment.vtdataroot, dirname)
self.log_base = os.path.join(environment.vtlogroot, dirname)
self.start()
def start(self):
import utils # pylint: disable=g-import-not-at-top
self.proc = utils.run_bg([
'etcd', '-name', self.name,
'-advertise-client-urls', self.client_addr,
'-initial-advertise-peer-urls', self.peer_addr,
'-listen-client-urls', self.client_addr,
'-listen-peer-urls', self.peer_addr,
'-initial-cluster', '%s=%s' % (self.name, self.peer_addr),
'-data-dir', self.data_dir],
stdout=open(self.log_base + '.stdout', 'a'),
stderr=open(self.log_base + '.stderr', 'a'))
def restart(self):
self.stop()
self.start()
def stop(self):
import utils # pylint: disable=g-import-not-at-top
utils.kill_sub_process(self.proc)
self.proc.wait()
shutil.rmtree(self.data_dir)
def wait_until_up(self):
import utils # pylint: disable=g-import-not-at-top
# Wait for global cluster to come up.
# We create a dummy directory using v2 API, won't be visible to v3.
utils.curl(
self.api_url + '/keys/test', request='PUT',
data='dir=true', retry_timeout=10)
class Etcd2TopoServer(server.TopoServer):
"""Implementation of TopoServer for etcd2."""
clusters = {}
def setup(self, add_bad_host=False):
for cell in ['global', 'test_ca', 'test_nj', 'test_ny']:
self.clusters[cell] = Etcd2Cluster(cell)
self.wait_until_up_add_cells()
def teardown(self):
for cluster in self.clusters.itervalues():
cluster.stop()
def flags(self):
return [
'-topo_implementation', 'etcd2',
'-topo_global_server_address', self.clusters['global'].client_addr,
'-topo_global_root', '/global',
]
def wipe(self):
for cluster in self.clusters.itervalues():
cluster.restart()
self.wait_until_up_add_cells()
def update_addr(self, cell, keyspace, shard, tablet_index, port):
pass
def wait_until_up_add_cells(self):
import utils # pylint: disable=g-import-not-at-top
for cluster in self.clusters.itervalues():
cluster.wait_until_up()
# Add entries in global cell list.
for cell, cluster in self.clusters.iteritems():
if cell != 'global':
utils.run_vtctl_vtctl(['AddCellInfo',
'-root', '/',
'-server_address', cluster.client_addr,
cell])
server.flavor_map['etcd2'] = Etcd2TopoServer()
|
{
"content_hash": "b6958a833b2d10d53d27982045ae5acb",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 29.36036036036036,
"alnum_prop": 0.610309911015649,
"repo_name": "alainjobart/vitess",
"id": "5c7d3bdcd11eeb26a498d2c57f596c051ce0d1cc",
"size": "3861",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "test/topo_flavor/etcd2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "79101"
},
{
"name": "Go",
"bytes": "8349035"
},
{
"name": "HTML",
"bytes": "60555"
},
{
"name": "Java",
"bytes": "1176727"
},
{
"name": "JavaScript",
"bytes": "42815"
},
{
"name": "Liquid",
"bytes": "7030"
},
{
"name": "Makefile",
"bytes": "10624"
},
{
"name": "Python",
"bytes": "1211125"
},
{
"name": "Ruby",
"bytes": "3580"
},
{
"name": "Shell",
"bytes": "74505"
},
{
"name": "Smarty",
"bytes": "51137"
},
{
"name": "TypeScript",
"bytes": "155282"
},
{
"name": "Yacc",
"bytes": "59671"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime, date, time
from decimal import Decimal
from mock import Mock
from uuid import UUID, uuid4
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.usertype import UserType, UserTypeDefinitionException
from cassandra.cqlengine import columns, connection
from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace, drop_table
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class UserDefinedTypeTests(BaseCassEngTestCase):
def setUp(self):
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest("UDTs require native protocol 3+, currently using: {0}".format(PROTOCOL_VERSION))
def test_can_create_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
# Add a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
self.assertEqual("male", user.gender)
# Remove a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
with self.assertRaises(AttributeError):
user.gender
def test_can_insert_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
self.assertEqual(1, UserModel.objects.count())
john = UserModel.objects().first()
self.assertEqual(0, john.id)
self.assertTrue(type(john.info) is User)
self.assertEqual(42, john.info.age)
self.assertEqual("John", john.info.name)
def test_can_update_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
created_user = UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
created_user.info = User(age=22, name="Mary")
created_user.update()
mary_info = UserModel.objects().first().info
self.assertEqual(22, mary_info.age)
self.assertEqual("Mary", mary_info.name)
def test_can_update_udts_with_nones(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
created_user = UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
created_user.info = None
created_user.update()
john_info = UserModel.objects().first().info
self.assertIsNone(john_info)
def test_can_create_same_udt_different_keyspaces(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
create_keyspace_simple("simplex", 1)
sync_type("simplex", User)
drop_keyspace("simplex")
def test_can_insert_partial_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
self.assertIsNone(john_info.gender)
user = User(age=42)
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertIsNone(john_info.name)
self.assertIsNone(john_info.gender)
def test_can_insert_nested_udts(self):
class Depth_0(UserType):
age = columns.Integer()
name = columns.Text()
class Depth_1(UserType):
value = columns.UserDefinedType(Depth_0)
class Depth_2(UserType):
value = columns.UserDefinedType(Depth_1)
class Depth_3(UserType):
value = columns.UserDefinedType(Depth_2)
class DepthModel(Model):
id = columns.Integer(primary_key=True)
v_0 = columns.UserDefinedType(Depth_0)
v_1 = columns.UserDefinedType(Depth_1)
v_2 = columns.UserDefinedType(Depth_2)
v_3 = columns.UserDefinedType(Depth_3)
sync_table(DepthModel)
udts = [Depth_0(age=42, name="John")]
udts.append(Depth_1(value=udts[0]))
udts.append(Depth_2(value=udts[1]))
udts.append(Depth_3(value=udts[2]))
DepthModel.create(id=0, v_0=udts[0], v_1=udts[1], v_2=udts[2], v_3=udts[3])
output = DepthModel.objects().first()
self.assertEqual(udts[0], output.v_0)
self.assertEqual(udts[1], output.v_1)
self.assertEqual(udts[2], output.v_2)
self.assertEqual(udts[3], output.v_3)
def test_can_insert_udts_with_nones(self):
"""
        Test for inserting all column types into a UserType as None
        test_can_insert_udts_with_nones tests that each cqlengine column type can be inserted into a UserType as None.
        It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates
        a UserType instance where all the fields are None and inserts the UserType as an instance of the Model. Finally,
        it verifies that each column read from the UserType from Cassandra is None.
@since 2.5.0
@jira_ticket PYTHON-251
@expected_result The UserType is inserted with each column type, and the resulting read yields None's for each column.
@test_category data_types:udt
"""
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None,
l=None, m=None, n=None)
AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
self.assertEqual(input, output)
def test_can_insert_udts_with_all_datatypes(self):
"""
Test for inserting all column types into a UserType
test_can_insert_udts_with_all_datatypes tests that each cqlengine column type can be inserted into a UserType.
It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates
a UserType instance where all the fields have corresponding data, and inserts the UserType as an instance of the Model.
Finally, it verifies that each column read from the UserType from Cassandra is the same as the input parameters.
@since 2.5.0
@jira_ticket PYTHON-251
@expected_result The UserType is inserted with each column type, and the resulting read yields proper data for each column.
@test_category data_types:udt
"""
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True,
e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39,
h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text',
l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'))
AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
for i in range(ord('a'), ord('a') + 14):
self.assertEqual(input[chr(i)], output[chr(i)])
def test_can_insert_udts_protocol_v4_datatypes(self):
"""
Test for inserting all protocol v4 column types into a UserType
test_can_insert_udts_protocol_v4_datatypes tests that each protocol v4 cqlengine column type can be inserted
into a UserType. It first creates a UserType that has each protocol v4 cqlengine column type, and a corresponding
table/Model. It then creates a UserType instance where all the fields have corresponding data, and inserts the
UserType as an instance of the Model. Finally, it verifies that each column read from the UserType from Cassandra
is the same as the input parameters.
@since 2.6.0
@jira_ticket PYTHON-245
@expected_result The UserType is inserted with each protocol v4 column type, and the resulting read yields proper data for each column.
@test_category data_types:udt
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes in UDTs require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
class Allv4Datatypes(UserType):
a = columns.Date()
b = columns.SmallInt()
c = columns.Time()
d = columns.TinyInt()
class Allv4DatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(Allv4Datatypes)
sync_table(Allv4DatatypesModel)
input = Allv4Datatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123)
Allv4DatatypesModel.create(id=0, data=input)
self.assertEqual(1, Allv4DatatypesModel.objects.count())
output = Allv4DatatypesModel.objects().first().data
        for i in range(ord('a'), ord('a') + 4):
self.assertEqual(input[chr(i)], output[chr(i)])
def test_nested_udts_inserts(self):
"""
Test for inserting collections of user types using cql engine.
test_nested_udts_inserts Constructs a model that contains a list of usertypes. It will then attempt to insert
them. The expectation is that no exception is thrown during insert. For sanity sake we also validate that our
input and output values match. This combination of model, and UT produces a syntax error in 2.5.1 due to
improper quoting around the names collection.
@since 2.6.0
@jira_ticket PYTHON-311
@expected_result No syntax exception thrown
@test_category data_types:udt
"""
class Name(UserType):
type_name__ = "header"
name = columns.Text()
value = columns.Text()
class Container(Model):
id = columns.UUID(primary_key=True, default=uuid4)
names = columns.List(columns.UserDefinedType(Name))
# Construct the objects and insert them
names = []
for i in range(0, 10):
names.append(Name(name="name{0}".format(i), value="value{0}".format(i)))
# Create table, insert data
sync_table(Container)
Container.create(id=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), names=names)
# Validate input and output matches
self.assertEqual(1, Container.objects.count())
names_output = Container.objects().first().names
self.assertEqual(names_output, names)
def test_udts_with_unicode(self):
"""
Test for inserting models with unicode and udt columns.
test_udts_with_unicode constructs a model with a user defined type. It then attempts to insert that model with
a unicode primary key. It will also attempt to upsert a udt that contains unicode text.
@since 3.0.0
@jira_ticket PYTHON-353
@expected_result No exceptions thrown
@test_category data_types:udt
"""
ascii_name = 'normal name'
unicode_name = u'Fran\u00E7ois'
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModelText(Model):
id = columns.Text(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModelText)
# Two udt instances one with a unicode one with ascii
user_template_ascii = User(age=25, name=ascii_name)
user_template_unicode = User(age=25, name=unicode_name)
UserModelText.create(id=ascii_name, info=user_template_unicode)
UserModelText.create(id=unicode_name, info=user_template_ascii)
UserModelText.create(id=unicode_name, info=user_template_unicode)
def test_register_default_keyspace(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
from cassandra.cqlengine import models
from cassandra.cqlengine import connection
# None emulating no model and no default keyspace before connecting
connection.udt_by_keyspace.clear()
User.register_for_keyspace(None)
self.assertEqual(len(connection.udt_by_keyspace), 1)
self.assertIn(None, connection.udt_by_keyspace)
# register should be with default keyspace, not None
cluster = Mock()
connection._register_known_types(cluster)
cluster.register_user_type.assert_called_with(models.DEFAULT_KEYSPACE, User.type_name(), User)
def test_db_field_override(self):
"""
Tests for db_field override
Tests to ensure that udt's in models can specify db_field for a particular field and that it will be honored.
@since 3.1.0
@jira_ticket PYTHON-346
@expected_result The actual cassandra column will use the db_field specified.
@test_category data_types:udt
"""
class db_field_different(UserType):
age = columns.Integer(db_field='a')
name = columns.Text(db_field='n')
class TheModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(db_field_different)
sync_table(TheModel)
cluster = connection.get_cluster()
type_meta = cluster.metadata.keyspaces[TheModel._get_keyspace()].user_types[db_field_different.type_name()]
type_fields = (db_field_different.age.column, db_field_different.name.column)
self.assertEqual(len(type_meta.field_names), len(type_fields))
for f in type_fields:
self.assertIn(f.db_field_name, type_meta.field_names)
id = 0
age = 42
name = 'John'
info = db_field_different(age=age, name=name)
TheModel.create(id=id, info=info)
self.assertEqual(1, TheModel.objects.count())
john = TheModel.objects().first()
self.assertEqual(john.id, id)
info = john.info
self.assertIsInstance(info, db_field_different)
self.assertEqual(info.age, age)
self.assertEqual(info.name, name)
        # also exercise the db_field mapping
self.assertEqual(info.a, age)
self.assertEqual(info.n, name)
def test_db_field_overload(self):
"""
Tests for db_field UserTypeDefinitionException
        Tests that overriding a field's db_field with a name that collides with another field errors appropriately
@since 3.1.0
@jira_ticket PYTHON-346
@expected_result Setting a db_field to an existing field causes an exception to occur.
@test_category data_types:udt
"""
with self.assertRaises(UserTypeDefinitionException):
class something_silly(UserType):
first_col = columns.Integer()
second_col = columns.Text(db_field='first_col')
with self.assertRaises(UserTypeDefinitionException):
class something_silly_2(UserType):
first_col = columns.Integer(db_field="second_col")
second_col = columns.Text()
def test_set_udt_fields(self):
# PYTHON-502
class User(UserType):
age = columns.Integer()
name = columns.Text()
u = User()
u.age = 20
self.assertEqual(20, u.age)
|
{
"content_hash": "cb2114b4ba199b70492a234b6b74490f",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 143,
"avg_line_length": 36.40503875968992,
"alnum_prop": 0.6201756720787862,
"repo_name": "kishkaru/python-driver",
"id": "dc7eb134c0e1d107697ea5ec4168855d79e58ee7",
"size": "19364",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration/cqlengine/model/test_udts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28924"
},
{
"name": "PowerShell",
"bytes": "4614"
},
{
"name": "Python",
"bytes": "1997472"
}
],
"symlink_target": ""
}
|
"""SavedModel builder.
Builds a SavedModel that can be saved to storage, is language neutral, and
enables systems to produce, consume, or transform TensorFlow Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
class SavedModelBuilder(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides functionality to build a `SavedModel`
protocol buffer. Specifically, this allows multiple meta graphs to be saved as
part of a single language-neutral `SavedModel`, while sharing variables and
assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they must be provided
as part of the first meta graph to be saved. Subsequent meta graphs can
provide a subset of the initial assets to be added to the SavedModel
definition.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
asset_collection=foo_assets)
...
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if not file_io.file_exists(export_dir):
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
def _asset_path_from_tensor(self, path_tensor):
"""Returns the filepath value stored in constant `path_tensor`.
Args:
path_tensor: Tensor of a file-path.
Returns:
The string value i.e. path of the tensor, if valid.
Raises:
TypeError if tensor does not match expected op type, dtype or value.
"""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError("Asset path tensor must be a Tensor.")
if path_tensor.op.type != "Const":
raise TypeError("Asset path tensor must be of type constant.")
if path_tensor.dtype != dtypes.string:
raise TypeError("Asset path tensor must be of dtype string.")
str_values = path_tensor.op.get_attr("value").string_val
if len(str_values) != 1:
raise TypeError("Asset path tensor must be a scalar.")
return str_values[0]
def _add_asset_to_collection(self, asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the asset collection of the graph.
Args:
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the
asset proto.
"""
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
asset_any_proto = Any()
asset_any_proto.Pack(asset_proto)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
def _save_and_write_assets(self, assets_collection_to_add=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
"""
asset_source_filepath_list = self._save_assets(assets_collection_to_add)
# Return if there are no assets to write.
    if not asset_source_filepath_list:
tf_logging.info("No assets to write.")
return
assets_destination_dir = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
# Copy each asset from source path to destination path.
for asset_source_filepath in asset_source_filepath_list:
asset_source_filename = os.path.basename(asset_source_filepath)
asset_destination_filepath = os.path.join(
compat.as_bytes(assets_destination_dir),
compat.as_bytes(asset_source_filename))
file_io.copy(
asset_source_filepath, asset_destination_filepath, overwrite=True)
tf_logging.info("Assets written to: %s", assets_destination_dir)
def _save_assets(self, assets_collection_to_add=None):
"""Saves assets to the meta graph.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
Returns:
The list of filepaths to the assets in the assets collection.
Raises:
ValueError: Indicating an invalid filepath tensor.
"""
asset_source_filepath_list = []
if assets_collection_to_add is None:
tf_logging.info("No assets to save.")
return asset_source_filepath_list
# Iterate over the supplied asset collection, build the `AssetFile` proto
# and add them to the collection with key `constants.ASSETS_KEY`, in the
# graph.
for asset_tensor in assets_collection_to_add:
asset_source_filepath = self._asset_path_from_tensor(asset_tensor)
if not asset_source_filepath:
raise ValueError("Invalid asset filepath tensor %s" % asset_tensor)
asset_source_filename = os.path.basename(asset_source_filepath)
# Build `AssetFile` proto and add it to the asset collection in the graph.
self._add_asset_to_collection(asset_source_filename, asset_tensor)
asset_source_filepath_list.append(asset_source_filepath)
tf_logging.info("Assets added to graph.")
return asset_source_filepath_list
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def add_meta_graph(self, tags, signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel. Note
that this collection should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet.
"""
if not self._has_saved_variables:
raise AssertionError(
"Variables and assets have not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Save asset files, if any.
self._save_assets(assets_collection)
saver = tf_saver.Saver(variables.all_variables(), sharded=True)
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature def map to add to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel.
"""
if self._has_saved_variables:
raise AssertionError("Variables and assets have already been saved. "
"Please invoke `add_meta_graph()` instead.")
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
# Create the variables sub-directory, if it does not exist.
variables_dir = os.path.join(
compat.as_text(self._export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
variables_path = os.path.join(
compat.as_text(variables_dir),
compat.as_text(constants.VARIABLES_FILENAME))
# Save the variables and export meta graph def.
saver = tf_saver.Saver(variables.all_variables(), sharded=True)
saver.save(sess, variables_path, write_meta_graph=False)
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to disk.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
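    # The text format below is human-readable for debugging; the binary
    # serialization in the else branch is what loaders normally consume.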
if as_text:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, self._saved_model.SerializeToString())
tf_logging.info("SavedModel written to: %s", path)
return path
|
{
"content_hash": "33615d623620e6ee047d54a0fc2c4f17",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 80,
"avg_line_length": 38.52,
"alnum_prop": 0.6886332774183241,
"repo_name": "mrry/tensorflow",
"id": "23768f12476a03f4a74e17e50014e48138b78538",
"size": "13209",
"binary": false,
"copies": "3",
"ref": "refs/heads/windows",
"path": "tensorflow/python/saved_model/builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88579"
},
{
"name": "C++",
"bytes": "12927212"
},
{
"name": "CMake",
"bytes": "66937"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "42531"
},
{
"name": "HTML",
"bytes": "1171692"
},
{
"name": "Java",
"bytes": "51034"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833435"
},
{
"name": "Makefile",
"bytes": "23439"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "136850"
},
{
"name": "Python",
"bytes": "11873711"
},
{
"name": "Shell",
"bytes": "267180"
},
{
"name": "TypeScript",
"bytes": "675176"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
######################################################################
# Copyright (c) 2017 by WLBlazers Corporation
#
# switchover.py
#
#
######################################################################
# Modifications Section:
######################################################################
## Date File Changes
######################################################################
## 07/11/2019 Baseline version 1.0.0
##
######################################################################
import os
import string
import datetime
import sys, getopt
import traceback
import mysql_handle as mysql
import sqlserver_handle as sqlserver
import common
import logging
import logging.config
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger('WLBlazers')
###############################################################################
# function switch_mirror
###############################################################################
def switch_mirror(mysql_conn, db_type, group_id, db_name, p_conn, s_conn, pri_id):
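    # Mirroring switchover protocol: force high-safety (synchronous) mode on
    # the current primary, issue SET PARTNER FAILOVER, then restore
    # high-performance (asynchronous) mode on the new primary.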
result=-1
logger.info("Switchover database to physical standby in progress...")
# get database role
str='''select m.mirroring_role
from sys.database_mirroring m, sys.databases d
where M.mirroring_guid is NOT NULL
AND m.database_id = d.database_id
AND d.name = '%s'; ''' %(db_name)
role=sqlserver.GetSingleValue(p_conn, str)
    common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Fetched database role successfully', 20, 2)
logger.info("The current database role is: %s (1:PRIMARY; 2:STANDBY)" %(role))
# get mirror status
str='''select m.mirroring_state
from sys.database_mirroring m, sys.databases d
where M.mirroring_guid is NOT NULL
AND m.database_id = d.database_id
AND d.name = '%s'; ''' %(db_name)
mirror_status=sqlserver.GetSingleValue(p_conn, str)
logger.info("The current database mirror status is: %s (0:已挂起; 1:与其他伙伴断开; 2:正在同步; 3:挂起故障转移; 4:已同步; 5:伙伴未同步; 6:伙伴已同步;)" %(mirror_status))
# get database version
#str="""SELECT @@VERSION"""
#version=sqlserver.GetSingleValue(p_conn, str)
if role==1:
        common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Database role verified successfully', 30, 2)
logger.info("Now we are going to switch database %s to physical standby." %(pri_id))
if mirror_status==4:
            common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Switching the primary to standby; this may take a few minutes, please wait...', 40, 0)
            # Enable autocommit, otherwise ALTER DATABASE raises an error
            p_conn.autocommit(True)
            logger.info("SET SAFETY FULL... ")
            # Put the mirroring session into high-safety (synchronous) mode
            str='''ALTER DATABASE %s SET SAFETY FULL; ''' %(db_name)
            res=sqlserver.ExecuteSQL(p_conn, str)
            common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Primary switched to high-safety mode', 50, 2)
            logger.info("SET PARTNER FAILOVER begin... ")
            # Fail the mirror over to the partner
str='''ALTER DATABASE %s SET PARTNER FAILOVER;''' %(db_name)
res=sqlserver.ExecuteSQL(p_conn, str)
p_conn.autocommit(False)
str='''select m.mirroring_role
from sys.database_mirroring m, sys.databases d
where M.mirroring_guid is NOT NULL
AND m.database_id = d.database_id
AND d.name = '%s'; ''' %(db_name)
new_role=sqlserver.GetSingleValue(p_conn, str)
if new_role==2:
                common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Primary successfully switched to standby', 70, 2)
logger.info("SET PARTNER FAILOVER successfully.")
                # Put the mirroring session back into high-performance (asynchronous) mode
                s_conn.autocommit(True)
                str='''ALTER DATABASE %s SET SAFETY OFF; ''' %(db_name)
                res=sqlserver.ExecuteSQL(s_conn, str)
                s_conn.autocommit(False)
                common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Primary switched to high-performance mode', 90, 2)
logger.info("SET SAFETY OFF successfully.")
result=0
else:
                common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Failover of primary to standby failed', 70, 2)
logger.info("SET PARTNER FAILOVER failed.")
result=-1
else:
        common.update_db_op_reason(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Role check failed: the current database is not the primary, so it cannot be switched to standby')
        common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Role check failed: the current database is not the primary, so it cannot be switched to standby', 20, 2)
logger.error("You can not switchover a standby database to standby!")
return result
###############################################################################
# function update_switch_flag
###############################################################################
def update_switch_flag(mysql_conn, group_id):
logger.info("Update switch flag in db_cfg_sqlserver_mirror for group %s in progress..." %(group_id))
# get current switch flag
str='select is_switch from db_cfg_sqlserver_mirror where id= %s' %(group_id)
is_switch=mysql.GetSingleValue(mysql_conn, str)
logger.info("The current switch flag is: %s" %(is_switch))
if is_switch==0:
str="""update db_cfg_sqlserver_mirror set is_switch = 1 where id = %s"""%(group_id)
else:
str="""update db_cfg_sqlserver_mirror set is_switch = 0 where id = %s"""%(group_id)
is_succ = mysql.ExecuteSQL(mysql_conn, str)
if is_succ==1:
        common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', 'Mirror group switch flag updated successfully', 100, 2)
logger.info("Update switch flag in db_cfg_sqlserver_mirror for group %s successfully." %(group_id))
else:
logger.info("Update switch flag in db_cfg_sqlserver_mirror for group %s failed." %(group_id))
###############################################################################
# main function
###############################################################################
if __name__=="__main__":
# parse argv
pri_id = ''
sta_id = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"p:s:g:")
except getopt.GetoptError:
sys.exit(2)
for opt, arg in opts:
if opt == '-p':
pri_id = arg
elif opt == '-s':
sta_id = arg
elif opt == '-g':
group_id = arg
###########################################################################
# connect to mysql
mysql_conn = ''
try:
mysql_conn = mysql.ConnectMysql()
except Exception as e:
logger.error(e)
sys.exit(2)
    # get information from mysql
db_name = ""
p_host = ""
p_port = ""
p_username = ""
p_password = ""
s_host = ""
s_port = ""
s_username = ""
s_password = ""
name_str = """select db_name from db_cfg_sqlserver_mirror where id=%s; """ %(group_id)
db_name = mysql.GetSingleValue(mysql_conn, name_str)
p_str = """select host, port, username, password from db_cfg_sqlserver where id=%s; """ %(pri_id)
res1 = mysql.GetSingleRow(mysql_conn, p_str)
if res1:
p_host = res1[0]
p_port = res1[1]
p_username = res1[2]
p_password = res1[3]
s_str = """select host, port, username, password from db_cfg_sqlserver where id=%s; """ %(sta_id)
res2 = mysql.GetSingleRow(mysql_conn, s_str)
if res2:
s_host = res2[0]
s_port = res2[1]
s_username = res2[2]
s_password = res2[3]
#print s_host,s_port,s_username,s_password
p_str = """select concat(host, ':', port) from db_cfg_sqlserver where id=%s; """ %(pri_id)
p_nopass_str = mysql.GetSingleValue(mysql_conn, p_str)
s_str = """select concat(host, ':', port) from db_cfg_sqlserver where id=%s; """ %(sta_id)
s_nopass_str = mysql.GetSingleValue(mysql_conn, s_str)
logger.info("The primary database is: " + p_nopass_str + ", the id is: " + str(pri_id))
logger.info("The standby database is: " + s_nopass_str + ", the id is: " + str(sta_id))
db_type = "sqlserver"
try:
common.db_op_lock(mysql_conn, db_type, group_id, 'SWITCHOVER') # 加锁
common.init_db_op_instance(mysql_conn, db_type, group_id, 'SWITCHOVER') #初始化切换实例
# connect to sqlserver
p_conn = sqlserver.ConnectMssql(p_host,p_port,p_username,p_password)
s_conn = sqlserver.ConnectMssql(s_host,s_port,s_username,s_password)
if p_conn is None:
common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', '连接主库失败,请根据相应日志查看原因', 10, 3)
logger.error("Connect to primary database error, exit!!!")
common.update_db_op_reason(mysql_conn, db_type, group_id, 'SWITCHOVER', '连接主库失败')
common.update_db_op_result(mysql_conn, db_type, group_id, 'SWITCHOVER', '-1')
sys.exit(2)
if s_conn is None:
common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', '连接备库失败,请根据相应日志查看原因', 10, 3)
logger.error("Connect to standby database error, exit!!!")
common.update_db_op_reason(mysql_conn, db_type, group_id, 'SWITCHOVER', '连接备库失败')
common.update_db_op_result(mysql_conn, db_type, group_id, 'SWITCHOVER', '-1')
sys.exit(2)
# 正式开始切换
try:
common.log_db_op_process(mysql_conn, db_type, group_id, 'SWITCHOVER', '准备执行主备切换', 10, 2)
res_2s=switch_mirror(mysql_conn, db_type, group_id, db_name, p_conn, s_conn, pri_id)
if res_2s ==0:
update_switch_flag(mysql_conn, group_id)
common.gen_alert_sqlserver(sta_id, 1, db_name) # generate alert
common.update_db_op_result(mysql_conn, db_type, group_id, 'SWITCHOVER', '0')
else:
common.update_db_op_result(mysql_conn, db_type, group_id, 'SWITCHOVER', res_2s)
except Exception,e:
logger.error(traceback.format_exc())
pass
except Exception,e:
logger.error(traceback.format_exc())
pass
finally:
common.db_op_unlock(mysql_conn, db_type, group_id, 'SWITCHOVER')
|
{
"content_hash": "7c8b1f1d853119eff6771c3bdda20fc3",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 140,
"avg_line_length": 37.84782608695652,
"alnum_prop": 0.5341757610568638,
"repo_name": "JK-Warriors/Heimdallr",
"id": "390d1a670c94a4308fb775e007ad738f90f2deca",
"size": "11056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/application/scripts/mssql_switchover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138793"
},
{
"name": "HTML",
"bytes": "88463"
},
{
"name": "JavaScript",
"bytes": "9581672"
},
{
"name": "PHP",
"bytes": "2937844"
},
{
"name": "Python",
"bytes": "602526"
},
{
"name": "Shell",
"bytes": "14641"
},
{
"name": "TSQL",
"bytes": "140135"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.test.simple import DjangoTestSuiteRunner
try: # pragma: no cover
from xmlrunner import XMLTestRunner as runner
except ImportError:  # pragma: no cover
runner = False
class TestSuiteRunner(DjangoTestSuiteRunner): # pragma: no cover
use_runner = runner
def run_suite(self, suite, **kwargs):
if self.use_runner and self.verbosity >= 2: # cooler switch for xml
return self.use_runner(
output=getattr(settings, 'JUNIT_OUTPUT_DIR', '.')
).run(suite)
else:
return super(TestSuiteRunner, self).run_suite(suite, **kwargs)
|
{
"content_hash": "2c7dde145b8a14ff52e96d090b66a1b7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 35.05555555555556,
"alnum_prop": 0.6671949286846276,
"repo_name": "ojii/django-nani",
"id": "9c56435cdee65666af8c295dbff61924e48acaf5",
"size": "631",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testproject/testrunner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "219201"
},
{
"name": "Shell",
"bytes": "4972"
}
],
"symlink_target": ""
}
|
"""change_vim_shared_property_to_false
Revision ID: 31acbaeb8299
Revises: e7993093baf1
Create Date: 2017-05-30 23:46:20.034085
"""
# flake8: noqa: E402
# revision identifiers, used by Alembic.
revision = '31acbaeb8299'
down_revision = 'e7993093baf1'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
op.alter_column('vims', 'shared',
existing_type=sa.Boolean(),
server_default=sa.text('false'),
nullable=False)
|
{
"content_hash": "30141036478cb3ee61c2ada082fd56e5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 52,
"avg_line_length": 22.91304347826087,
"alnum_prop": 0.6622390891840607,
"repo_name": "stackforge/tacker",
"id": "c733addec8d35acddacc649886230c8d1686be25",
"size": "1142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/db/migration/alembic_migrations/versions/31acbaeb8299_change_vim_shared_property_to_false.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1143026"
},
{
"name": "Shell",
"bytes": "26584"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = True
dependencies = [
('sentry', '0103_project_has_alert_filters'),
]
operations = [
migrations.AddField(
model_name='relayusage',
name='public_key',
field=models.CharField(db_index=True, max_length=200, null=True),
),
]
|
{
"content_hash": "2ea9614b9495c007a5f3e658c8f22bc0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 88,
"avg_line_length": 41.285714285714285,
"alnum_prop": 0.6899653979238755,
"repo_name": "beeftornado/sentry",
"id": "8ffa231303e12183a13a3413ebac794ccb256f09",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0104_collect_relay_public_key_usage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
import json
import time
from morph import flatten
def write_in_csv_format(iostream, frame):
"""
Writes frame data and metadata into iostream in csv format.
:param iostream: a CStringIO used to buffer the formatted features.
:param frame: a BaseFrame object to be written into iostream
:return: None
"""
iostream.write('%s\t%s\t%s\n' %
('metadata', json.dumps('metadata'),
json.dumps(frame.metadata, separators=(',', ':'))))
for (key, val, feature_type) in frame.data:
if not isinstance(val, dict):
val = val._asdict()
iostream.write('%s\t%s\t%s\n' % (
feature_type, json.dumps(key),
json.dumps(val, separators=(',', ':'))))
def write_in_json_format(iostream, frame):
"""
Writes frame data and metadata into iostream in json format.
:param iostream: a CStringIO used to buffer the formatted features.
:param frame: a BaseFrame object to be written into iostream
:return: None
"""
iostream.write('%s\n' % json.dumps(frame.metadata))
for (key, val, feature_type) in frame.data:
if not isinstance(val, dict):
val = val._asdict()
val['feature_type'] = feature_type
val['namespace'] = frame.metadata.get('namespace', '')
iostream.write('%s\n' % json.dumps(val))
def write_in_logstash_format(iostream, frame):
"""
    Writes frame data and metadata into iostream in json format.
    Similar to write_in_json_format, but this method concatenates them
    into a single json object.
:param iostream: a CStringIO used to buffer the formatted features.
:param frame: a BaseFrame Object to be written into iostream
:return: None
"""
payload = {}
payload['metadata'] = frame.metadata
for (key, val, feature_type) in frame.data:
if not isinstance(val, dict):
val = val._asdict()
if feature_type not in payload:
payload[feature_type] = {}
payload[feature_type][key] = val
iostream.write('%s\n' % json.dumps(payload))
def write_in_graphite_format(iostream, frame):
"""
Writes frame data and metadata into iostream in graphite format.
:param iostream: a CStringIO used to buffer the formatted features.
:param frame: a BaseFrame object to be written into iostream
:return: None
"""
namespace = frame.metadata.get('namespace', '')
timestamp = frame.metadata.get('timestamp', '')
for (key, val, feature_type) in frame.data:
if not isinstance(val, dict):
val = val._asdict()
write_feature_in_graphite_format(iostream, namespace, timestamp,
key, val, feature_type)
def write_feature_in_graphite_format(iostream, namespace, timestamp,
feature_key, feature_val,
feature_type):
"""
Write a feature in graphite format into iostream. The graphite format
looks like this, one line per metric value:
[namespace].[feature_key].[metric] [value] [timestamp]\r\n
[namespace].[feature_key].[metric] [value] [timestamp]\r\n
[namespace].[feature_key].[metric] [value] [timestamp]\r\n
This function converts a feature into that string and writes it into
the iostream.
:param namespace: Frame namespace for this feature
:param timestamp: From frame metadata, fmt: %Y-%m-%dT%H:%M:%S%z
:param feature_type:
:param feature_key:
:param feature_val:
:param iostream: a CStringIO used to buffer the formatted features.
:return: None
"""
    # Convert e.g. 2017-02-07T13:20:15-0500 to 1486491615 (epoch seconds).
    # On Python >= 3.2 the following works:
    #   time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S%z')
    # but earlier Python versions don't respect the %z timezone directive,
    # so the timezone conversion is skipped here, assuming the timestamp in
    # the metadata was created in the same timezone used for the reverse.
timestamp = time.mktime(time.strptime(timestamp[:-5], '%Y-%m-%dT%H:%M:%S'))
items = flatten(feature_val).items()
if isinstance(namespace, dict):
namespace = json.dumps(namespace)
else:
namespace = namespace.replace('/', '.')
for (metric, value) in items:
try:
# Only emit values that we can cast as floats
value = float(value)
except (TypeError, ValueError):
continue
metric = metric.replace('(', '_').replace(')', '')
metric = metric.replace(' ', '_').replace('-', '_')
metric = metric.replace('/', '_').replace('\\', '_')
feature_key = feature_key.replace('_', '-')
if 'cpu' in feature_key or 'memory' in feature_key:
metric = metric.replace('_', '-')
if 'if' in metric:
metric = metric.replace('_tx', '.tx')
metric = metric.replace('_rx', '.rx')
if feature_key == 'load':
feature_key = 'load.load'
feature_key = feature_key.replace('/', '$')
tmp_message = '%s.%s.%s %f %d\n' % (namespace, feature_key,
metric, value, timestamp)
iostream.write(tmp_message)
|
{
"content_hash": "da98ace13287879999ad8f37688d4e34",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 37.22142857142857,
"alnum_prop": 0.6008443676837459,
"repo_name": "canturkisci/agentless-system-crawler",
"id": "95a4c663f49a196c735c0cb4b6317f8312b64632",
"size": "5211",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "crawler/formatters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "793"
},
{
"name": "Makefile",
"bytes": "2108"
},
{
"name": "Python",
"bytes": "704517"
},
{
"name": "Shell",
"bytes": "94228"
}
],
"symlink_target": ""
}
|
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_vertex_array'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_EXT_vertex_array',False)
_p.unpack_constants( """GL_VERTEX_ARRAY_EXT 0x8074
GL_NORMAL_ARRAY_EXT 0x8075
GL_COLOR_ARRAY_EXT 0x8076
GL_INDEX_ARRAY_EXT 0x8077
GL_TEXTURE_COORD_ARRAY_EXT 0x8078
GL_EDGE_FLAG_ARRAY_EXT 0x8079
GL_VERTEX_ARRAY_SIZE_EXT 0x807A
GL_VERTEX_ARRAY_TYPE_EXT 0x807B
GL_VERTEX_ARRAY_STRIDE_EXT 0x807C
GL_VERTEX_ARRAY_COUNT_EXT 0x807D
GL_NORMAL_ARRAY_TYPE_EXT 0x807E
GL_NORMAL_ARRAY_STRIDE_EXT 0x807F
GL_NORMAL_ARRAY_COUNT_EXT 0x8080
GL_COLOR_ARRAY_SIZE_EXT 0x8081
GL_COLOR_ARRAY_TYPE_EXT 0x8082
GL_COLOR_ARRAY_STRIDE_EXT 0x8083
GL_COLOR_ARRAY_COUNT_EXT 0x8084
GL_INDEX_ARRAY_TYPE_EXT 0x8085
GL_INDEX_ARRAY_STRIDE_EXT 0x8086
GL_INDEX_ARRAY_COUNT_EXT 0x8087
GL_TEXTURE_COORD_ARRAY_SIZE_EXT 0x8088
GL_TEXTURE_COORD_ARRAY_TYPE_EXT 0x8089
GL_TEXTURE_COORD_ARRAY_STRIDE_EXT 0x808A
GL_TEXTURE_COORD_ARRAY_COUNT_EXT 0x808B
GL_EDGE_FLAG_ARRAY_STRIDE_EXT 0x808C
GL_EDGE_FLAG_ARRAY_COUNT_EXT 0x808D
GL_VERTEX_ARRAY_POINTER_EXT 0x808E
GL_NORMAL_ARRAY_POINTER_EXT 0x808F
GL_COLOR_ARRAY_POINTER_EXT 0x8090
GL_INDEX_ARRAY_POINTER_EXT 0x8091
GL_TEXTURE_COORD_ARRAY_POINTER_EXT 0x8092
GL_EDGE_FLAG_ARRAY_POINTER_EXT 0x8093""", globals())
glget.addGLGetConstant( GL_VERTEX_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_NORMAL_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_INDEX_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_VERTEX_ARRAY_SIZE_EXT, (1,) )
glget.addGLGetConstant( GL_VERTEX_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_VERTEX_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_VERTEX_ARRAY_COUNT_EXT, (1,) )
glget.addGLGetConstant( GL_NORMAL_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_NORMAL_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_NORMAL_ARRAY_COUNT_EXT, (1,) )
glget.addGLGetConstant( GL_COLOR_ARRAY_SIZE_EXT, (1,) )
glget.addGLGetConstant( GL_COLOR_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_COLOR_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_COLOR_ARRAY_COUNT_EXT, (1,) )
glget.addGLGetConstant( GL_INDEX_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_INDEX_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_INDEX_ARRAY_COUNT_EXT, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_SIZE_EXT, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_COUNT_EXT, (1,) )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_COUNT_EXT, (1,) )
@_f
@_p.types(None,_cs.GLint)
def glArrayElementEXT( i ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glColorPointerEXT( size,type,stride,count,pointer ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei)
def glDrawArraysEXT( mode,first,count ):pass
@_f
@_p.types(None,_cs.GLsizei,_cs.GLsizei,arrays.GLbooleanArray)
def glEdgeFlagPointerEXT( stride,count,pointer ):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLvoidpArray)
def glGetPointervEXT( pname,params ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glIndexPointerEXT( type,stride,count,pointer ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glNormalPointerEXT( type,stride,count,pointer ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glTexCoordPointerEXT( size,type,stride,count,pointer ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glVertexPointerEXT( size,type,stride,count,pointer ):pass
def glInitVertexArrayEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
|
{
"content_hash": "ea97d83365a476f648306479d966fdc8",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 76,
"avg_line_length": 42.391752577319586,
"alnum_prop": 0.7621595330739299,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "72cd9d8374fd68da8208ff982ca0849c42c72609",
"size": "4112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/vertex_array.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
}
|
"""
contains some chess-related utility functions
"""
from cmd_util import input_int
from pieces import ChessPiece
def format_board(board, row_count, col_count):
"""
    convert a `board` into a string that can be shown in the console
board: a dict { (row_num, col_num) => piece_symbol }
row_count: number of rows
col_count: number of columns
"""
sep_line = '-' * (col_count * 4 + 1)
lines = [sep_line]
for row_num in range(row_count):
lines.append(
'| ' + ' | '.join([
board.get((row_num, col_num), ' ')
for col_num in range(col_count)
]) + ' |'
)
lines.append(sep_line)
return '\n'.join(lines)
def make_random_board(row_count, col_count, density=0.5):
"""create a random chess board with given size and density"""
import random
board = {}
for row_num in range(row_count):
for col_num in range(col_count):
factor = random.random() / density
if factor >= 1:
continue
index = int(factor * len(ChessPiece.class_list))
board[(row_num, col_num)] = ChessPiece.class_list[index].symbol
return board
def test_format_random_board(density=0.5):
"""test `format_random_board` function"""
while True:
row_count = input_int('Number of rows: ', minimum=2, default=0)
if row_count == 0:
break
col_count = input_int('Number of columns: ', minimum=2)
board = make_random_board(row_count, col_count, density)
print(format_board(board, row_count, col_count))
print('\n\n')
if __name__ == '__main__':
test_format_random_board(density=0.5)
|
{
"content_hash": "c7e55767ec1b1b945298c668f9b84851",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.5757042253521126,
"repo_name": "ilius/chess-challenge",
"id": "56d8c69f00b416c13817a05dba3550cea37a2dd6",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chess_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21256"
}
],
"symlink_target": ""
}
|
'''
$Id$
myexc.py -- "my exceptions" demo which highlights user-created
exceptions. NOTE: this example does not currently work with
JPython as neither the errno nor tempfile modules have been
implemented, and also, the socket module is incomplete.
'''
# import all our needed modules
import os, socket, errno, types, tempfile
# create our a new NetworkError exception, derived from IOError
class NetworkError(IOError):
pass
# create our a new FileError exception, derived from IOError
class FileError(IOError):
pass
# updArgs --> tuple
def updArgs(args, newarg=None):
'''updArgs(args, newarg=None) -- if instance, grab each exception
instance argument and place them in a list; otherwise, just
convert given args sequence to a list for mutability; add
newarg if necessary; then convert the whole thing to a tuple.'''
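    # e.g. (hypothetical): updArgs((2, 'No such file'), 'foo.txt')
    #      returns (2, 'No such file', 'foo.txt')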
if isinstance(args, IOError):
myargs = []
myargs.extend([arg for arg in args])
else:
myargs = list(args)
if newarg:
myargs.append(newarg)
return tuple(myargs)
# fileArgs --> tuple
def fileArgs(fn, mode, args):
'''fileArgs(fn, mode, args) -- similar to updArgs() except made
specifically for files; creates small permission string and
formats error to be similar to other IOError exceptions.'''
if args[0] == errno.EACCES and \
'access' in dir(os):
perms = ''
permd = { 'r': os.R_OK, 'w': os.W_OK, \
'x': os.X_OK }
pkeys = permd.keys()
pkeys.sort()
pkeys.reverse()
for eachPerm in 'rwx':
if os.access(fn, permd[eachPerm]):
perms = perms + eachPerm
else:
perms = perms + '-'
if isinstance(args, IOError):
myargs = []
myargs.extend([arg for arg in args])
else:
myargs = list(args)
myargs[1] = "'%s' %s (perms: '%s')" % \
(mode, myargs[1], perms)
myargs.append(args.filename)
else:
myargs = args
return tuple(myargs)
# myconnect() --> None (raises exception on error)
def myconnect(sock, host, port):
'''myconnect(sock, host, port) -- attempt to make a network connection
with the given socket and host-port pair; raises our new NetworkError
exception and collates error number and reason.'''
try:
        sock.connect((host, port))
except socket.error, args:
myargs = updArgs(args) # convert inst to tuple
if len(myargs) == 1: # no #s on some errors
myargs = (errno.ENXIO, myargs[0])
raise NetworkError, \
updArgs(myargs, host + ':' + str(port))
# myopen() --> file object
def myopen(fn, mode='r'):
'''myopen(fn, mode) -- wrapper around the open() built-in function
such that we raise our new FileError exception on an error situation
and collate a set of FileError exception arguments to pass to the user'''
try:
fo = open(fn, mode)
except IOError, args:
raise FileError, fileArgs(fn, mode, args)
return fo
# testfile() --> None
def testfile():
'''testfile() -- runs the file tester, setting a variety of test files
which should generate FileError exceptions'''
fn = tempfile.mktemp()
f = open(fn, 'w')
f.close()
for eachTest in ((0, 'r'), (0100, 'r'), (0400, 'w'), (0500, 'w')):
try:
os.chmod(fn, eachTest[0])
f = myopen(fn, eachTest[1])
except FileError, args:
print "%s: %s" % \
(args.__class__.__name__, args)
else:
print fn, "opened ok... perms ignored"
f.close()
os.chmod(fn, 0777)
os.unlink(fn)
# testnet() --> None
def testnet():
    '''testnet() -- runs the network tester, making various connections
which should generate NetworkError exceptions'''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for eachHost in ():    # YOUR HOSTS HERE, e.g. ('www.example.com',)
try:
myconnect(s, eachHost, 80)
except NetworkError, args:
print "%s: %s" % (args.__class__.__name__, args)
else:
print "network connection successful to", `eachHost`
s.close()
# run tests if invoked as a script
if __name__ == '__main__':
testfile()
testnet()
|
{
"content_hash": "6123acb437fb0ae3e81a01e341956fde",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 77,
"avg_line_length": 29.31168831168831,
"alnum_prop": 0.5686752326096588,
"repo_name": "github641/python-journey",
"id": "10159f02a898ab4d00709ec4106653bc22190e89",
"size": "4537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myexc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "43850"
}
],
"symlink_target": ""
}
|
import os
import sys
import warnings
import getopt
from shutil import move
from os import remove, close
"""Modify meta information in raw experimental data.
While comparing algorithms with the bbob_pproc package, it is sometimes
needed to change the algorithm name (given as algId in the :file:`.info`
files) or the algorithm comments after a run is already finished (for
example because two output folders contain results for two different
algorithms but with the same name). This script allows to change these
within a specified output folder.
written: db 28/01/2010
db 26/06/2013 corrected documentation
"""
__all__ = ['main']
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def usage():
print main.__doc__
def main(argv=None):
"""Main routine.
This script allows to change algorithm name (algId) and algorithm
comment after a run finished, i.e., after an output folder has been
created.
:param seq argv: list of strings containing options and arguments.
If not provided, sys.argv is accessed.
:py:data:`argv` should list an output folder (first argument) and
additionally an algorithm name (2nd argument) and the algorithm
comment (3rd argument).
If only the output folder is given, the script asks for an algorithm
name and a comment interactively.
-h, --help
display this message
-v, --verbose
verbose mode, prints out operations. When not in verbose mode, no
output is to be expected, except for errors.
Examples:
* Changing algorithm name and comments for given output folder from the
command line::
$ python bbob_pproc/changeAlgIdAndComment.py outfolder "CMA-ES" "CMA_with_lambda_100"
* Changing algorithm name and comments for given output folder
interactively::
$ python bbob_pproc/changeAlgIdAndComment.py outputfolder
"""
if argv is None:
argv = sys.argv[1:]
try:
try:
opts, args = getopt.getopt(argv, "hv",
["help", "verbose"])
except getopt.error, msg:
raise Usage(msg)
if not (args):
usage()
sys.exit()
verbose = False
#Process options
for o, a in opts:
if o in ("-v","--verbose"):
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
assert False, "unhandled option"
# check if all arguments are there and ask for them if not:
if len(args) < 3:
if len(args) < 2:
name = raw_input("You forgot to specify an algorithm name. Please enter one (algId):")
args.append(name)
comment = raw_input("You forgot to specify a comment. Please enter one for algorithm " + args[1] + ":")
args.append(comment)
folder = args[0]
# make sure that folder name ends with a '/' to be able to append
# the file names afterwards
if not folder.endswith('/'):
folder = folder + '/'
algId = args[1]
comment = args[2]
if not os.path.exists(folder):
print "ERROR: folder " + folder + " does not exist!"
sys.exit()
if not os.path.isdir(folder):
print "ERROR: " + folder + " is not a directory"
sys.exit()
# get all .info files in folder:
FILES = []
for (path, dirs, files) in os.walk(folder):
for fname in files:
if fname.endswith('.info'):
FILES.append(os.path.join(path, fname))
for file in FILES:
# open file to read and temp file to write
infile = open(file,'r')
tempfile = open('temp.temp','w')
while infile:
line = infile.readline()
if not line:
break
# make sure that everything is copied:
newline = line
# check if something needs to be changed:
if line.find('algId') >= 0:
s = line.split()
n = 0 # compute position of 'algId'
for word in s:
n = n+1
if word=='algId':
break
# replace algId:
s = s[0:n+1]
s.append("'" + algId + "'\n")
newline = " ".join(s)
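                    # e.g. (hypothetical line) "suite = 'bbob', algId = 'OLD'"
                    # becomes "suite = 'bbob', algId = 'NEW'"; tokens after the
                    # old value, if any, are dropped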
else:
s = line.split()
if '%'==s[0]:
newline = "% " + comment + "\n"
tempfile.write(newline)
infile.close()
tempfile.close()
# remove old file and rename temp file accordingly
remove(file)
move('temp.temp', file)
print(file + " changed")
sys.exit()
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use -h or --help"
return 2
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "10bb119d8c1137504a91a6fb68b73df7",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 115,
"avg_line_length": 30.517045454545453,
"alnum_prop": 0.5200148948054366,
"repo_name": "oaelhara/numbbo",
"id": "541e2ee578287feee8994e789099a5601c1d337f",
"size": "5418",
"binary": false,
"copies": "4",
"ref": "refs/heads/development",
"path": "code-postprocessing/bbob_pproc/changeAlgIdAndComment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "738"
},
{
"name": "C",
"bytes": "810419"
},
{
"name": "C++",
"bytes": "51312"
},
{
"name": "Groff",
"bytes": "14660"
},
{
"name": "HTML",
"bytes": "190376"
},
{
"name": "Java",
"bytes": "13954"
},
{
"name": "JavaScript",
"bytes": "17374"
},
{
"name": "Makefile",
"bytes": "457"
},
{
"name": "Matlab",
"bytes": "63089"
},
{
"name": "Python",
"bytes": "889491"
},
{
"name": "R",
"bytes": "1465"
},
{
"name": "Shell",
"bytes": "9502"
},
{
"name": "TeX",
"bytes": "121264"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from compose.management.commands import (
demo_data_compose,
init_app_compose,
init_app_compose_news,
)
class TestCommand(TestCase):
def test_demo_data(self):
""" Test the management command """
command = demo_data_compose.Command()
command.handle()
def test_init_app(self):
""" Test the management command """
command = init_app_compose.Command()
command.handle()
def test_init_app_news(self):
""" Test the management command """
command = init_app_compose.Command()
command.handle()
command = init_app_compose_news.Command()
command.handle()
|
{
"content_hash": "03eb87d567560c6d74e529746c2e56a2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 49,
"avg_line_length": 25.74074074074074,
"alnum_prop": 0.6244604316546762,
"repo_name": "pkimber/compose",
"id": "4598532abdc99b3650341abb1aa88722769740d0",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compose/tests/test_management_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17130"
},
{
"name": "HTML",
"bytes": "53261"
},
{
"name": "JavaScript",
"bytes": "9100"
},
{
"name": "Python",
"bytes": "113307"
},
{
"name": "Shell",
"bytes": "410"
}
],
"symlink_target": ""
}
|
import urllib
import hashlib
from datetime import datetime, timedelta, date
import os
from django import template
from django.conf import settings
from django.utils import timezone
from django.core.urlresolvers import reverse, resolve
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from postmarkup.parser import create, pygments_available, SimpleTag
from boski.helpers import generate_thumb
register = template.Library()
_postmarkup = create(use_pygments=pygments_available, annotate_links=False)
for i in xrange(1, 7):
_postmarkup.tag_factory.add_tag(SimpleTag, 'h%d' % i, 'h%d' % i)
render_bbcode = _postmarkup.render_to_html
@register.filter
def rm_kurwa(text):
"""
    A very simple filter based on a regex: the string is split on a
    regex matching profanities and the parts are re-joined with a
    censoring marker.
"""
import re
kurwa_regex = '(?i)chuj|chuje|chuji|chujki|kurwa|kurwy|dziwka|dziwki|' \
'dziwko|kutas|kutasy|kutasie|suka|suki|suko|suczko|skurwiel' \
'|skurwielu|skurwiele|skurwieli|cwel|cwele|cweli|peneras' \
'|chwdp|hwdp'
return '#$%^&*'.join(re.split(kurwa_regex, text))
rm_kurwa.is_safe = True
@register.filter(name='dir')
def do_dir(obj):
""" Filter that returns dir from object """
return dir(obj)
do_dir.is_safe = True
@register.filter
def show_sexy_date(date):
"""
    Displays a nicely formatted date, e.g. 'yesterday at 12:34',
    'today at 8:12'.
    :param date: datetime
"""
    if date is None or date == '':
return _('never')
    months = {
        1: _('of january'), 2: _('of february'), 3: _('of march'),
        4: _('of april'), 5: _('of may'), 6: _('of june'), 7: _('of july'),
        8: _('of august'), 9: _('of september'), 10: _('of october'),
        11: _('of november'), 12: _('of december'),
    }
now = datetime.now()
if date.tzinfo is not None:
default_timezone = timezone.get_default_timezone()
now = timezone.make_aware(now, default_timezone)
days = now.day - date.day
minutes = now.minute - date.minute
delta = now - date
if delta.seconds.__div__(60) == 0 and delta.days == 0:
return _('just now')
elif delta.seconds.__div__(60) == 1 and delta.days == 0:
return _('minute ago')
elif delta.seconds.__div__(60) in (2, 3, 4) and delta.days == 0:
return _('%d minutes ago') % delta.seconds.__div__(60)
elif 5 < delta.seconds.__div__(60) < 60 and delta.days == 0:
return _('%d minutes ago') % delta.seconds.__div__(60)
elif delta.days < 1 and days == 0:
return _('today at %d:%02d') % (date.hour, date.minute)
elif delta.days < 2 and days == 1:
return _('yesterday at %d:%02d') % (date.hour, date.minute)
elif delta.days < 3 and days == 2:
return _('two days ago at %d:%02d') % (date.hour, date.minute)
else:
return _('%d %s %d at %d:%02d') % (
date.day,
months[date.month],
date.year,
date.hour,
date.minute,
)
@register.simple_tag
def gravatar(email, size=48):
"""
Returns url to gravatar icon for given email, with size
{% gravatar comment.user_email [size] %}
<img src="{% gravatar mail@company.com 64 %}" />
"""
url = 'http://www.gravatar.com/avatar.php?%s' % urllib.urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size),
})
return url
@register.simple_tag
def url_namespace(request, name, params_string=''):
"""
Return URL in current namespace
{% url gallery:album-overview 'id=123' %}
{% url_namespace request 'edit' [params] %}
"""
params = {}
if params_string:
params_parts = params_string.split(',')
count_assignment = params_string.count('=')
if params_parts.__len__() != count_assignment:
raise Exception('Only kwargs!')
for row in params_parts:
tmp = row.split('=')
params[tmp[0]] = tmp[1]
namespace = resolve(request.META['PATH_INFO']).namespace
url = reverse('%s:%s' % (namespace, name), kwargs=params)
return url
@register.filter
def prepend_namespace(value, request):
""" Prepends a namespace to given value """
namespace = resolve(request.META['PATH_INFO']).namespace
name = '%s:%s' % (namespace, value)
return name
_t = """
file name: usertags.py
You need a template for each method, for example for online_users:
/templates/tag/online_users.html
{% if user %}
<ul>
{% for user in user %}
<li>{{user.username}}</li>
{% endfor %}
</ul>
{% endif %}
to load
{% load usertags %}
{% online_users 5 %}
{% last_registers 5 %}
{% last_logins 5 %}
"""
@register.inclusion_tag('utils/tags/online_users.html')
def online_users(num):
""" Show users that were active in last hour. """
one_hour_ago = datetime.now() - timedelta(hours=1)
sql_datetime = datetime.strftime(one_hour_ago, '%Y-%m-%d %H:%M:%S')
users = User.objects\
.filter(last_login__gt=sql_datetime, is_active__exact=1)\
.order_by('-last_login')[:num]
return {'user': users, }
@register.inclusion_tag('utils/tags/last_registers.html')
def last_registers(num):
""" Show last registered users """
users = User.objects.filter(is_active__exact=1)\
.order_by('-date_joined')[:num]
return {'user': users}
@register.inclusion_tag('utils/tags/last_logins.html')
def last_logins(num):
"""Show last num logged in users (it does shows only
that logged in by form)"""
users = User.objects.filter(is_active__exact=1)\
.order_by('-last_login')[:num]
return {'user': users, }
@register.filter
def get_attr(obj, name):
""" Try to return attribute by name from obj, otherwise returns None """
if hasattr(obj, name):
return getattr(obj, name)
return None
@register.filter
def get_key(obj, name):
""" Try to return value by name from obj, otherwise returns None """
return obj.get(name, None)
@register.simple_tag
def thumbnail(img_url, height=200, width=120, crop=True):
"""
    Generate thumbnails from a relative URL.
"""
if not img_url:
return None
class img:
url = img_url
filename = img_url.split('/')[-1]
filename_root = ".".join(img_url.split('/')[-1].split('.')[:-1])
folder = "/".join(img_url.split('/')[:-1]).replace(
settings.MEDIA_URL, settings.MEDIA_ROOT + "/")
thumb_name = "%s_thumb%dx%d.jpg" % (img.filename_root, height, width)
if not os.path.exists("%s/%s" % (img.folder, thumb_name)):
try:
thumb = generate_thumb(open("%s/%s" % (img.folder, img.filename)),
(height, width), 'jpg', crop)
except Exception:
# todo: log this exception
return ''
fh = open("%s/%s" % (img.folder, thumb_name), 'w')
thumb.seek(0)
fh.write(thumb.read())
fh.close()
return img.url.replace(img.filename, thumb_name)
@register.filter
def fb_thumbnail(img, args=None):
"""
    Generate thumbnails from FileBrowserField fields.
"""
if not img:
return img
height, width, crop = 200, 120, True
if args is not None:
args = [arg.strip() for arg in args.split(',')]
height = int(args[0])
width = int(args[1])
if args.__len__() == 3 and args[2] == '0':
crop = False
thumb_name = "%s_thumb%dx%d.jpg" % (img.filename_root, height, width)
    thumb_path = "%s/%s" % (
        img.path.replace(img.filename, ''), thumb_name)
    if not os.path.exists(thumb_path):
try:
thumb = generate_thumb(
open(img.path), (height, width), 'jpg', crop)
        except IOError:  # missing file
return ''
        except Exception:  # any other failure while generating the thumbnail
return ''
fh = open("%s/%s" % (
img.path.replace(img.filename, ''), thumb_name), 'w')
thumb.seek(0)
fh.write(thumb.read())
fh.close()
return img.url.replace(img.filename, thumb_name)
def create_dir_hash_structure(hash_name, under_path, blocs=4):
""" Create dir structure from given name """
created_part = ''
for i in xrange(0, blocs):
created_part = os.path.join(created_part, hash_name[i * 2:(i + 1) * 2])
try:
os.mkdir(os.path.join(under_path, created_part))
except Exception, e:
if e.errno == 17: # file (dir in this case) exist error
pass
else: # re-raise
raise
return os.path.join(under_path, created_part), created_part
@register.filter
def thumbnail2(img_url, args=None):
"""
    Generate thumbnails from a relative URL.
"""
if not img_url:
return img_url
class img:
url = img_url
filename = img_url.split('/')[-1]
filename_root = ".".join(img_url.split('/')[-1].split('.')[:-1])
folder = "/".join(img_url.split('/')[:-1]).replace(
settings.MEDIA_URL, settings.MEDIA_ROOT + "/")
height, width, crop = 200, 120, True
if args is not None:
args = [arg.strip() for arg in args.split(',')]
height = int(args[0])
width = int(args[1])
if args.__len__() == 3 and args[2] == '0':
crop = False
thumb_name = "%s_thumb%dx%d.jpg" % (img.filename_root, height, width)
if not os.path.exists("%s/%s" % (img.folder, thumb_name)):
try:
thumb = generate_thumb(open("%s/%s" % (
img.folder, img.filename)
), (height, width), 'jpg', crop)
except Exception:
# import traceback
# traceback.print_exc()
return ''
fh = open("%s/%s" % (img.folder, thumb_name), 'w')
thumb.seek(0)
fh.write(thumb.read())
fh.close()
return img.url.replace(img.filename, thumb_name)
@register.simple_tag()
def thumbnail_c(img_url, height=200, width=120, crop=True):
"""
@param img_url: ImageFieldFile
    Generate thumbnails from a relative URL.
"""
if not img_url:
return img_url
if img_url.__class__.__name__ == 'ImageFieldFile':
img_url = img_url.name
img_url = urllib.unquote(img_url)
    filename = img_url.split('/')[-1]  # file name
filename_root = ".".join(img_url.split('/')[-1].split('.')[:-1])
folder = "/".join(img_url.split('/')[:-1]).replace(
        settings.MEDIA_URL, settings.MEDIA_ROOT + "/")  # path to the thumbnail
if not folder.startswith(settings.MEDIA_ROOT):
folder = '%s/%s' % (settings.MEDIA_ROOT, folder)
thumb_name = "%s.jpg" % hashlib.sha256("%s_thumb%dx%d.jpg" % (
filename_root, height, width)).hexdigest()
created_path, hash_part = create_dir_hash_structure(
thumb_name, settings.MEDIA_ROOT + '/photo', 7)
full_thumbnail_path = "%s/%s" % (
created_path, thumb_name.replace(''.join(hash_part.split('/')), ''))
full_thumbnail_url = "photo/%s/%s" % (
hash_part, thumb_name.replace(''.join(hash_part.split('/')), ''))
if not os.path.exists(full_thumbnail_path):
try:
with open("%s/%s" % (folder, filename)) as fh:
thumb = generate_thumb(fh, (height, width), 'jpg', crop)
except IOError, e:
return None
if thumb is None:
return None
with open(full_thumbnail_path, 'w') as fh:
thumb.seek(0)
fh.write(thumb.read())
return full_thumbnail_url
@register.filter
def match_url(url, match_with):
""" Checks if match_with starts with given url """
return url.startswith(match_with)
@register.filter
def exact_match_url(url, match_with):
""" Checks if given url is identical to match_with """
return url == match_with
@register.simple_tag(takes_context=True)
def try_to_include(context, template_name):
"""
Try to include a template, if doesn't exists, return empty string.
Usage: {% try_to_include "head.html" %}
"""
try:
return template.loader.get_template(template_name).render(context)
except template.TemplateDoesNotExist:
return ''
@register.filter
def get_default_path(value, request):
"""
Returns default path for templates inclusion
"""
namespace = resolve(request.META['PATH_INFO']).namespace
if ':' in namespace:
namespace = '/'.join(namespace.split(':'))
path = '%s/%s' % (namespace, value)
return path
@register.filter
def within_time(datetime_obj, value):
"""
Checks if datetime_obj is within given period
Suffixes:
d - days
h - hours
m - minutes
    s - seconds
@param datetime_obj: datetime or date
@param value: string - must match (\d+)(d|h|m|s)
@return:
"""
value, suffix = int(value[:-1]), value[-1]
    if not isinstance(datetime_obj, datetime):
        if isinstance(datetime_obj, date):
            datetime_obj = datetime.combine(datetime_obj, datetime.min.time())
        else:  # not a datetime/date object
            raise Exception(_('Instance of `date` or `datetime` expected.'))
if suffix == 'd':
return datetime.now() < datetime_obj + timedelta(days=value)
elif suffix == 'h':
return datetime.now() < datetime_obj + timedelta(hours=value)
elif suffix == 'm':
return datetime.now() < datetime_obj + timedelta(minutes=value)
elif suffix == 's':
return datetime.now() < datetime_obj + timedelta(seconds=value)
else:
raise Exception(_('Incorrect suffix supplied'))
|
{
"content_hash": "573d6b109f820aca5858578acef8acc1",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 80,
"avg_line_length": 29.41276595744681,
"alnum_prop": 0.5844907407407407,
"repo_name": "Alkemic/webpage",
"id": "0b124c4f59aa26bdee1d8de08042df3166d2e8e9",
"size": "13855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boski/templatetags/utils_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4649"
},
{
"name": "JavaScript",
"bytes": "287"
},
{
"name": "Python",
"bytes": "115710"
}
],
"symlink_target": ""
}
|
"""
Contains functionality to use a X10 dimmer over Mochad.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mochad/
"""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light, PLATFORM_SCHEMA)
from homeassistant.components import mochad
from homeassistant.const import (
CONF_NAME, CONF_PLATFORM, CONF_DEVICES, CONF_ADDRESS)
from homeassistant.helpers import config_validation as cv
DEPENDENCIES = ['mochad']
_LOGGER = logging.getLogger(__name__)
CONF_BRIGHTNESS_LEVELS = 'brightness_levels'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): mochad.DOMAIN,
CONF_DEVICES: [{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.x10_address,
vol.Optional(mochad.CONF_COMM_TYPE): cv.string,
vol.Optional(CONF_BRIGHTNESS_LEVELS, default=32):
vol.All(vol.Coerce(int), vol.In([32, 64, 256])),
}]
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up X10 dimmers over a mochad controller."""
devs = config.get(CONF_DEVICES)
add_entities([MochadLight(
hass, mochad.CONTROLLER.ctrl, dev) for dev in devs])
return True
class MochadLight(Light):
"""Representation of a X10 dimmer over Mochad."""
def __init__(self, hass, ctrl, dev):
"""Initialize a Mochad Light Device."""
from pymochad import device
self._controller = ctrl
self._address = dev[CONF_ADDRESS]
self._name = dev.get(CONF_NAME,
'x10_light_dev_{}'.format(self._address))
self._comm_type = dev.get(mochad.CONF_COMM_TYPE, 'pl')
self.light = device.Device(ctrl, self._address,
comm_type=self._comm_type)
self._brightness = 0
self._state = self._get_device_status()
self._brightness_levels = dev.get(CONF_BRIGHTNESS_LEVELS) - 1
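        # stored as the maximum dim step index (levels - 1),
        # e.g. 32 configured levels -> 31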
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
def _get_device_status(self):
"""Get the status of the light from mochad."""
with mochad.REQ_LOCK:
status = self.light.get_status().rstrip()
return status == 'on'
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def is_on(self):
"""Return true if the light is on."""
return self._state
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_BRIGHTNESS
@property
def assumed_state(self):
"""X10 devices are normally 1-way so we have to assume the state."""
return True
def _calculate_brightness_value(self, value):
return int(value * (float(self._brightness_levels) / 255.0))
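        # e.g. with 256 levels (_brightness_levels == 255) a brightness of 128
        # maps to step 128; with 64 levels (63), 255 maps to step 63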
def _adjust_brightness(self, brightness):
if self._brightness > brightness:
bdelta = self._brightness - brightness
mochad_brightness = self._calculate_brightness_value(bdelta)
self.light.send_cmd("dim {}".format(mochad_brightness))
self._controller.read_data()
elif self._brightness < brightness:
bdelta = brightness - self._brightness
mochad_brightness = self._calculate_brightness_value(bdelta)
self.light.send_cmd("bright {}".format(mochad_brightness))
self._controller.read_data()
def turn_on(self, **kwargs):
"""Send the command to turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
with mochad.REQ_LOCK:
if self._brightness_levels > 32:
out_brightness = self._calculate_brightness_value(brightness)
self.light.send_cmd('xdim {}'.format(out_brightness))
self._controller.read_data()
else:
self.light.send_cmd("on")
self._controller.read_data()
# There is no persistence for X10 modules so a fresh on command
# will be full brightness
if self._brightness == 0:
self._brightness = 255
self._adjust_brightness(brightness)
self._brightness = brightness
self._state = True
def turn_off(self, **kwargs):
"""Send the command to turn the light on."""
with mochad.REQ_LOCK:
self.light.send_cmd('off')
self._controller.read_data()
            # There is no persistence for X10 modules so we need to prepare
            # to track that a fresh on command will be full brightness
if self._brightness_levels == 31:
self._brightness = 0
self._state = False
|
{
"content_hash": "062feb0a0fa94dace1c8c021d913f847",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 35.583941605839414,
"alnum_prop": 0.6147692307692307,
"repo_name": "tinloaf/home-assistant",
"id": "2e68c369ba644739bc74d1a59d812c2a5975db4c",
"size": "4875",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/light/mochad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class ApplicationGateway(Resource):
"""Application gateway resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: SKU of the application gateway resource.
:type sku: ~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySku
:param ssl_policy: SSL policy of the application gateway resource.
:type ssl_policy:
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySslPolicy
:ivar operational_state: Operational state of the application gateway
resource. Possible values include: 'Stopped', 'Starting', 'Running',
'Stopping'
:vartype operational_state: str or
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayOperationalState
    :param gateway_ip_configurations: Subnets of the application gateway
    resource.
:type gateway_ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayIPConfiguration]
:param authentication_certificates: Authentication certificates of the
application gateway resource.
:type authentication_certificates:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayAuthenticationCertificate]
:param ssl_certificates: SSL certificates of the application gateway
resource.
:type ssl_certificates:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySslCertificate]
:param frontend_ip_configurations: Frontend IP addresses of the
application gateway resource.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayFrontendIPConfiguration]
:param frontend_ports: Frontend ports of the application gateway resource.
:type frontend_ports:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayFrontendPort]
:param probes: Probes of the application gateway resource.
:type probes:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayProbe]
:param backend_address_pools: Backend address pool of the application
gateway resource.
:type backend_address_pools:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendAddressPool]
:param backend_http_settings_collection: Backend http settings of the
application gateway resource.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendHttpSettings]
:param http_listeners: Http listeners of the application gateway resource.
:type http_listeners:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayHttpListener]
:param url_path_maps: URL path map of the application gateway resource.
:type url_path_maps:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayUrlPathMap]
:param request_routing_rules: Request routing rules of the application
gateway resource.
:type request_routing_rules:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayRequestRoutingRule]
:param web_application_firewall_configuration: Web application firewall
configuration.
:type web_application_firewall_configuration:
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayWebApplicationFirewallConfiguration
:param resource_guid: Resource GUID property of the application gateway
resource.
:type resource_guid: str
:param provisioning_state: Provisioning state of the application gateway
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'operational_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'properties.sku', 'type': 'ApplicationGatewaySku'},
'ssl_policy': {'key': 'properties.sslPolicy', 'type': 'ApplicationGatewaySslPolicy'},
'operational_state': {'key': 'properties.operationalState', 'type': 'str'},
'gateway_ip_configurations': {'key': 'properties.gatewayIPConfigurations', 'type': '[ApplicationGatewayIPConfiguration]'},
'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[ApplicationGatewayAuthenticationCertificate]'},
'ssl_certificates': {'key': 'properties.sslCertificates', 'type': '[ApplicationGatewaySslCertificate]'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[ApplicationGatewayFrontendIPConfiguration]'},
'frontend_ports': {'key': 'properties.frontendPorts', 'type': '[ApplicationGatewayFrontendPort]'},
'probes': {'key': 'properties.probes', 'type': '[ApplicationGatewayProbe]'},
'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'backend_http_settings_collection': {'key': 'properties.backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHttpSettings]'},
'http_listeners': {'key': 'properties.httpListeners', 'type': '[ApplicationGatewayHttpListener]'},
'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[ApplicationGatewayUrlPathMap]'},
'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[ApplicationGatewayRequestRoutingRule]'},
'web_application_firewall_configuration': {'key': 'properties.webApplicationFirewallConfiguration', 'type': 'ApplicationGatewayWebApplicationFirewallConfiguration'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, sku=None, ssl_policy=None, gateway_ip_configurations=None, authentication_certificates=None, ssl_certificates=None, frontend_ip_configurations=None, frontend_ports=None, probes=None, backend_address_pools=None, backend_http_settings_collection=None, http_listeners=None, url_path_maps=None, request_routing_rules=None, web_application_firewall_configuration=None, resource_guid: str=None, provisioning_state: str=None, etag: str=None, **kwargs) -> None:
super(ApplicationGateway, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.sku = sku
self.ssl_policy = ssl_policy
self.operational_state = None
self.gateway_ip_configurations = gateway_ip_configurations
self.authentication_certificates = authentication_certificates
self.ssl_certificates = ssl_certificates
self.frontend_ip_configurations = frontend_ip_configurations
self.frontend_ports = frontend_ports
self.probes = probes
self.backend_address_pools = backend_address_pools
self.backend_http_settings_collection = backend_http_settings_collection
self.http_listeners = http_listeners
self.url_path_maps = url_path_maps
self.request_routing_rules = request_routing_rules
self.web_application_firewall_configuration = web_application_firewall_configuration
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
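# Minimal construction sketch (hypothetical values; all parameters are
# keyword-only and optional):
#   gw = ApplicationGateway(location='westus', tags={'env': 'dev'})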
|
{
"content_hash": "1ed6a6308637e6c7b6db4367af7fc132",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 524,
"avg_line_length": 58.44117647058823,
"alnum_prop": 0.7155259184700553,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "9cfb32ac4edc4df2b556e014d692eda70bfef7f2",
"size": "8422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/application_gateway_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Handle intents with scripts."""
import copy
import logging
import voluptuous as vol
from homeassistant.helpers import config_validation as cv, intent, script, template
DOMAIN = "intent_script"
CONF_INTENTS = "intents"
CONF_SPEECH = "speech"
CONF_ACTION = "action"
CONF_CARD = "card"
CONF_TYPE = "type"
CONF_TITLE = "title"
CONF_CONTENT = "content"
CONF_TEXT = "text"
CONF_ASYNC_ACTION = "async_action"
DEFAULT_CONF_ASYNC_ACTION = False
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
cv.string: {
vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(
CONF_ASYNC_ACTION, default=DEFAULT_CONF_ASYNC_ACTION
): cv.boolean,
vol.Optional(CONF_CARD): {
vol.Optional(CONF_TYPE, default="simple"): cv.string,
vol.Required(CONF_TITLE): cv.template,
vol.Required(CONF_CONTENT): cv.template,
},
vol.Optional(CONF_SPEECH): {
vol.Optional(CONF_TYPE, default="plain"): cv.string,
vol.Required(CONF_TEXT): cv.template,
},
}
}
},
extra=vol.ALLOW_EXTRA,
)
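# Example configuration.yaml entry (hypothetical intent and entities):
#
#   intent_script:
#     GetTemperature:
#       speech:
#         text: "It is {{ states('sensor.temperature') }} degrees"
#       action:
#         service: notify.notify
#         data:
#           message: "Someone asked for the temperature"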
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Activate Alexa component."""
intents = copy.deepcopy(config[DOMAIN])
template.attach(hass, intents)
for intent_type, conf in intents.items():
if CONF_ACTION in conf:
conf[CONF_ACTION] = script.Script(
hass, conf[CONF_ACTION], f"Intent Script {intent_type}"
)
intent.async_register(hass, ScriptIntentHandler(intent_type, conf))
return True
class ScriptIntentHandler(intent.IntentHandler):
"""Respond to an intent with a script."""
def __init__(self, intent_type, config):
"""Initialize the script intent handler."""
self.intent_type = intent_type
self.config = config
async def async_handle(self, intent_obj):
"""Handle the intent."""
speech = self.config.get(CONF_SPEECH)
card = self.config.get(CONF_CARD)
action = self.config.get(CONF_ACTION)
is_async_action = self.config.get(CONF_ASYNC_ACTION)
slots = {key: value["value"] for key, value in intent_obj.slots.items()}
if action is not None:
if is_async_action:
intent_obj.hass.async_create_task(
action.async_run(slots, intent_obj.context)
)
else:
await action.async_run(slots)
response = intent_obj.create_response()
if speech is not None:
response.async_set_speech(
speech[CONF_TEXT].async_render(slots), speech[CONF_TYPE]
)
if card is not None:
response.async_set_card(
card[CONF_TITLE].async_render(slots),
card[CONF_CONTENT].async_render(slots),
card[CONF_TYPE],
)
return response
|
{
"content_hash": "760419dff3e7ba251f7dde5ef1802841",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 83,
"avg_line_length": 29.737864077669904,
"alnum_prop": 0.5719882468168462,
"repo_name": "postlund/home-assistant",
"id": "38f93ed35061254628c0cd2af59eaa97e4f9f176",
"size": "3063",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/intent_script/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
from .async_client import GameServerDeploymentsServiceAsyncClient
from .client import GameServerDeploymentsServiceClient
__all__ = (
"GameServerDeploymentsServiceClient",
"GameServerDeploymentsServiceAsyncClient",
)
|
{
"content_hash": "2033b10c9d69c1b4a55538a3cf773bb0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.8266666666666667,
"repo_name": "googleapis/python-game-servers",
"id": "2e7bfd824404e6c766725cd28c629702e2104791",
"size": "825",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "google/cloud/gaming_v1/services/game_server_deployments_service/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2394740"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
}
|
import contextlib
import json
import logging
import unittest
import secrets
from http import HTTPStatus
import multiprocessing as mp
from flask import Flask
from flask_testing import TestCase
from fedlearner_webconsole.composer.composer import Composer, ComposerConfig
from fedlearner_webconsole.db import db_handler as db, get_database_uri
from fedlearner_webconsole.app import create_app
from fedlearner_webconsole.initial_db import initial_db
from fedlearner_webconsole.scheduler.scheduler import scheduler
# NOTE: the following model imports are intended to be analyzed by SQLAlchemy
from fedlearner_webconsole.auth.models import Role, User, State
from fedlearner_webconsole.composer.models import SchedulerItem, SchedulerRunner, OptimisticLock
from fedlearner_webconsole.utils.base64 import base64encode
def create_all_tables(database_uri: str = None):
if database_uri:
db.rebind(database_uri)
    # If any tables are already defined for some reason, drop them first.
if db.metadata.tables.values():
db.drop_all()
db.create_all()
class BaseTestCase(TestCase):
class Config(object):
SQLALCHEMY_DATABASE_URI = get_database_uri()
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_SECRET_KEY = secrets.token_urlsafe(64)
PROPAGATE_EXCEPTIONS = True
LOGGING_LEVEL = logging.DEBUG
TESTING = True
ENV = 'development'
GRPC_LISTEN_PORT = 1990
START_COMPOSER = False
def create_app(self):
create_all_tables(self.__class__.Config.SQLALCHEMY_DATABASE_URI)
initial_db()
app = create_app(self.__class__.Config)
return app
def setUp(self):
super().setUp()
self.signin_helper()
def tearDown(self):
self.signout_helper()
scheduler.stop()
db.drop_all()
super().tearDown()
def get_response_data(self, response):
return json.loads(response.data).get('data')
def signin_as_admin(self):
self.signout_helper()
self.signin_helper(username='admin', password='fl@123.')
def signin_helper(self, username='ada', password='fl@123.'):
resp = self.client.post('/api/v2/auth/signin',
data=json.dumps({
'username': username,
'password': base64encode(password)
}),
content_type='application/json')
resp_data = self.get_response_data(resp)
self.assertEqual(resp.status_code, HTTPStatus.OK)
self.assertTrue('access_token' in resp_data)
self.assertTrue(len(resp_data.get('access_token')) > 1)
self._token = resp_data.get('access_token')
return self._token
def signout_helper(self):
self._token = None
def _get_headers(self, use_auth=True):
headers = {}
if use_auth and self._token:
headers['Authorization'] = f'Bearer {self._token}'
return headers
def get_helper(self, url, use_auth=True):
return self.client.get(url, headers=self._get_headers(use_auth))
def post_helper(self, url, data, use_auth=True):
return self.client.post(url,
data=json.dumps(data),
content_type='application/json',
headers=self._get_headers(use_auth))
def put_helper(self, url, data, use_auth=True):
return self.client.put(url,
data=json.dumps(data),
content_type='application/json',
headers=self._get_headers(use_auth))
def patch_helper(self, url, data, use_auth=True):
return self.client.patch(url,
data=json.dumps(data),
content_type='application/json',
headers=self._get_headers(use_auth))
def delete_helper(self, url, use_auth=True):
return self.client.delete(url, headers=self._get_headers(use_auth))
def setup_project(self, role, peer_port):
if role == 'leader':
peer_role = 'follower'
else:
peer_role = 'leader'
name = 'test-project'
config = {
'participants': [{
'name': f'party_{peer_role}',
'url': f'127.0.0.1:{peer_port}',
'domain_name': f'fl-{peer_role}.com'
}],
'variables': [{
'name': 'EGRESS_URL',
'value': f'127.0.0.1:{peer_port}'
}]
}
create_response = self.post_helper('/api/v2/projects',
data={
'name': name,
'config': config,
})
self.assertEqual(create_response.status_code, HTTPStatus.OK)
return json.loads(create_response.data).get('data')
@contextlib.contextmanager
def composer_scope(self, config: ComposerConfig):
with self.app.app_context():
composer = Composer(config=config)
composer.run(db.engine)
yield composer
composer.stop()
class TestAppProcess(mp.get_context('spawn').Process):
def __init__(self, test_class, method, config=None, result_queue=None):
super(TestAppProcess, self).__init__()
self._test_class = test_class
self._method = method
self._app_config = config
self.queue = mp.get_context('spawn').Queue()
self.other_process_queues = []
self._result_queue = result_queue or mp.get_context('spawn').Queue()
def run(self):
try:
            # Remove all logging handlers to prevent this test's logs from
            # being sent elsewhere
for h in logging.getLogger().handlers[:]:
logging.getLogger().removeHandler(h)
h.close()
logging.basicConfig(
level=logging.DEBUG,
format=
'SPAWN:%(filename)s %(lineno)s %(levelname)s - %(message)s')
if self._app_config:
self._test_class.Config = self._app_config
test = self._test_class(self._method)
old_tear_down = test.tearDown
            # Other tests may still be using this process's rpc server or
            # scheduler, so wait for them after this test finishes
def new_tear_down(*args, **kwargs):
                # tell the other processes that this one has finished
for other_q in self.other_process_queues:
other_q.put(None)
                # if the test succeeded, wait for the others to finish
if not test._outcome.errors:
# wait for others
for i in range(len(self.other_process_queues)):
self.queue.get()
old_tear_down(*args, **kwargs)
test.tearDown = new_tear_down
suite = unittest.TestSuite([test])
result = unittest.TestResult()
result = suite.run(result)
if result.errors:
for method, err in result.errors:
print(
'======================================================================'
)
print('ERROR:', method)
print(
'----------------------------------------------------------------------'
)
print(err)
print(
'----------------------------------------------------------------------'
)
if result.failures:
for method, fail in result.failures:
print(
'======================================================================'
)
print('FAIL:', method)
print(
'----------------------------------------------------------------------'
)
print(fail)
print(
'----------------------------------------------------------------------'
)
assert result.wasSuccessful()
self._result_queue.put(True)
except Exception as err:
            logging.error('exception happened %s', err)
self._result_queue.put(False)
raise
def multi_process_test(test_list):
result_queue = mp.get_context('spawn').Queue()
proc_list = [
TestAppProcess(t['class'], t['method'], t['config'], result_queue)
for t in test_list
]
for p in proc_list:
for other_p in proc_list:
if other_p != p:
p.other_process_queues.append(other_p.queue)
p.start()
    # Wait until all processes finish, or until any one hits an exception
for _ in proc_list:
succeed = result_queue.get()
if not succeed:
            # Terminate all processes if any one hits an exception,
            # so that the logs stay readable
for p in proc_list:
p.terminate()
break
for i, p in enumerate(proc_list):
p.join()
if p.exitcode != 0:
raise Exception(f'Subprocess failed: number {i}')
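# A minimal sketch of the input multi_process_test expects; the test classes,
# method name and configs below are hypothetical:
#
#   multi_process_test([
#       {'class': LeaderTest, 'method': 'test_workflow', 'config': LeaderConfig},
#       {'class': FollowerTest, 'method': 'test_workflow', 'config': FollowerConfig},
#   ])
#
# Each entry must carry 'class', 'method' and 'config' keys, matching the
# constructor arguments of TestAppProcess above; pass 'config': None to keep
# the test class's default Config.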
class NoWebServerTestCase(unittest.TestCase):
class Config(object):
SQLALCHEMY_DATABASE_URI = get_database_uri()
def setUp(self) -> None:
super().setUp()
create_all_tables(self.__class__.Config.SQLALCHEMY_DATABASE_URI)
def tearDown(self) -> None:
db.drop_all()
return super().tearDown()
|
{
"content_hash": "69f64abdad693bbb6a9022081469cf56",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 100,
"avg_line_length": 37.22264150943396,
"alnum_prop": 0.5132806163828062,
"repo_name": "bytedance/fedlearner",
"id": "fff83b5a03be466b1cf2c2dc91e87e270edcabcc",
"size": "10489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_console_v2/api/testing/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "25817"
},
{
"name": "CSS",
"bytes": "7766"
},
{
"name": "Dockerfile",
"bytes": "6341"
},
{
"name": "Go",
"bytes": "163506"
},
{
"name": "HTML",
"bytes": "3527"
},
{
"name": "JavaScript",
"bytes": "482972"
},
{
"name": "Less",
"bytes": "14981"
},
{
"name": "Lua",
"bytes": "8088"
},
{
"name": "Makefile",
"bytes": "2869"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Mustache",
"bytes": "35891"
},
{
"name": "Python",
"bytes": "2412335"
},
{
"name": "Shell",
"bytes": "118210"
},
{
"name": "TypeScript",
"bytes": "805827"
}
],
"symlink_target": ""
}
|
""" miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.dtypes.common import (
_ensure_platform_int,
_ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
        If true, nulls are excluded from the index computation; i.e. -1
        values in the labels are passed through to the result as -1
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
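# A small worked example (a sketch, not part of the original module): with two
# binary levels, the flat id is labels[0] * shape[1] + labels[1]:
#
#   >>> labels = [np.array([0, 1, 1]), np.array([0, 0, 1])]
#   >>> get_group_index(labels, [2, 2], sort=True, xnull=True)
#   array([0, 2, 3])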
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
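# Continuing the sketch above, decons_group_index inverts get_group_index
# whenever the cartesian product fits within int64 bounds:
#
#   >>> decons_group_index(np.array([0, 2, 3]), [2, 2])
#   [array([0, 1, 1]), array([0, 0, 1])]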
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
from pandas.core.categorical import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
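# A brief illustration of the NaN handling (a sketch, not from the original
# module): NaN positions are appended after the sorted non-NaN indices when
# na_position is 'last':
#
#   >>> nargsort(np.array([3.0, np.nan, 1.0]))
#   array([2, 0, 1])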
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = _ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = _ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, _ensure_platform_int(new_labels)
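# A hedged illustration of safe_sort; the values and labels are arbitrary:
#
#   >>> ordered, new_labels = safe_sort(['b', 'a', 'c'], labels=[0, 1, 2, 0, -1])
#   >>> list(ordered), list(new_labels)
#   (['a', 'b', 'c'], [1, 0, 2, 1, -1])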
|
{
"content_hash": "296ea3a10a44027f92f0235e5d1fb49f",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 79,
"avg_line_length": 32.88247422680412,
"alnum_prop": 0.61518685728618,
"repo_name": "ryfeus/lambda-packs",
"id": "27252b9616a445da6378791ddd252bad5d712a9c",
"size": "15948",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Pandas_numpy/source/pandas/core/sorting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from django.db.models.sql import compiler
from django.db.models.sql.where import WhereNode
from django.db.models.sql.where import EmptyShortCircuit, EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.conf import settings
class SphinxWhereNode(WhereNode):
def sql_for_columns(self, data, qn, connection):
table_alias, name, db_type = data
return connection.ops.field_cast_sql(db_type) % name
def as_sql(self, qn, connection):
# TODO: remove this when no longer needed.
# This is to remove the parenthesis from where clauses.
# http://sphinxsearch.com/bugs/view.php?id=1150
sql, params = super(SphinxWhereNode, self).as_sql(qn, connection)
if sql and sql[0] == '(' and sql[-1] == ')':
# Trim leading and trailing parenthesis:
sql = sql[1:]
sql = sql[:-1]
return sql, params
def make_atom(self, child, qn, connection):
"""
Transform search, the keyword should not be quoted.
"""
lvalue, lookup_type, value_annot, params_or_value = child
sql, params = super(SphinxWhereNode, self).make_atom(child, qn, connection)
if lookup_type == 'search':
if hasattr(lvalue, 'process'):
try:
lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
except EmptyShortCircuit:
raise EmptyResultSet
if isinstance(lvalue, tuple):
# A direct database column lookup.
field_sql = self.sql_for_columns(lvalue, qn, connection)
else:
# A smart object with an as_sql() method.
field_sql = lvalue.as_sql(qn, connection)
# TODO: There are a couple problems here.
# 1. The user _might_ want to search only a specific field.
# 2. However, since Django requires a field name to use the __search operator
# There is no way to do a search in _all_ fields.
# 3. Because, using multiple __search operators is not supported.
        # So, we need to merge multiple __search operators into a single MATCH(), we
# can't do that here, we have to do that one level up...
# Ignore the field name, search all fields:
params = ('@* %s' % params[0], )
# _OR_ respect the field name, and search on it:
#params = ('@%s %s' % (field_sql, params[0]), )
return sql, params
class SphinxQLCompiler(compiler.SQLCompiler):
def get_columns(self, *args, **kwargs):
columns = super(SphinxQLCompiler, self).get_columns(*args, **kwargs)
for i, column in enumerate(columns):
if '.' in column:
columns[i] = column.partition('.')[2]
return columns
def quote_name_unless_alias(self, name):
# TODO: remove this when no longer needed.
# This is to remove the `` backticks from identifiers.
# http://sphinxsearch.com/bugs/view.php?id=1150
return name
# Set SQLCompiler appropriately, so queries will use the correct compiler.
SQLCompiler = SphinxQLCompiler
class SQLInsertCompiler(compiler.SQLInsertCompiler, SphinxQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SphinxQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SphinxQLCompiler):
def as_sql(self):
if getattr(settings, 'SPHINX_INDEX_RT', True):
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['REPLACE INTO %s' % qn(opts.db_table)]
# This is a bit ugly, we have to scrape information from the where clause
# and put it into the field/values list. Sphinx will not accept an UPDATE
# statement that includes full text data, only INSERT/REPLACE INTO.
lvalue, lookup_type, value_annot, params_or_value = self.query.where.children[0].children[0]
(table_name, column_name, column_type), val = lvalue.process(lookup_type, params_or_value, self.connection)
fields, values, params = [column_name], ['%s'], [val[0]]
# Now build the rest of the fields into our query.
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
                # Use a separate name so the outer params list is not clobbered.
                sql, val_params = val.as_sql(qn, self.connection)
                values.append(sql)
                params.extend(val_params)
elif val is not None:
values.append(placeholder)
params.append(val)
else:
values.append('NULL')
fields.append(name)
result.append('(%s)' % ', '.join(fields))
result.append('VALUES (%s)' % ', '.join(values))
return ' '.join(result), params
else:
return super(SQLUpdateCompiler, self).as_sql()
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SphinxQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SphinxQLCompiler):
pass
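# For reference, a hypothetical SphinxQL statement of the shape that
# SQLUpdateCompiler.as_sql above builds when SPHINX_INDEX_RT is enabled
# (the index and column names are made up):
#
#   REPLACE INTO my_rt_index (id, title, content) VALUES (%s, %s, %s)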
|
{
"content_hash": "fc160ab252e0300c76cd6f566477d452",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 119,
"avg_line_length": 43.77272727272727,
"alnum_prop": 0.5950155763239875,
"repo_name": "jnormore/django-sphinx-db",
"id": "e96ae12cad7edfeb5a1a50ca20af285880fa6f78",
"size": "5778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_sphinx_db/backend/sphinx/compiler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20579"
}
],
"symlink_target": ""
}
|
import xml.etree.ElementTree as ET
import socket
import vdebug.log
import base64
import time
import errno
""" Response objects for the DBGP module."""
class Response:
"""Contains response data from a command made to the debugger."""
ns = '{urn:debugger_protocol_v1}'
def __init__(self,response,cmd,cmd_args,api):
self.response = response
self.cmd = cmd
self.cmd_args = cmd_args
self.xml = None
self.api = api
if "<error" in self.response:
self.__parse_error()
def __parse_error(self):
"""Parse an error message which has been returned
in the response, then raise it as a DBGPError."""
xml = self.as_xml()
err_el = xml.find('%serror' % self.ns)
if err_el is None:
raise DBGPError("Could not parse error from return XML",1)
else:
code = err_el.get("code")
if code is None:
raise ResponseError(
"Missing error code in response",
self.response)
elif int(code) == 4:
raise CmdNotImplementedError('Command not implemented')
msg_el = err_el.find('%smessage' % self.ns)
if msg_el is None:
raise ResponseError(
"Missing error message in response",
self.response)
raise DBGPError(msg_el.text,code)
def get_cmd(self):
"""Get the command that created this response."""
return self.cmd
def get_cmd_args(self):
"""Get the arguments to the command."""
return self.cmd_args
def as_string(self):
"""Return the full response as a string.
There is a __str__ method, which will render the
whole object as a string and should be used for
displaying.
"""
return self.response
def as_xml(self):
"""Get the response as element tree XML.
Returns an xml.etree.ElementTree.Element object.
"""
        if self.xml is None:
self.xml = ET.fromstring(self.response)
self.__determine_ns()
return self.xml
def __determine_ns(self):
tag_repr = str(self.xml.tag)
if tag_repr[0] != '{':
raise DBGPError('Invalid or missing XML namespace',1)
else:
ns_parts = tag_repr.split('}')
self.ns = ns_parts[0] + '}'
def __str__(self):
return self.as_string()
class ContextNamesResponse(Response):
def names(self):
names = {}
for c in list(self.as_xml()):
names[int(c.get('id'))] = c.get('name')
return names
class StatusResponse(Response):
"""Response object returned by the status command."""
def __str__(self):
return self.as_xml().get('status')
class StackGetResponse(Response):
"""Response object used by the stack_get command."""
def get_stack(self):
return list(self.as_xml())
class ContextGetResponse(Response):
"""Response object used by the context_get command.
The property nodes are converted into ContextProperty
objects, which are much easier to use."""
def __init__(self,response,cmd,cmd_args,api):
Response.__init__(self,response,cmd,cmd_args,api)
self.properties = []
def get_context(self):
for c in list(self.as_xml()):
self.create_properties(ContextProperty(c))
return self.properties
def create_properties(self,property):
self.properties.append(property)
for p in property.children:
self.create_properties(p)
class EvalResponse(ContextGetResponse):
"""Response object returned by the eval command."""
def __init__(self,response,cmd,cmd_args,api):
try:
ContextGetResponse.__init__(self,response,cmd,cmd_args,api)
except DBGPError as e:
if int(e.args[1]) == 206:
raise EvalError()
else:
raise e
def get_context(self):
code = self.get_code()
for c in list(self.as_xml()):
self.create_properties(EvalProperty(c,code,self.api.language))
return self.properties
def get_code(self):
cmd = self.get_cmd_args()
parts = cmd.split('-- ')
return base64.decodestring(parts[1])
class BreakpointSetResponse(Response):
"""Response object returned by the breakpoint_set command."""
def get_id(self):
return int(self.as_xml().get('id'))
def __str__(self):
return self.as_xml().get('id')
class FeatureGetResponse(Response):
"""Response object specifically for the feature_get command."""
def is_supported(self):
"""Whether the feature is supported or not."""
xml = self.as_xml()
return int(xml.get('supported'))
def __str__(self):
if self.is_supported():
xml = self.as_xml()
return xml.text
else:
return "* Feature not supported *"
class Api:
"""Api for eBGP commands.
Uses a Connection object to read and write with the debugger,
and builds commands and returns the results.
"""
conn = None
transID = 0
def __init__(self,connection):
"""Create a new Api using a Connection object.
The Connection object specifies the debugger connection,
and the Protocol provides a OO api to interacting
with it.
connection -- The Connection object to use
"""
self.language = None
self.protocol = None
self.idekey = None
self.startfile = None
self.conn = connection
if self.conn.isconnected() == 0:
self.conn.open()
self.__parse_init_msg(self.conn.recv_msg())
def __parse_init_msg(self,msg):
"""Parse the init message from the debugger"""
xml = ET.fromstring(msg)
self.language = xml.get("language")
if self.language is None:
raise ResponseError(
"Invalid XML response from debugger",
msg)
self.language = self.language.lower()
self.idekey = xml.get("idekey")
self.version = xml.get("api_version")
self.startfile = xml.get("fileuri")
def send_cmd(self,cmd,args = '',
res_cls = Response, timeout = None):
"""Send a command to the debugger.
This method automatically adds a unique transaction
ID to the command which is required by the debugger.
Returns a Response object, which contains the
response message and command.
cmd -- the command name, e.g. 'status'
args -- arguments for the command, which is optional
for certain commands (default '')
"""
args = args.strip()
send = cmd.strip()
self.transID += 1
send += ' -i '+ str(self.transID)
if len(args) > 0:
send += ' ' + args
vdebug.log.Log("Command: "+send,\
vdebug.log.Logger.DEBUG)
self.conn.send_msg(send)
return self.recv_msg(cmd,args,res_cls,timeout)
def recv_msg(self,cmd,args = '',
res_cls = Response, timeout = None):
try:
msg = self.conn.recv_msg(timeout)
vdebug.log.Log("Response: "+str(msg),\
vdebug.log.Logger.DEBUG)
return res_cls(msg,cmd,args,self)
except socket.timeout:
            if timeout is not None:
return None
else:
raise
except socket.error, e:
if e.args[0] == errno.EWOULDBLOCK or e.args[0] == errno.EAGAIN:
                if timeout is not None:
return None
else:
raise
else:
raise
def status(self):
"""Get the debugger status.
Returns a Response object.
"""
return self.send_cmd('status','',StatusResponse, 1.0)
def async_status_check(self):
"""Check for and returns a unhandled status response"""
return self.recv_msg('', '', StatusResponse, 0)
def feature_get(self,name):
"""Get the value of a feature from the debugger.
See the DBGP documentation for a list of features.
Returns a FeatureGetResponse object.
name -- name of the feature, e.g. encoding
"""
return self.send_cmd(
'feature_get',
'-n '+str(name),
FeatureGetResponse)
def feature_set(self,name,value):
"""Set the value of a debugger feature.
See the DBGP documentation for a list of features.
Returns a Response object.
name -- name of the feature, e.g. encoding
value -- new value for the feature
"""
return self.send_cmd(
'feature_set',
'-n ' + str(name) + ' -v ' + str(value))
def run(self):
"""Tell the debugger to start or resume
execution."""
return self.send_cmd('run','',StatusResponse, 1)
def eval(self,code):
"""Tell the debugger to start or resume
execution."""
code_enc = base64.encodestring(code)
args = '-- %s' % code_enc
""" The python engine incorrectly requires length.
if self.language == 'python':
args = ("-l %i " % len(code_enc) ) + args"""
return self.send_cmd('eval',args,EvalResponse)
def step_into(self):
"""Tell the debugger to step to the next
statement.
If there's a function call, the debugger engine
will break on the first statement in the function.
"""
return self.send_cmd('step_into','',StatusResponse, 1)
def step_over(self):
"""Tell the debugger to step to the next
statement.
If there's a function call, the debugger engine
will stop at the next statement after the function call.
"""
return self.send_cmd('step_over','',StatusResponse, 1)
def step_out(self):
"""Tell the debugger to step out of the statement.
The debugger will step out of the current scope.
"""
return self.send_cmd('step_out','',StatusResponse, 1)
def break_async(self):
"""Tell the debugger to asynchronously break
"""
return self.send_cmd('break','',StatusResponse, 1)
def stop(self):
"""Tell the debugger to stop execution.
The script is terminated immediately."""
return self.send_cmd('stop','',StatusResponse)
def stack_get(self):
"""Get the stack information.
"""
return self.send_cmd('stack_get','',StackGetResponse)
def context_get(self,context = 0, depth = 0):
"""Get the context variables.
"""
return self.send_cmd('context_get',\
'-c %i -d %i' % (int(context), int(depth)),\
ContextGetResponse)
def context_names(self):
"""Get the context types.
"""
return self.send_cmd('context_names','',ContextNamesResponse)
def property_get(self,name, depth = 0):
"""Get a property.
"""
return self.send_cmd('property_get','-n %s -d %i' % (name, int(depth)),ContextGetResponse)
def detach(self):
"""Tell the debugger to detach itself from this
client.
The script is not terminated, but runs as normal
from this point."""
ret = self.send_cmd('detach','',StatusResponse)
self.conn.close()
return ret
def breakpoint_set(self,cmd_args):
"""Set a breakpoint.
The breakpoint type is defined by the arguments, see the
Breakpoint class for more detail."""
return self.send_cmd('breakpoint_set',cmd_args,\
BreakpointSetResponse)
def breakpoint_list(self):
return self.send_cmd('breakpoint_list')
def breakpoint_remove(self,id):
"""Remove a breakpoint by ID.
The ID is that returned in the response from breakpoint_set."""
return self.send_cmd('breakpoint_remove','-d %i' % id,Response)
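# A minimal usage sketch for the Api class (assumes a debugger engine will
# connect on port 9000; not part of the original module):
#
#   conn = Connection(host='', port=9000, timeout=30)
#   api = Api(conn)              # blocks until the engine connects
#   api.breakpoint_set('-t line -f file:///tmp/test.php -n 10')
#   api.run()                    # resume; returns a StatusResponse
#   api.context_get()            # returns a ContextGetResponse
#   api.detach()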
"""Connection module for managing a socket connection
between this client and the debugger."""
class Connection:
"""DBGP connection class, for managing the connection to the debugger.
The host, port and socket timeout are configurable on object construction.
"""
sock = None
address = None
isconned = 0
def __init__(self, host = '', port = 9000, timeout = 30, input_stream = None):
"""Create a new Connection.
The connection is not established until open() is called.
host -- host name where debugger is running (default '')
port -- port number which debugger is listening on (default 9000)
timeout -- time in seconds to wait for a debugger connection before giving up (default 30)
input_stream -- object for checking input stream and user interrupts (default None)
"""
self.port = port
self.host = host
self.timeout = timeout
self.input_stream = input_stream
def __del__(self):
"""Make sure the connection is closed."""
self.close()
def isconnected(self):
"""Whether the connection has been established."""
return self.isconned
def open(self):
"""Listen for a connection from the debugger. Listening for the actual
connection is handled by self.listen()."""
print 'Waiting for a connection (Ctrl-C to cancel, this message will self-destruct in ',self.timeout,' seconds...)'
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.setblocking(0)
serv.bind((self.host, self.port))
serv.listen(5)
(self.sock, self.address) = self.listen(serv, self.timeout)
self.sock.settimeout(None)
except socket.timeout:
serv.close()
raise TimeoutError("Timeout waiting for connection")
except:
serv.close()
raise
self.isconned = 1
serv.close()
def listen(self, serv, timeout):
"""Non-blocking listener. Provides support for keyboard interrupts from
the user. Although it's non-blocking, the user interface will still
block until the timeout is reached.
serv -- Socket server to listen to.
timeout -- Seconds before timeout.
"""
start = time.time()
while True:
if (time.time() - start) > timeout:
raise socket.timeout
try:
"""Check for user interrupts"""
if self.input_stream is not None:
self.input_stream.probe()
return serv.accept()
except socket.error:
pass
def close(self):
"""Close the connection."""
        if self.sock is not None:
vdebug.log.Log("Closing the socket",\
vdebug.log.Logger.DEBUG)
self.sock.close()
self.sock = None
self.isconned = 0
def __recv_length(self):
"""Get the length of the proceeding message."""
length = ''
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError('Socket Closed')
if c == '\0':
return int(length)
if c.isdigit():
length = length + c
def __recv_null(self):
"""Receive a null byte."""
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError('Socket Closed')
if c == '\0':
return
def __recv_body(self, to_recv):
"""Receive a message of a given length.
to_recv -- length of the message to receive
"""
body = ''
while to_recv > 0:
buf = self.sock.recv(to_recv)
if buf == '':
self.close()
raise EOFError('Socket Closed')
to_recv -= len(buf)
body = body + buf
return body
def recv_msg(self, timeout = None):
"""Receive a message from the debugger.
Returns a string, which is expected to be XML.
"""
self.sock.settimeout(timeout)
try:
length = self.__recv_length()
body = self.__recv_body(length)
self.__recv_null()
return body
finally:
            if self.sock is not None:
self.sock.settimeout(None)
def send_msg(self, cmd):
"""Send a message to the debugger.
cmd -- command to send
"""
self.sock.send(cmd + '\0')
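# Framing sketch: a DBGP engine sends each response as the decimal byte
# length, a NUL byte, the XML body, and a trailing NUL, which is what
# __recv_length, __recv_body and __recv_null above parse in turn; commands
# from this client are sent as plain text plus a trailing NUL. For example
# (length illustrative):
#
#   179\x00<?xml version="1.0" encoding="iso-8859-1"?>
#   <response xmlns="urn:debugger_protocol_v1" ... />\x00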
class ContextProperty:
ns = '{urn:debugger_protocol_v1}'
def __init__(self,node,parent = None,depth = 0):
self.parent = parent
self.__determine_type(node)
self._determine_displayname(node)
self.encoding = node.get('encoding')
self.depth = depth
self.numchildren = int(node.get('numchildren'))
self.size = node.get('size')
self.value = ""
self.is_last_child = False
self._determine_children(node)
self.__determine_value(node)
self.__init_children(node)
if self.type == 'scalar':
self.size = len(self.value) - 2
def __determine_value(self,node):
if self.has_children:
self.value = ""
return
self.value = self._get_enc_node_text(node,'value')
if self.value is None:
if self.encoding == 'base64':
if node.text is None:
self.value = ""
else:
self.value = base64.decodestring(node.text)
elif not self.is_uninitialized() \
and not self.has_children:
self.value = node.text
if self.value is None:
self.value = ""
self.num_crs = self.value.count('\n')
if self.type.lower() in ("string","str","scalar"):
self.value = '`%s`' % self.value.replace('`','\\`')
def __determine_type(self,node):
type = node.get('classname')
if type is None:
type = node.get('type')
if type is None:
type = 'unknown'
self.type = type
def _determine_displayname(self,node):
display_name = node.get('fullname')
        if display_name is None:
display_name = self._get_enc_node_text(node,'fullname',"")
if display_name == '::':
display_name = self.type
self.display_name = display_name
def _get_enc_node_text(self,node,name,default =
None):
n = node.find('%s%s' %(self.ns, name))
if n is not None and n.text is not None:
if n.get('encoding') == 'base64':
val = base64.decodestring(n.text)
else:
val = n.text
else:
val = None
if val is None:
return default
else:
return val
def _determine_children(self,node):
children = node.get('numchildren')
if children is None:
children = node.get('children')
if children is None:
children = 0
else:
children = int(children)
self.num_declared_children = children
self.has_children = True if children > 0 else False
self.children = []
def __init_children(self,node):
if self.has_children:
idx = 0
tagname = '%sproperty' % self.ns
children = list(node)
if children is not None:
for c in children:
if c.tag == tagname:
idx += 1
p = self._create_child(c,self,self.depth+1)
self.children.append(p)
if idx == self.num_declared_children:
p.mark_as_last_child()
def _create_child(self,node,parent,depth):
return ContextProperty(node,parent,depth)
def mark_as_last_child(self):
self.is_last_child = True
def is_uninitialized(self):
if self.type == 'uninitialized':
return True
else:
return False
def child_count(self):
return len(self.children)
def type_and_size(self):
size = None
if self.has_children:
size = self.num_declared_children
elif self.size is not None:
size = self.size
if size is None:
return self.type
else:
return "%s [%s]" %(self.type,size)
class EvalProperty(ContextProperty):
def __init__(self,node,code,language,parent=None,depth=0):
self.code = code
self.language = language.lower()
if parent is None:
self.is_parent = True
else:
self.is_parent = False
ContextProperty.__init__(self,node,parent,depth)
def _create_child(self,node,parent,depth):
return EvalProperty(node,self.code,self.language,parent,depth)
def _determine_displayname(self,node):
if self.is_parent:
self.display_name = self.code
else:
if self.language == 'php' or \
self.language == 'perl':
if self.parent.type == 'array':
self.display_name = self.parent.display_name + \
"['%s']" % node.get('name')
else:
self.display_name = self.parent.display_name + \
"->"+node.get('name')
else:
name = node.get('name')
if name is None:
name = "?"
name = self._get_enc_node_text(node,'name','?')
if self.parent.type == 'list':
self.display_name = self.parent.display_name + name
else:
self.display_name = self.parent.display_name + \
"." + name
""" Errors/Exceptions """
class TimeoutError(Exception):
pass
class DBGPError(Exception):
"""Raised when the debugger returns an error message."""
pass
class CmdNotImplementedError(Exception):
"""Raised when the debugger returns an error message."""
pass
class EvalError(Exception):
"""Raised when some evaluated code is invalid."""
pass
class ResponseError(Exception):
"""An error caused by an unexpected response from the
debugger (e.g. invalid format XML)."""
pass
|
{
"content_hash": "2ee54b126dcf71b5450aeb1472fa6cda",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 123,
"avg_line_length": 31.172131147540984,
"alnum_prop": 0.5504864580594268,
"repo_name": "mispencer/vdebug",
"id": "91282b9bbe783f1d629fd8ca0fe9e0ea3189dbf9",
"size": "22818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin/python/vdebug/dbgp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131799"
},
{
"name": "Ruby",
"bytes": "14991"
},
{
"name": "Shell",
"bytes": "582"
},
{
"name": "VimL",
"bytes": "11307"
}
],
"symlink_target": ""
}
|
import operator
import time
from itertools import chain
from datetime import datetime
from py4j.protocol import Py4JJavaError
from pyspark import RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.util import rddToFileName, TransformFunction
from pyspark.rdd import portable_hash
from pyspark.resultiterable import ResultIterable
__all__ = ["DStream"]
class DStream:
"""
A Discretized Stream (DStream), the basic abstraction in Spark Streaming,
is a continuous sequence of RDDs (of the same type) representing a
continuous stream of data (see :class:`RDD` in the Spark core documentation
for more details on RDDs).
DStreams can either be created from live data (such as, data from TCP
sockets, etc.) using a :class:`StreamingContext` or it can be
generated by transforming existing DStreams using operations such as
`map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming
    program is running, each DStream periodically generates an RDD, either
from live data or by transforming the RDD generated by a parent DStream.
    Internally, a DStream is characterized by a few basic properties:
- A list of other DStreams that the DStream depends on
- A time interval at which the DStream generates an RDD
- A function that is used to generate an RDD after each time interval
"""
def __init__(self, jdstream, ssc, jrdd_deserializer):
self._jdstream = jdstream
self._ssc = ssc
self._sc = ssc._sc
self._jrdd_deserializer = jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
def context(self):
"""
Return the StreamingContext associated with this DStream
"""
return self._ssc
def count(self):
"""
Return a new DStream in which each RDD has a single element
generated by counting each RDD of this DStream.
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
def filter(self, f):
"""
Return a new DStream containing only the elements that satisfy predicate.
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new DStream by applying a function to all elements of
this DStream, and then flattening the results
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def map(self, f, preservesPartitioning=False):
"""
Return a new DStream by applying a function to each element of DStream.
"""
def func(iterator):
return map(f, iterator)
return self.mapPartitions(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new DStream in which each RDD is generated by applying
mapPartitions() to each RDDs of this DStream.
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new DStream in which each RDD is generated by applying
mapPartitionsWithIndex() to each RDDs of this DStream.
"""
return self.transform(lambda rdd: rdd.mapPartitionsWithIndex(f, preservesPartitioning))
def reduce(self, func):
"""
Return a new DStream in which each RDD has a single element
generated by reducing each RDD of this DStream.
"""
return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None):
"""
Return a new DStream by applying reduceByKey to each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.combineByKey(lambda x: x, func, func, numPartitions)
def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None):
"""
Return a new DStream by applying combineByKey to each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
def func(rdd):
return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
return self.transform(func)
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the DStream in which each RDD are partitioned
using the specified partitioner.
"""
return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
def foreachRDD(self, func):
"""
Apply a function to each RDD in this DStream.
"""
if func.__code__.co_argcount == 1:
old_func = func
def func(_, rdd):
return old_func(rdd)
jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
api = self._ssc._jvm.PythonDStream
api.callForeachRDD(self._jdstream, jfunc)
def pprint(self, num=10):
"""
Print the first num elements of each RDD generated in this DStream.
Parameters
----------
num : int, optional
            the number of elements to print from the beginning of each RDD.
"""
def takeAndPrint(time, rdd):
taken = rdd.take(num + 1)
print("-------------------------------------------")
print("Time: %s" % time)
print("-------------------------------------------")
for record in taken[:num]:
print(record)
if len(taken) > num:
print("...")
print("")
self.foreachRDD(takeAndPrint)
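    # A hedged end-to-end sketch tying the operations above together (assumes
    # a text source on localhost:9999; not part of this module):
    #
    #   ssc = StreamingContext(sc, 1)
    #   lines = ssc.socketTextStream("localhost", 9999)
    #   counts = (lines.flatMap(lambda line: line.split(" "))
    #                  .map(lambda word: (word, 1))
    #                  .reduceByKey(lambda a, b: a + b))
    #   counts.pprint()
    #   ssc.start()
    #   ssc.awaitTermination()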
def mapValues(self, f):
"""
Return a new DStream by applying a map function to the value of
each key-value pairs in this DStream without changing the key.
"""
def map_values_fn(kv):
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
def flatMapValues(self, f):
"""
Return a new DStream by applying a flatmap function to the value
of each key-value pairs in this DStream without changing the key.
"""
def flat_map_fn(kv):
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def glom(self):
"""
Return a new DStream in which RDD is generated by applying glom()
to RDD of this DStream.
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cache(self):
"""
Persist the RDDs of this DStream with the default storage level
(`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel):
"""
Persist the RDDs of this DStream with the given storage level
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdstream.persist(javaStorageLevel)
return self
def checkpoint(self, interval):
"""
Enable periodic checkpointing of RDDs of this DStream
Parameters
----------
interval : int
time in seconds, after each period of that, generated
RDD will be checkpointed
"""
self.is_checkpointed = True
self._jdstream.checkpoint(self._ssc._jduration(interval))
return self
def groupByKey(self, numPartitions=None):
"""
Return a new DStream by applying groupByKey on each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
def countByValue(self):
"""
Return a new DStream in which each RDD contains the counts of each
distinct value in each RDD of this DStream.
"""
return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
def saveAsTextFiles(self, prefix, suffix=None):
"""
        Save each RDD in this DStream as a text file, using the string
representation of elements.
"""
def saveAsTextFile(t, rdd):
path = rddToFileName(prefix, suffix, t)
try:
rdd.saveAsTextFile(path)
except Py4JJavaError as e:
# after recovered from checkpointing, the foreachRDD may
# be called twice
if "FileAlreadyExistsException" not in str(e):
raise
return self.foreachRDD(saveAsTextFile)
    # TODO: uncomment this once we have ssc.pickleFileStream()
# def saveAsPickleFiles(self, prefix, suffix=None):
# """
    #     Save each RDD in this DStream as a binary file; the elements are
# serialized by pickle.
# """
# def saveAsPickleFile(t, rdd):
# path = rddToFileName(prefix, suffix, t)
# try:
# rdd.saveAsPickleFile(path)
# except Py4JJavaError as e:
# # after recovered from checkpointing, the foreachRDD may
# # be called twice
# if 'FileAlreadyExistsException' not in str(e):
# raise
# return self.foreachRDD(saveAsPickleFile)
def transform(self, func):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream.
`func` can have one argument of `rdd`, or have two arguments of
(`time`, `rdd`)
"""
if func.__code__.co_argcount == 1:
oldfunc = func
def func(_, rdd):
return oldfunc(rdd)
assert func.__code__.co_argcount == 2, "func should take one or two arguments"
return TransformedDStream(self, func)
def transformWith(self, func, other, keepSerializer=False):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream and 'other' DStream.
`func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
arguments of (`time`, `rdd_a`, `rdd_b`)
"""
if func.__code__.co_argcount == 2:
oldfunc = func
def func(_, a, b):
return oldfunc(a, b)
assert func.__code__.co_argcount == 3, "func should take two or three arguments"
jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
dstream = self._sc._jvm.PythonTransformed2DStream(
self._jdstream.dstream(), other._jdstream.dstream(), jfunc
)
jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
def repartition(self, numPartitions):
"""
Return a new DStream with an increased or decreased level of parallelism.
"""
return self.transform(lambda rdd: rdd.repartition(numPartitions))
@property
def _slideDuration(self):
"""
Return the slideDuration in seconds of this DStream
"""
return self._jdstream.dstream().slideDuration().milliseconds() / 1000.0
def union(self, other):
"""
Return a new DStream by unifying data of another DStream with this DStream.
Parameters
----------
other : :class:`DStream`
Another DStream having the same interval (i.e., slideDuration)
as this DStream.
"""
if self._slideDuration != other._slideDuration:
            raise ValueError("the two DStreams should have the same slide duration")
return self.transformWith(lambda a, b: a.union(b), other, True)
def cogroup(self, other, numPartitions=None):
"""
Return a new DStream by applying 'cogroup' between RDDs of this
DStream and `other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
def join(self, other, numPartitions=None):
"""
Return a new DStream by applying 'join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.join(b, numPartitions), other)
def leftOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'left outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.leftOuterJoin(b, numPartitions), other)
def rightOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'right outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.rightOuterJoin(b, numPartitions), other)
def fullOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'full outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.fullOuterJoin(b, numPartitions), other)
def _jtime(self, timestamp):
"""Convert datetime or unix_timestamp into Time"""
if isinstance(timestamp, datetime):
timestamp = time.mktime(timestamp.timetuple())
return self._sc._jvm.Time(int(timestamp * 1000))
def slice(self, begin, end):
"""
        Return all the RDDs between 'begin' and 'end' (both inclusive).
        `begin` and `end` can be datetime.datetime() or unix_timestamp.
"""
jrdds = self._jdstream.slice(self._jtime(begin), self._jtime(end))
return [RDD(jrdd, self._sc, self._jrdd_deserializer) for jrdd in jrdds]
def _validate_window_param(self, window, slide):
duration = self._jdstream.dstream().slideDuration().milliseconds()
if int(window * 1000) % duration != 0:
raise ValueError(
"windowDuration must be multiple of the parent "
"dstream's slide (batch) duration (%d ms)" % duration
)
if slide and int(slide * 1000) % duration != 0:
raise ValueError(
"slideDuration must be multiple of the parent "
"dstream's slide (batch) duration (%d ms)" % duration
)
def window(self, windowDuration, slideDuration=None):
"""
        Return a new DStream in which each RDD contains all the elements seen in
        a sliding window of time over this DStream.
Parameters
----------
windowDuration : int
width of the window; must be a multiple of this DStream's
batching interval
slideDuration : int, optional
sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
self._validate_window_param(windowDuration, slideDuration)
d = self._ssc._jduration(windowDuration)
if slideDuration is None:
return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
s = self._ssc._jduration(slideDuration)
return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated by reducing all
elements in a sliding window over this DStream.
        If `invReduceFunc` is not None, the reduction is done incrementally
        using the old window's reduced value:
        1. reduce the new values that entered the window (e.g., adding new counts)
        2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        This is more efficient than when `invReduceFunc` is None.
Parameters
----------
reduceFunc : function
associative and commutative reduce function
invReduceFunc : function
inverse reduce function of `reduceFunc`; such that for all y,
and invertible x:
`invReduceFunc(reduceFunc(x, y), x) = y`
windowDuration : int
width of the window; must be a multiple of this DStream's
batching interval
slideDuration : int
sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
keyed = self.map(lambda x: (1, x))
reduced = keyed.reduceByKeyAndWindow(
reduceFunc, invReduceFunc, windowDuration, slideDuration, 1
)
return reduced.map(lambda kv: kv[1])
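    # A hedged usage sketch ('stream' is a hypothetical DStream of numbers;
    # assumes 30 and 10 are multiples of the batch interval): keep a running
    # sum over a 30s window sliding every 10s, where operator.sub removes the
    # contribution of batches that fall out of the window.
    #   import operator
    #   sums = stream.reduceByWindow(operator.add, operator.sub, 30, 10)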
def countByWindow(self, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated
by counting the number of elements in a window over this DStream.
windowDuration and slideDuration are as defined in the window() operation.
This is equivalent to window(windowDuration, slideDuration).count(),
but will be more efficient if window is large.
"""
return self.map(lambda x: 1).reduceByWindow(
operator.add, operator.sub, windowDuration, slideDuration
)
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
"""
Return a new DStream in which each RDD contains the count of distinct elements in
RDDs in a sliding window over this DStream.
Parameters
----------
windowDuration : int
width of the window; must be a multiple of this DStream's
batching interval
slideDuration : int
sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
numPartitions : int, optional
number of partitions of each RDD in the new DStream.
"""
keyed = self.map(lambda x: (x, 1))
counted = keyed.reduceByKeyAndWindow(
operator.add, operator.sub, windowDuration, slideDuration, numPartitions
)
return counted.filter(lambda kv: kv[1] > 0)
def groupByKeyAndWindow(self, windowDuration, slideDuration, numPartitions=None):
"""
Return a new DStream by applying `groupByKey` over a sliding window.
Similar to `DStream.groupByKey()`, but applies it over a sliding window.
Parameters
----------
windowDuration : int
width of the window; must be a multiple of this DStream's
batching interval
slideDuration : int
sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
numPartitions : int, optional
Number of partitions of each RDD in the new DStream.
"""
ls = self.mapValues(lambda x: [x])
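        # The forward function extends the accumulated list in place; the
        # inverse drops the len(b) oldest items, i.e. the values that just
        # left the window (they were appended in arrival order).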
grouped = ls.reduceByKeyAndWindow(
lambda a, b: a.extend(b) or a,
lambda a, b: a[len(b) :],
windowDuration,
slideDuration,
numPartitions,
)
return grouped.mapValues(ResultIterable)
def reduceByKeyAndWindow(
self, func, invFunc, windowDuration, slideDuration=None, numPartitions=None, filterFunc=None
):
"""
Return a new DStream by applying incremental `reduceByKey` over a sliding window.
        The reduced value over a new window is calculated using the old window's reduced value:
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        `invFunc` can be None; in that case all the RDDs in the window are
        reduced from scratch, which can be slower than providing `invFunc`.
Parameters
----------
func : function
associative and commutative reduce function
invFunc : function
inverse function of `reduceFunc`
windowDuration : int
width of the window; must be a multiple of this DStream's
batching interval
slideDuration : int, optional
sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
numPartitions : int, optional
number of partitions of each RDD in the new DStream.
filterFunc : function, optional
function to filter expired key-value pairs;
only pairs that satisfy the function are retained
            set this to None if you do not want to filter
"""
self._validate_window_param(windowDuration, slideDuration)
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
reduced = self.reduceByKey(func, numPartitions)
if invFunc:
def reduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
r = a.union(b).reduceByKey(func, numPartitions) if a else b
if filterFunc:
r = r.filter(filterFunc)
return r
def invReduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
joined = a.leftOuterJoin(b, numPartitions)
return joined.mapValues(
lambda kv: invFunc(kv[0], kv[1]) if kv[1] is not None else kv[0]
)
jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)
jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)
if slideDuration is None:
slideDuration = self._slideDuration
dstream = self._sc._jvm.PythonReducedWindowedDStream(
reduced._jdstream.dstream(),
jreduceFunc,
jinvReduceFunc,
self._ssc._jduration(windowDuration),
self._ssc._jduration(slideDuration),
)
return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
else:
return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
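    # A hedged usage sketch ('pairs' is a hypothetical DStream of (word, 1)
    # tuples): incremental windowed word counts.
    #   counts = pairs.reduceByKeyAndWindow(lambda a, b: a + b,
    #                                       lambda a, b: a - b, 30, 10)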
def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
"""
Return a new "state" DStream where the state for each key is updated by applying
the given function on the previous state of the key and the new values of the key.
Parameters
----------
updateFunc : function
            State update function. If this function returns None, then the
            corresponding state key-value pair will be eliminated.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if initialRDD and not isinstance(initialRDD, RDD):
initialRDD = self._sc.parallelize(initialRDD)
def reduceFunc(t, a, b):
if a is None:
g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None))
else:
g = a.cogroup(b.partitionBy(numPartitions), numPartitions)
g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None))
state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1]))
return state.filter(lambda k_v: k_v[1] is not None)
jreduceFunc = TransformFunction(
self._sc, reduceFunc, self._sc.serializer, self._jrdd_deserializer
)
if initialRDD:
initialRDD = initialRDD._reserialize(self._jrdd_deserializer)
dstream = self._sc._jvm.PythonStateDStream(
self._jdstream.dstream(), jreduceFunc, initialRDD._jrdd
)
else:
dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc)
return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
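    # A hedged usage sketch ('pairs' is a hypothetical DStream of (key, 1)
    # tuples): a running count per key; returning None from the update
    # function would drop that key's state.
    #   def update(new_values, last_sum):
    #       return sum(new_values) + (last_sum or 0)
    #   running = pairs.updateStateByKey(update)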
class TransformedDStream(DStream):
"""
    TransformedDStream is a DStream generated by a Python function
    transforming each RDD of a DStream into another RDD.
Multiple continuous transformations of DStream can be combined into
one transformation.
"""
def __init__(self, prev, func):
self._ssc = prev._ssc
self._sc = self._ssc._sc
self._jrdd_deserializer = self._sc.serializer
self.is_cached = False
self.is_checkpointed = False
self._jdstream_val = None
        # Using type() so that functions are folded and DStreams compacted
        # only when prev is strictly a TransformedDStream (not a subclass).
if type(prev) is TransformedDStream and not prev.is_cached and not prev.is_checkpointed:
prev_func = prev.func
self.func = lambda t, rdd: func(t, prev_func(t, rdd))
self.prev = prev.prev
else:
self.prev = prev
self.func = func
@property
def _jdstream(self):
if self._jdstream_val is not None:
return self._jdstream_val
jfunc = TransformFunction(self._sc, self.func, self.prev._jrdd_deserializer)
dstream = self._sc._jvm.PythonTransformedDStream(self.prev._jdstream.dstream(), jfunc)
self._jdstream_val = dstream.asJavaDStream()
return self._jdstream_val
|
{
"content_hash": "03826ff76418f6825835ffccbfbfba49",
"timestamp": "",
"source": "github",
"line_count": 711,
"max_line_length": 100,
"avg_line_length": 38.15611814345991,
"alnum_prop": 0.6156880091415091,
"repo_name": "xuanyuanking/spark",
"id": "f445a78bd95301bb211e6e5946c32050bca71a78",
"size": "27914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/streaming/dstream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "54336"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26221"
},
{
"name": "Dockerfile",
"bytes": "9711"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1872438"
},
{
"name": "Java",
"bytes": "4519872"
},
{
"name": "JavaScript",
"bytes": "222664"
},
{
"name": "Jupyter Notebook",
"bytes": "4310516"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352963"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "7388289"
},
{
"name": "R",
"bytes": "1272682"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "31791"
},
{
"name": "Scala",
"bytes": "40053974"
},
{
"name": "Shell",
"bytes": "230591"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "98156"
}
],
"symlink_target": ""
}
|
"""
Tests of neo.io.NSDFIO
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division
import numpy as np
import quantities as pq
from datetime import datetime
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.io.nsdfio import HAVE_NSDF, NSDFIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.core import AnalogSignal, Segment, Block
from neo.test.tools import assert_same_attributes, assert_same_annotations, assert_neo_object_is_compliant
@unittest.skipUnless(HAVE_NSDF, "Requires NSDF")
class CommonTests(BaseTestIO, unittest.TestCase):
ioclass = NSDFIO
read_and_write_is_bijective = False
@unittest.skipUnless(HAVE_NSDF, "Requires NSDF")
class NSDFIOTest(unittest.TestCase):
"""
Base class for all NSDFIO tests.
    setUp and tearDown methods are responsible for setting up before and cleaning up after tests, respectively.
All create_{object} methods create and return an example {object}.
"""
def setUp(self):
self.filename = 'nsdfio_testfile.h5'
self.io = NSDFIO(self.filename)
def tearDown(self):
os.remove(self.filename)
def create_list_of_blocks(self):
blocks = []
for i in range(2):
blocks.append(self.create_block(name='Block #{}'.format(i)))
return blocks
def create_block(self, name='Block'):
block = Block()
self._assign_basic_attributes(block, name=name)
self._assign_datetime_attributes(block)
self._assign_index_attribute(block)
self._create_block_children(block)
self._assign_annotations(block)
return block
def _create_block_children(self, block):
for i in range(3):
block.segments.append(self.create_segment(block, name='Segment #{}'.format(i)))
def create_segment(self, parent=None, name='Segment'):
segment = Segment()
segment.block = parent
self._assign_basic_attributes(segment, name=name)
self._assign_datetime_attributes(segment)
self._assign_index_attribute(segment)
self._create_segment_children(segment)
self._assign_annotations(segment)
return segment
def _create_segment_children(self, segment):
for i in range(2):
segment.analogsignals.append(self.create_analogsignal(segment, name='Signal #{}'.format(i * 3)))
segment.analogsignals.append(self.create_analogsignal2(segment, name='Signal #{}'.format(i * 3 + 1)))
segment.analogsignals.append(self.create_analogsignal3(segment, name='Signal #{}'.format(i * 3 + 2)))
def create_analogsignal(self, parent=None, name='AnalogSignal1'):
signal = AnalogSignal([[1.0, 2.5], [2.2, 3.1], [3.2, 4.4]], units='mV',
sampling_rate=100 * pq.Hz, t_start=2 * pq.min)
signal.segment = parent
self._assign_basic_attributes(signal, name=name)
self._assign_annotations(signal)
return signal
def create_analogsignal2(self, parent=None, name='AnalogSignal2'):
signal = AnalogSignal([[1], [2], [3], [4], [5]], units='mA',
sampling_period=0.5 * pq.ms)
signal.segment = parent
self._assign_annotations(signal)
return signal
def create_analogsignal3(self, parent=None, name='AnalogSignal3'):
signal = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='mV',
sampling_rate=2 * pq.kHz, t_start=100 * pq.s)
signal.segment = parent
self._assign_basic_attributes(signal, name=name)
return signal
def _assign_basic_attributes(self, object, name=None):
if name is None:
object.name = 'neo object'
else:
object.name = name
object.description = 'Example of neo object'
object.file_origin = 'datafile.pp'
def _assign_datetime_attributes(self, object):
object.file_datetime = datetime(2017, 6, 11, 14, 53, 23)
object.rec_datetime = datetime(2017, 5, 29, 13, 12, 47)
def _assign_index_attribute(self, object):
object.index = 12
def _assign_annotations(self, object):
object.annotations = {'str': 'value',
'int': 56,
'float': 5.234}
@unittest.skipUnless(HAVE_NSDF, "Requires NSDF")
class NSDFIOTestWriteThenRead(NSDFIOTest):
"""
Class for testing NSDFIO.
It first creates example neo objects, then writes them to the file,
reads the file and compares the result with the original ones.
    All test_{object} methods run a "write then read" test for a/an {object}.
    All compare_{object} methods check whether the second {object} is a proper
    copy of the first one, read with the given lazy and cascade modes.
"""
lazy_modes = [False, True]
cascade_modes = [False, True]
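    # A minimal sketch of the round-trip these tests exercise (editor's
    # illustration; 'example.h5' is a hypothetical filename):
    #   io = NSDFIO('example.h5')
    #   io.write_block(block)
    #   block2 = io.read_block(lazy=False, cascade=True)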
def test_list_of_blocks(self, lazy=False, cascade=True):
blocks = self.create_list_of_blocks()
self.io.write(blocks)
for lazy in self.lazy_modes:
for cascade in self.cascade_modes:
blocks2 = self.io.read(lazy=lazy, cascade=cascade)
self.compare_list_of_blocks(blocks, blocks2, lazy, cascade)
def test_block(self, lazy=False, cascade=True):
block = self.create_block()
self.io.write_block(block)
for lazy in self.lazy_modes:
for cascade in self.cascade_modes:
block2 = self.io.read_block(lazy=lazy, cascade=cascade)
self.compare_blocks(block, block2, lazy, cascade)
def test_segment(self, lazy=False, cascade=True):
segment = self.create_segment()
self.io.write_segment(segment)
for lazy in self.lazy_modes:
for cascade in self.cascade_modes:
segment2 = self.io.read_segment(lazy=lazy, cascade=cascade)
self.compare_segments(segment, segment2, lazy, cascade)
def compare_list_of_blocks(self, blocks1, blocks2, lazy=False, cascade=True):
assert len(blocks1) == len(blocks2)
for block1, block2 in zip(blocks1, blocks2):
self.compare_blocks(block1, block2, lazy, cascade)
def compare_blocks(self, block1, block2, lazy=False, cascade=True):
self._compare_objects(block1, block2)
assert block2.file_datetime == datetime.fromtimestamp(os.stat(self.filename).st_mtime)
assert_neo_object_is_compliant(block2)
if cascade:
self._compare_blocks_children(block1, block2, lazy=lazy)
else:
assert len(block2.segments) == 0
def _compare_blocks_children(self, block1, block2, lazy):
assert len(block1.segments) == len(block2.segments)
for segment1, segment2 in zip(block1.segments, block2.segments):
self.compare_segments(segment1, segment2, lazy=lazy)
def compare_segments(self, segment1, segment2, lazy=False, cascade=True):
self._compare_objects(segment1, segment2)
assert segment2.file_datetime == datetime.fromtimestamp(os.stat(self.filename).st_mtime)
if cascade:
self._compare_segments_children(segment1, segment2, lazy=lazy)
else:
assert len(segment2.analogsignals) == 0
def _compare_segments_children(self, segment1, segment2, lazy):
assert len(segment1.analogsignals) == len(segment2.analogsignals)
for signal1, signal2 in zip(segment1.analogsignals, segment2.analogsignals):
self.compare_analogsignals(signal1, signal2, lazy=lazy)
def compare_analogsignals(self, signal1, signal2, lazy=False, cascade=True):
if not lazy:
self._compare_objects(signal1, signal2)
else:
self._compare_objects(signal1, signal2, exclude_attr=['shape', 'signal'])
assert signal2.lazy_shape == signal1.shape
assert signal2.dtype == signal1.dtype
def _compare_objects(self, object1, object2, exclude_attr=[]):
assert object1.__class__.__name__ == object2.__class__.__name__
assert object2.file_origin == self.filename
assert_same_attributes(object1, object2, exclude=['file_origin', 'file_datetime'] + exclude_attr)
assert_same_annotations(object1, object2)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "92036ed2bec0bb69a458736b0977e0cb",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 113,
"avg_line_length": 36.15584415584416,
"alnum_prop": 0.6404454022988506,
"repo_name": "theunissenlab/python-neo",
"id": "d388e2157d8562c69ee327d418b2b60dba959b24",
"size": "8376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/iotest/test_nsdfio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1730512"
}
],
"symlink_target": ""
}
|
import asyncio
import inspect
import io
import os.path
import urllib.parse
import zlib
from http.cookies import SimpleCookie
from unittest import mock
import pytest
from multidict import CIMultiDict, CIMultiDictProxy, upstr
from yarl import URL
import aiohttp
from aiohttp import BaseConnector, hdrs, helpers
from aiohttp.client_reqrep import ClientRequest, ClientResponse
@pytest.yield_fixture
def make_request(loop):
request = None
def maker(method, url, *args, **kwargs):
nonlocal request
request = ClientRequest(method, URL(url), *args, loop=loop, **kwargs)
return request
yield maker
if request is not None:
loop.run_until_complete(request.close())
def test_method1(make_request):
req = make_request('get', 'http://python.org/')
assert req.method == 'GET'
def test_method2(make_request):
req = make_request('head', 'http://python.org/')
assert req.method == 'HEAD'
def test_method3(make_request):
req = make_request('HEAD', 'http://python.org/')
assert req.method == 'HEAD'
def test_version_1_0(make_request):
req = make_request('get', 'http://python.org/', version='1.0')
assert req.version == (1, 0)
def test_version_default(make_request):
req = make_request('get', 'http://python.org/')
assert req.version == (1, 1)
def test_version_err(make_request):
with pytest.raises(ValueError):
make_request('get', 'http://python.org/', version='1.c')
def test_host_port_default_http(make_request):
req = make_request('get', 'http://python.org/')
assert req.host == 'python.org'
assert req.port == 80
assert not req.ssl
def test_host_port_default_https(make_request):
req = make_request('get', 'https://python.org/')
assert req.host == 'python.org'
assert req.port == 443
assert req.ssl
def test_host_port_nondefault_http(make_request):
req = make_request('get', 'http://python.org:960/')
assert req.host == 'python.org'
assert req.port == 960
assert not req.ssl
def test_host_port_nondefault_https(make_request):
req = make_request('get', 'https://python.org:960/')
assert req.host == 'python.org'
assert req.port == 960
assert req.ssl
def test_host_port_default_ws(make_request):
req = make_request('get', 'ws://python.org/')
assert req.host == 'python.org'
assert req.port == 80
assert not req.ssl
def test_host_port_default_wss(make_request):
req = make_request('get', 'wss://python.org/')
assert req.host == 'python.org'
assert req.port == 443
assert req.ssl
def test_host_port_nondefault_ws(make_request):
req = make_request('get', 'ws://python.org:960/')
assert req.host == 'python.org'
assert req.port == 960
assert not req.ssl
def test_host_port_nondefault_wss(make_request):
req = make_request('get', 'wss://python.org:960/')
assert req.host == 'python.org'
assert req.port == 960
assert req.ssl
def test_host_port_err(make_request):
with pytest.raises(ValueError):
make_request('get', 'http://python.org:123e/')
def test_hostname_err(make_request):
with pytest.raises(ValueError):
make_request('get', 'http://:8080/')
def test_host_header_host_without_port(make_request):
req = make_request('get', 'http://python.org/')
assert req.headers['HOST'] == 'python.org'
def test_host_header_host_with_default_port(make_request):
req = make_request('get', 'http://python.org:80/')
assert req.headers['HOST'] == 'python.org'
def test_host_header_host_with_nondefault_port(make_request):
req = make_request('get', 'http://python.org:99/')
assert req.headers['HOST'] == 'python.org:99'
def test_host_header_explicit_host(make_request):
req = make_request('get', 'http://python.org/',
headers={'host': 'example.com'})
assert req.headers['HOST'] == 'example.com'
def test_host_header_explicit_host_with_port(make_request):
req = make_request('get', 'http://python.org/',
headers={'host': 'example.com:99'})
assert req.headers['HOST'] == 'example.com:99'
def test_default_loop(loop):
asyncio.set_event_loop(loop)
req = ClientRequest('get', URL('http://python.org/'))
assert req.loop is loop
def test_default_headers_useragent(make_request):
req = make_request('get', 'http://python.org/')
assert 'SERVER' not in req.headers
assert 'USER-AGENT' in req.headers
def test_default_headers_useragent_custom(make_request):
req = make_request('get', 'http://python.org/',
headers={'user-agent': 'my custom agent'})
assert 'USER-Agent' in req.headers
assert 'my custom agent' == req.headers['User-Agent']
def test_skip_default_useragent_header(make_request):
req = make_request('get', 'http://python.org/',
skip_auto_headers=set([upstr('user-agent')]))
assert 'User-Agent' not in req.headers
def test_headers(make_request):
req = make_request('get', 'http://python.org/',
headers={'Content-Type': 'text/plain'})
assert 'CONTENT-TYPE' in req.headers
assert req.headers['CONTENT-TYPE'] == 'text/plain'
assert req.headers['ACCEPT-ENCODING'] == 'gzip, deflate'
def test_headers_list(make_request):
req = make_request('get', 'http://python.org/',
headers=[('Content-Type', 'text/plain')])
assert 'CONTENT-TYPE' in req.headers
assert req.headers['CONTENT-TYPE'] == 'text/plain'
def test_headers_default(make_request):
req = make_request('get', 'http://python.org/',
headers={'ACCEPT-ENCODING': 'deflate'})
assert req.headers['ACCEPT-ENCODING'] == 'deflate'
def test_invalid_url(make_request):
with pytest.raises(ValueError):
make_request('get', 'hiwpefhipowhefopw')
def test_invalid_idna(make_request):
with pytest.raises(ValueError):
make_request('get', 'http://\u2061owhefopw.com')
def test_no_path(make_request):
req = make_request('get', 'http://python.org')
assert '/' == req.url.path
def test_ipv6_default_http_port(make_request):
req = make_request('get', 'http://[2001:db8::1]/')
assert req.host == '2001:db8::1'
assert req.port == 80
assert not req.ssl
def test_ipv6_default_https_port(make_request):
req = make_request('get', 'https://[2001:db8::1]/')
assert req.host == '2001:db8::1'
assert req.port == 443
assert req.ssl
def test_ipv6_nondefault_http_port(make_request):
req = make_request('get', 'http://[2001:db8::1]:960/')
assert req.host == '2001:db8::1'
assert req.port == 960
assert not req.ssl
def test_ipv6_nondefault_https_port(make_request):
req = make_request('get', 'https://[2001:db8::1]:960/')
assert req.host == '2001:db8::1'
assert req.port == 960
assert req.ssl
def test_basic_auth(make_request):
req = make_request('get', 'http://python.org',
auth=aiohttp.helpers.BasicAuth('nkim', '1234'))
assert 'AUTHORIZATION' in req.headers
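    # 'bmtpbToxMjM0' is base64('nkim:1234'), the credentials given above.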
assert 'Basic bmtpbToxMjM0' == req.headers['AUTHORIZATION']
def test_basic_auth_utf8(make_request):
req = make_request('get', 'http://python.org',
auth=aiohttp.helpers.BasicAuth('nkim', 'секрет',
'utf-8'))
assert 'AUTHORIZATION' in req.headers
assert 'Basic bmtpbTrRgdC10LrRgNC10YI=' == req.headers['AUTHORIZATION']
def test_basic_auth_tuple_forbidden(make_request):
with pytest.raises(TypeError):
make_request('get', 'http://python.org',
auth=('nkim', '1234'))
def test_basic_auth_from_url(make_request):
req = make_request('get', 'http://nkim:1234@python.org')
assert 'AUTHORIZATION' in req.headers
assert 'Basic bmtpbToxMjM0' == req.headers['AUTHORIZATION']
assert 'python.org' == req.host
def test_basic_auth_from_url_overridden(make_request):
req = make_request('get', 'http://garbage@python.org',
auth=aiohttp.BasicAuth('nkim', '1234'))
assert 'AUTHORIZATION' in req.headers
assert 'Basic bmtpbToxMjM0' == req.headers['AUTHORIZATION']
assert 'python.org' == req.host
def test_path_is_not_double_encoded1(make_request):
req = make_request('get', "http://0.0.0.0/get/test case")
assert req.url.raw_path == "/get/test%20case"
def test_path_is_not_double_encoded2(make_request):
req = make_request('get', "http://0.0.0.0/get/test%2fcase")
assert req.url.raw_path == "/get/test%2Fcase"
def test_path_is_not_double_encoded3(make_request):
req = make_request('get', "http://0.0.0.0/get/test%20case")
assert req.url.raw_path == "/get/test%20case"
def test_path_safe_chars_preserved(make_request):
req = make_request('get', "http://0.0.0.0/get/:=")
assert req.url.path == "/get/:="
def test_params_are_added_before_fragment1(make_request):
req = make_request('GET', "http://example.com/path#fragment",
params={"a": "b"})
assert str(req.url) == "http://example.com/path?a=b"
def test_params_are_added_before_fragment2(make_request):
req = make_request('GET', "http://example.com/path?key=value#fragment",
params={"a": "b"})
assert str(req.url) == "http://example.com/path?key=value&a=b"
def test_path_not_contain_fragment1(make_request):
req = make_request('GET', "http://example.com/path#fragment")
assert req.url.path == "/path"
def test_path_not_contain_fragment2(make_request):
req = make_request('GET', "http://example.com/path?key=value#fragment")
assert str(req.url) == "http://example.com/path?key=value"
def test_cookies(make_request):
req = make_request('get', 'http://test.com/path',
cookies={'cookie1': 'val1'})
assert 'COOKIE' in req.headers
assert 'cookie1=val1' == req.headers['COOKIE']
def test_cookies_merge_with_headers(make_request):
req = make_request('get', 'http://test.com/path',
headers={'cookie': 'cookie1=val1'},
cookies={'cookie2': 'val2'})
assert 'cookie1=val1; cookie2=val2' == req.headers['COOKIE']
def test_unicode_get1(make_request):
req = make_request('get', 'http://python.org',
params={'foo': 'f\xf8\xf8'})
assert 'http://python.org/?foo=f%C3%B8%C3%B8' == str(req.url)
def test_unicode_get2(make_request):
req = make_request('', 'http://python.org',
params={'f\xf8\xf8': 'f\xf8\xf8'})
assert 'http://python.org/?f%C3%B8%C3%B8=f%C3%B8%C3%B8' == str(req.url)
def test_unicode_get3(make_request):
req = make_request('', 'http://python.org', params={'foo': 'foo'})
assert 'http://python.org/?foo=foo' == str(req.url)
def test_unicode_get4(make_request):
def join(*suffix):
return urllib.parse.urljoin('http://python.org/', '/'.join(suffix))
req = make_request('', join('\xf8'), params={'foo': 'foo'})
assert 'http://python.org/%C3%B8?foo=foo' == str(req.url)
def test_query_multivalued_param(make_request):
for meth in ClientRequest.ALL_METHODS:
req = make_request(
meth, 'http://python.org',
params=(('test', 'foo'), ('test', 'baz')))
assert str(req.url) == 'http://python.org/?test=foo&test=baz'
def test_query_str_param(make_request):
for meth in ClientRequest.ALL_METHODS:
req = make_request(meth, 'http://python.org', params='test=foo')
assert str(req.url) == 'http://python.org/?test=foo'
def test_query_bytes_param_raises(make_request):
for meth in ClientRequest.ALL_METHODS:
with pytest.raises(TypeError):
make_request(meth, 'http://python.org', params=b'test=foo')
def test_query_str_param_is_not_encoded(make_request):
for meth in ClientRequest.ALL_METHODS:
req = make_request(meth, 'http://python.org', params='test=f+oo')
assert str(req.url) == 'http://python.org/?test=f+oo'
def test_params_update_path_and_url(make_request):
req = make_request('get', 'http://python.org',
params=(('test', 'foo'), ('test', 'baz')))
assert str(req.url) == 'http://python.org/?test=foo&test=baz'
def test_params_empty_path_and_url(make_request):
req_empty = make_request('get', 'http://python.org', params={})
assert str(req_empty.url) == 'http://python.org'
req_none = make_request('get', 'http://python.org')
assert str(req_none.url) == 'http://python.org'
def test_gen_netloc_all(make_request):
req = make_request('get',
'https://aiohttp:pwpwpw@' +
'12345678901234567890123456789' +
'012345678901234567890:8080')
assert req.headers['HOST'] == '12345678901234567890123456789' +\
'012345678901234567890:8080'
def test_gen_netloc_no_port(make_request):
req = make_request('get',
'https://aiohttp:pwpwpw@' +
'12345678901234567890123456789' +
'012345678901234567890/')
assert req.headers['HOST'] == '12345678901234567890123456789' +\
'012345678901234567890'
@asyncio.coroutine
def test_no_content_length(loop):
req = ClientRequest('get', URL('http://python.org'), loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '0' == req.headers.get('CONTENT-LENGTH')
yield from req.close()
resp.close()
@asyncio.coroutine
def test_no_content_length2(loop):
req = ClientRequest('head', URL('http://python.org'), loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '0' == req.headers.get('CONTENT-LENGTH')
yield from req.close()
resp.close()
def test_content_type_auto_header_get(loop):
req = ClientRequest('get', URL('http://python.org'), loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'CONTENT-TYPE' not in req.headers
resp.close()
def test_content_type_auto_header_form(loop):
req = ClientRequest('post', URL('http://python.org'),
data={'hey': 'you'}, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'application/x-www-form-urlencoded' == \
req.headers.get('CONTENT-TYPE')
resp.close()
def test_content_type_auto_header_bytes(loop):
req = ClientRequest('post', URL('http://python.org'), data=b'hey you',
loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'application/octet-stream' == req.headers.get('CONTENT-TYPE')
resp.close()
def test_content_type_skip_auto_header_bytes(loop):
req = ClientRequest('post', URL('http://python.org'), data=b'hey you',
skip_auto_headers={'Content-Type'},
loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'CONTENT-TYPE' not in req.headers
resp.close()
def test_content_type_skip_auto_header_form(loop):
req = ClientRequest('post', URL('http://python.org'),
data={'hey': 'you'}, loop=loop,
skip_auto_headers={'Content-Type'})
resp = req.send(mock.Mock(), mock.Mock())
assert 'CONTENT-TYPE' not in req.headers
resp.close()
def test_content_type_auto_header_content_length_no_skip(loop):
req = ClientRequest('get', URL('http://python.org'),
data=io.BytesIO(b'hey'),
skip_auto_headers={'Content-Length'},
loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert req.headers.get('CONTENT-LENGTH') == '3'
resp.close()
@asyncio.coroutine
def test_post_data(loop):
for meth in ClientRequest.POST_METHODS:
req = ClientRequest(
meth, URL('http://python.org/'),
data={'life': '42'}, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '/' == req.url.path
assert b'life=42' == req.body
assert 'application/x-www-form-urlencoded' ==\
req.headers['CONTENT-TYPE']
yield from req.close()
resp.close()
@asyncio.coroutine
def test_pass_falsy_data(loop):
with mock.patch(
'aiohttp.client_reqrep.ClientRequest.update_body_from_data'):
req = ClientRequest(
'post', URL('http://python.org/'),
data={}, loop=loop)
req.update_body_from_data.assert_called_once_with({}, frozenset())
yield from req.close()
@asyncio.coroutine
def test_pass_falsy_data_file(loop, tmpdir):
testfile = tmpdir.join('tmpfile').open('w+b')
testfile.write(b'data')
testfile.seek(0)
skip = frozenset([hdrs.CONTENT_TYPE])
req = ClientRequest(
'post', URL('http://python.org/'),
data=testfile,
skip_auto_headers=skip,
loop=loop)
assert req.headers.get('CONTENT-LENGTH', None) is not None
yield from req.close()
@asyncio.coroutine
def test_get_with_data(loop):
for meth in ClientRequest.GET_METHODS:
req = ClientRequest(
meth, URL('http://python.org/'), data={'life': '42'},
loop=loop)
assert '/' == req.url.path
assert b'life=42' == req.body
yield from req.close()
@asyncio.coroutine
def test_bytes_data(loop):
for meth in ClientRequest.POST_METHODS:
req = ClientRequest(
meth, URL('http://python.org/'),
data=b'binary data', loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '/' == req.url.path
assert b'binary data' == req.body
assert 'application/octet-stream' == req.headers['CONTENT-TYPE']
yield from req.close()
resp.close()
@asyncio.coroutine
def test_content_encoding(loop):
req = ClientRequest('get', URL('http://python.org/'), data='foo',
compress='deflate', loop=loop)
with mock.patch('aiohttp.client_reqrep.aiohttp') as m_http:
resp = req.send(mock.Mock(), mock.Mock())
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
assert req.headers['CONTENT-ENCODING'] == 'deflate'
m_http.Request.return_value\
.add_compression_filter.assert_called_with('deflate')
yield from req.close()
resp.close()
@asyncio.coroutine
def test_content_encoding_dont_set_headers_if_no_body(loop):
req = ClientRequest('get', URL('http://python.org/'),
compress='deflate', loop=loop)
with mock.patch('aiohttp.client_reqrep.aiohttp'):
resp = req.send(mock.Mock(), mock.Mock())
assert 'TRANSFER-ENCODING' not in req.headers
assert 'CONTENT-ENCODING' not in req.headers
yield from req.close()
resp.close()
@asyncio.coroutine
def test_content_encoding_header(loop):
req = ClientRequest(
'get', URL('http://python.org/'), data='foo',
headers={'Content-Encoding': 'deflate'}, loop=loop)
with mock.patch('aiohttp.client_reqrep.aiohttp') as m_http:
resp = req.send(mock.Mock(), mock.Mock())
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
assert req.headers['CONTENT-ENCODING'] == 'deflate'
m_http.Request.return_value\
.add_compression_filter.assert_called_with('deflate')
m_http.Request.return_value\
.add_chunking_filter.assert_called_with(8192)
yield from req.close()
resp.close()
@asyncio.coroutine
def test_chunked(loop):
req = ClientRequest(
'get', URL('http://python.org/'),
headers={'TRANSFER-ENCODING': 'gzip'}, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'gzip' == req.headers['TRANSFER-ENCODING']
yield from req.close()
resp.close()
@asyncio.coroutine
def test_chunked2(loop):
req = ClientRequest(
'get', URL('http://python.org/'),
headers={'Transfer-encoding': 'chunked'}, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'chunked' == req.headers['TRANSFER-ENCODING']
yield from req.close()
resp.close()
@asyncio.coroutine
def test_chunked_explicit(loop):
req = ClientRequest(
'get', URL('http://python.org/'), chunked=True, loop=loop)
with mock.patch('aiohttp.client_reqrep.aiohttp') as m_http:
resp = req.send(mock.Mock(), mock.Mock())
assert 'chunked' == req.headers['TRANSFER-ENCODING']
m_http.Request.return_value\
.add_chunking_filter.assert_called_with(8192)
yield from req.close()
resp.close()
@asyncio.coroutine
def test_chunked_explicit_size(loop):
req = ClientRequest(
'get', URL('http://python.org/'), chunked=1024, loop=loop)
with mock.patch('aiohttp.client_reqrep.aiohttp') as m_http:
resp = req.send(mock.Mock(), mock.Mock())
assert 'chunked' == req.headers['TRANSFER-ENCODING']
m_http.Request.return_value\
.add_chunking_filter.assert_called_with(1024)
yield from req.close()
resp.close()
@asyncio.coroutine
def test_chunked_length(loop):
req = ClientRequest(
'get', URL('http://python.org/'),
headers={'CONTENT-LENGTH': '1000'}, chunked=1024, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
assert 'CONTENT-LENGTH' not in req.headers
yield from req.close()
resp.close()
@asyncio.coroutine
def test_file_upload_not_chunked(loop):
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
req = ClientRequest(
'post', URL('http://python.org/'),
data=f,
loop=loop)
assert not req.chunked
assert req.headers['CONTENT-LENGTH'] == str(os.path.getsize(fname))
yield from req.close()
@asyncio.coroutine
def test_precompressed_data_stays_intact(loop):
data = zlib.compress(b'foobar')
req = ClientRequest(
'post', URL('http://python.org/'),
data=data,
headers={'CONTENT-ENCODING': 'deflate'},
compress=False,
loop=loop)
assert not req.compress
assert not req.chunked
assert req.headers['CONTENT-ENCODING'] == 'deflate'
yield from req.close()
@asyncio.coroutine
def test_file_upload_not_chunked_seek(loop):
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
f.seek(100)
req = ClientRequest(
'post', URL('http://python.org/'),
data=f,
loop=loop)
assert req.headers['CONTENT-LENGTH'] == \
str(os.path.getsize(fname) - 100)
yield from req.close()
@asyncio.coroutine
def test_file_upload_force_chunked(loop):
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
req = ClientRequest(
'post', URL('http://python.org/'),
data=f,
chunked=True,
loop=loop)
assert req.chunked
assert 'CONTENT-LENGTH' not in req.headers
yield from req.close()
def test_expect100(loop):
req = ClientRequest('get', URL('http://python.org/'),
expect100=True, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '100-continue' == req.headers['EXPECT']
assert req._continue is not None
req.terminate()
resp.close()
def test_expect_100_continue_header(loop):
req = ClientRequest('get', URL('http://python.org/'),
headers={'expect': '100-continue'}, loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert '100-continue' == req.headers['EXPECT']
assert req._continue is not None
req.terminate()
resp.close()
@asyncio.coroutine
def test_data_stream(loop):
def gen():
yield b'binary data'
return b' result'
req = ClientRequest(
'POST', URL('http://python.org/'), data=gen(), loop=loop)
assert req.chunked
assert inspect.isgenerator(req.body)
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
transport = mock.Mock()
resp = req.send(transport, mock.Mock())
assert isinstance(req._writer, asyncio.Future)
yield from resp.wait_for_close()
assert req._writer is None
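    # Chunked transfer encoding frames each chunk as '<hex length>\r\n<data>\r\n'
    # and terminates with a zero-length chunk; 0x12 == 18 == len(b'binary data result').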
assert transport.write.mock_calls[-2:] == [
mock.call(b'12\r\nbinary data result\r\n'),
mock.call(b'0\r\n\r\n')]
yield from req.close()
@asyncio.coroutine
def test_data_file(loop):
req = ClientRequest(
'POST', URL('http://python.org/'),
data=io.BufferedReader(io.BytesIO(b'*' * 2)),
loop=loop)
assert req.chunked
assert isinstance(req.body, io.IOBase)
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
transport = mock.Mock()
resp = req.send(transport, mock.Mock())
assert isinstance(req._writer, asyncio.Future)
yield from resp.wait_for_close()
assert req._writer is None
assert transport.write.mock_calls[-2:] == [
mock.call(b'2\r\n' + b'*' * 2 + b'\r\n'),
mock.call(b'0\r\n\r\n')]
yield from req.close()
@asyncio.coroutine
def test_data_stream_exc(loop):
fut = helpers.create_future(loop)
def gen():
yield b'binary data'
yield from fut
req = ClientRequest(
'POST', URL('http://python.org/'), data=gen(), loop=loop)
assert req.chunked
assert inspect.isgenerator(req.body)
assert req.headers['TRANSFER-ENCODING'] == 'chunked'
@asyncio.coroutine
def exc():
yield from asyncio.sleep(0.01, loop=loop)
fut.set_exception(ValueError)
helpers.ensure_future(exc(), loop=loop)
protocol = mock.Mock()
resp = req.send(mock.Mock(), protocol)
connection = mock.Mock()
resp._connection = connection
yield from req._writer
assert connection.close.called
assert protocol.set_exception.called
yield from req.close()
@asyncio.coroutine
def test_data_stream_not_bytes(loop):
@asyncio.coroutine
def gen():
yield object()
req = ClientRequest(
'POST', URL('http://python.org/'), data=gen(), loop=loop)
protocol = mock.Mock()
resp = req.send(mock.Mock(), protocol)
yield from req._writer
assert protocol.set_exception.called
yield from req.close()
resp.close()
@asyncio.coroutine
def test_data_stream_exc_chain(loop):
fut = helpers.create_future(loop)
def gen():
yield from fut
req = ClientRequest('POST', URL('http://python.org/'),
data=gen(), loop=loop)
inner_exc = ValueError()
@asyncio.coroutine
def exc():
yield from asyncio.sleep(0.01, loop=loop)
fut.set_exception(inner_exc)
helpers.ensure_future(exc(), loop=loop)
protocol = mock.Mock()
resp = req.send(mock.Mock(), protocol)
connection = mock.Mock()
resp._connection = connection
yield from req._writer
assert connection.close.called
assert protocol.set_exception.called
outer_exc = protocol.set_exception.call_args[0][0]
assert isinstance(outer_exc, aiohttp.ClientRequestError)
assert inner_exc is outer_exc.__context__
assert inner_exc is outer_exc.__cause__
yield from req.close()
@asyncio.coroutine
def test_data_stream_continue(loop):
def gen():
yield b'binary data'
return b' result'
req = ClientRequest(
'POST', URL('http://python.org/'), data=gen(),
expect100=True, loop=loop)
assert req.chunked
assert inspect.isgenerator(req.body)
def coro():
yield from asyncio.sleep(0.0001, loop=loop)
req._continue.set_result(1)
helpers.ensure_future(coro(), loop=loop)
transport = mock.Mock()
resp = req.send(transport, mock.Mock())
yield from req._writer
assert transport.write.mock_calls[-2:] == [
mock.call(b'12\r\nbinary data result\r\n'),
mock.call(b'0\r\n\r\n')]
yield from req.close()
resp.close()
@asyncio.coroutine
def test_data_continue(loop):
req = ClientRequest(
'POST', URL('http://python.org/'), data=b'data',
expect100=True, loop=loop)
def coro():
yield from asyncio.sleep(0.0001, loop=loop)
req._continue.set_result(1)
helpers.ensure_future(coro(), loop=loop)
transport = mock.Mock()
resp = req.send(transport, mock.Mock())
assert 1 == len(transport.write.mock_calls)
yield from req._writer
assert transport.write.mock_calls[-1] == mock.call(b'data')
yield from req.close()
resp.close()
@asyncio.coroutine
def test_close(loop):
@asyncio.coroutine
def gen():
yield from asyncio.sleep(0.00001, loop=loop)
return b'result'
req = ClientRequest(
'POST', URL('http://python.org/'), data=gen(), loop=loop)
transport = mock.Mock()
resp = req.send(transport, mock.Mock())
yield from req.close()
assert transport.write.mock_calls[-2:] == [
mock.call(b'6\r\nresult\r\n'),
mock.call(b'0\r\n\r\n')]
yield from req.close()
resp.close()
@asyncio.coroutine
def test_custom_response_class(loop):
class CustomResponse(ClientResponse):
def read(self, decode=False):
return 'customized!'
req = ClientRequest(
'GET', URL('http://python.org/'), response_class=CustomResponse,
loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert 'customized!' == resp.read()
yield from req.close()
resp.close()
@asyncio.coroutine
def test_terminate(loop):
req = ClientRequest('get', URL('http://python.org'), loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert req._writer is not None
writer = req._writer = mock.Mock()
req.terminate()
assert req._writer is None
writer.cancel.assert_called_with()
resp.close()
def test_terminate_with_closed_loop(loop):
req = ClientRequest('get', URL('http://python.org'), loop=loop)
resp = req.send(mock.Mock(), mock.Mock())
assert req._writer is not None
writer = req._writer = mock.Mock()
loop.close()
req.terminate()
assert req._writer is None
assert not writer.cancel.called
resp.close()
def test_terminate_without_writer(loop):
req = ClientRequest('get', URL('http://python.org'), loop=loop)
assert req._writer is None
req.terminate()
assert req._writer is None
@asyncio.coroutine
def test_custom_req_rep(loop):
conn = None
class CustomResponse(ClientResponse):
@asyncio.coroutine
def start(self, connection, read_until_eof=False):
nonlocal conn
conn = connection
self.status = 123
self.reason = 'Test OK'
self.headers = CIMultiDictProxy(CIMultiDict())
self.cookies = SimpleCookie()
return
called = False
class CustomRequest(ClientRequest):
def send(self, writer, reader):
resp = self.response_class(self.method,
self.url,
writer=self._writer,
continue100=self._continue)
resp._post_init(self.loop)
self.response = resp
nonlocal called
called = True
return resp
@asyncio.coroutine
def create_connection(req):
assert isinstance(req, CustomRequest)
return mock.Mock(), mock.Mock()
connector = BaseConnector(loop=loop)
connector._create_connection = create_connection
resp = yield from aiohttp.request(
'get',
URL('http://example.com/path/to'),
request_class=CustomRequest,
response_class=CustomResponse,
connector=connector,
loop=loop)
assert isinstance(resp, CustomResponse)
assert called
resp.close()
conn.close()
|
{
"content_hash": "5c01968f0a682eee4d6e9e41839da7f1",
"timestamp": "",
"source": "github",
"line_count": 1036,
"max_line_length": 77,
"avg_line_length": 30.617760617760617,
"alnum_prop": 0.6183480453972258,
"repo_name": "moden-py/aiohttp",
"id": "659502249a523fbc0da8f402eaf6f7fdbef8a4a3",
"size": "31743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4885"
},
{
"name": "Makefile",
"bytes": "3042"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1096434"
},
{
"name": "Shell",
"bytes": "2298"
}
],
"symlink_target": ""
}
|
__all__ = ["Login","Register","AccessToken"]
|
{
"content_hash": "8f2e295918015b88bdfdd7b9326c82f5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 44,
"alnum_prop": 0.6136363636363636,
"repo_name": "KJSCE-C12/VDOC",
"id": "d9dff03bf79f03fc3d96478cae63b4f2ea5f3702",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/Cloud/VDOC/Depends/Models/UserModel/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16490"
}
],
"symlink_target": ""
}
|
from .. import DataType
class LoadFitting:
def __init__(self, parent=None):
self.parent = parent
self.session_dict = parent.session_dict
def table_dictionary(self):
self.parent.session_dict['fitting'] = self.session_dict["fitting"]
self.parent.table_loaded_from_session = True
self.parent.data_metadata[DataType.bin]['ui_accessed'] = self.parent.session_dict['fitting']['ui accessed']
self.parent.image_view_settings[DataType.fitting]['state'] = \
self.parent.session_dict[DataType.fitting]['image view state']
self.parent.image_view_settings[DataType.fitting]['histogram'] = \
self.parent.session_dict[DataType.fitting]['image view histogram']
|
{
"content_hash": "03c47c028f99e900c3f53c0b0d0a7516",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 115,
"avg_line_length": 41,
"alnum_prop": 0.6693766937669376,
"repo_name": "ornlneutronimaging/iBeatles",
"id": "a0a3d406381b3e4492d82fb07cd63710a4ec3842",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibeatles/session/load_fitting_tab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "870567"
}
],
"symlink_target": ""
}
|
from six import assertRaisesRegex
from unittest import TestCase
from dark.btop import countGaps, parseBtop, btop2cigar
def btop2cigarStr(btopString, concise=False, aa=False):
"""
    Call btop2cigar and turn its generator result into a string.
    See btop2cigar for the explanation of the args.
"""
return ''.join(btop2cigar(btopString, concise, aa))
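# A brief editor's note on BTOP (BLAST trace-back operations) notation, as
# exercised by the tests below: a number encodes that many identical aligned
# positions; a two-letter pair encodes a mismatch as (query letter, subject
# letter); a '-' in either position of a pair encodes a gap in the
# corresponding sequence.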
class TestParseBtop(TestCase):
"""
Tests for the parseBtop function.
"""
def testOneLetter(self):
"""
An argument with just one letter must result in a ValueError.
"""
error = ("^BTOP string 'F' has a trailing query letter 'F' with no "
"corresponding subject letter$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('F'))
def testConsecutiveGaps(self):
"""
An argument that has two consecutive gaps characters must result in a
ValueError.
"""
error = "^BTOP string '36--' has two consecutive gaps at offset 2$"
assertRaisesRegex(self, ValueError, error, list, parseBtop('36--'))
def testConsecutiveIdentical(self):
"""
An argument that has two consecutive identical (non-gap) characters
must result in a ValueError.
"""
error = ("^BTOP string '36AA' has two consecutive identical 'A' "
"letters at offset 2$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('36AA'))
def testEmpty(self):
"""
An empty argument must result in an empty list.
"""
self.assertEqual([], list(parseBtop('')))
def testOneNumberWithTrailingOneLetter(self):
"""
An argument that is a number with a single letter must result in a
ValueError.
"""
error = ("^BTOP string '36F' has a trailing query letter 'F' with no "
"corresponding subject letter$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('36F'))
def testThreeLetters(self):
"""
An argument that has three letters must result in a ValueError.
"""
error = ("^BTOP string 'ABC' has a trailing query letter 'C' with no "
"corresponding subject letter$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('ABC'))
def testOneLetterThenANumber(self):
"""
An argument that is a single letter followed by a number must result
in a ValueError.
"""
error = ("^BTOP string 'F36' has a query letter 'F' at offset 0 with "
"no corresponding subject letter$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('F36'))
def testTwoNumbersWithOneLetterBetween(self):
"""
An argument that is a number, a single letter, and another number must
result in a ValueError.
"""
error = ("^BTOP string '36F77' has a query letter 'F' at offset 2 "
"with no corresponding subject letter$")
assertRaisesRegex(self, ValueError, error, list, parseBtop('36F77'))
def testOneNumber(self):
"""
An argument that is just one number must give the expected result.
"""
self.assertEqual([54], list(parseBtop('54')))
def testOneNumberThatIsZero(self):
"""
An argument that is just the number zero must give the expected result.
"""
self.assertEqual([0], list(parseBtop('0')))
def testOneNumberWithLeadingZeroes(self):
"""
An argument that is just one number with leading zeroes must give the
expected result.
"""
self.assertEqual([54], list(parseBtop('0054')))
def testOneQuerySubjectPair(self):
"""
An argument that is a single query/subject letter pair must give the
expected result.
"""
self.assertEqual([('A', 'G')], list(parseBtop('AG')))
def testTwoQuerySubjectPairs(self):
"""
An argument that has two query/subject letter pairs must give the
expected result.
"""
self.assertEqual([('A', 'G'), ('C', 'T')], list(parseBtop('AGCT')))
def testOneQuerySubjectPairAndANumber(self):
"""
An argument that is a single query/subject letter pair followed by a
number must give the expected result.
"""
self.assertEqual([('A', 'G'), 33], list(parseBtop('AG33')))
class TestCountGaps(TestCase):
"""
Tests for the countGaps function.
"""
def testEmpty(self):
"""
An argument with an empty string must produce the expected result.
"""
self.assertEqual((0, 0), countGaps(''))
def testNumberOnly(self):
"""
An argument with just a number must produce the expected result.
"""
self.assertEqual((0, 0), countGaps('88'))
def testLettersButNoGaps(self):
"""
An argument with just letters must produce the expected result.
"""
self.assertEqual((0, 0), countGaps('FGAC'))
def testOneQueryGap(self):
"""
An argument with just a query gap must produce the expected result.
"""
self.assertEqual((1, 0), countGaps('-G'))
def testOneSubjectGap(self):
"""
An argument with just a subject gap must produce the expected result.
"""
self.assertEqual((0, 1), countGaps('G-'))
def testOneQueryAndOneSubjectGap(self):
"""
An argument with a query and a subject gap must produce the expected
result.
"""
self.assertEqual((1, 1), countGaps('G--G'))
def testMultipleQueryAndSubjectGaps(self):
"""
An argument with multiple query and a subject gaps must produce the
expected result.
"""
self.assertEqual((3, 2), countGaps('-GG-34-T-T39F-'))
class TestBtop2CigarPrecise(TestCase):
"""
Tests for the btop2cigar function when concise is False.
"""
def testEmpty(self):
"""
An empty BTOP string must result in an empty CIGAR string.
"""
self.assertEqual('', btop2cigarStr('', concise=False))
def testMixedMatch(self):
"""
If a BTOP string specifies that all characters match (in the imprecise
CIGAR sense where M could be identical characters or not), the CIGAR
string must be all Ms.
"""
self.assertEqual('7M', btop2cigarStr('2GC3AT', concise=False))
    def testReferenceInsertion(self):
"""
        If a BTOP string specifies that the query has a character but the
subject (reference) does not, the CIGAR string must indicate an
insertion to the reference.
"""
self.assertEqual('1I', btop2cigarStr('A-', concise=False))
def testQueryInsertion(self):
"""
        If a BTOP string specifies that the subject (reference) has a character
        but the query does not, the CIGAR string must indicate a deletion in
the reference.
"""
self.assertEqual('1D', btop2cigarStr('-A', concise=False))
def testAll(self):
"""
If a BTOP string specifies all possible variations, the CIGAR
string must be correct.
"""
self.assertEqual('7M2I4M2D5M',
btop2cigarStr('2GC3ATC-G-4-T-A5', concise=False))
def testAllList(self):
"""
If a BTOP string specifies all possible variations, the CIGAR
string components must be correct.
"""
self.assertEqual(['7M', '2I', '4M', '2D', '5M'],
list(btop2cigar('2GC3ATC-G-4-T-A5', concise=False)))
def testAllAA(self):
"""
If a BTOP string specifies all possible variations, and we indicate
that the BTOP string refers to amino acids, the CIGAR string must be
correct (i.e., all counts must be tripled).
"""
self.assertEqual(
'21M6I12M6D15M',
btop2cigarStr('2GC3ATC-G-4-T-A5', concise=False, aa=True))
def testAllAAList(self):
"""
If a BTOP string specifies all possible variations, and we indicate
that the BTOP string refers to amino acids, the CIGAR string components
must be correct (i.e., all counts must be tripled).
"""
self.assertEqual(
['21M', '6I', '12M', '6D', '15M'],
list(btop2cigar('2GC3ATC-G-4-T-A5', concise=False, aa=True)))
class TestBtop2CigarConcise(TestCase):
"""
Tests for the btop2cigar function when concise is True.
"""
def testEmpty(self):
"""
An empty BTOP string must result in an empty CIGAR string.
"""
self.assertEqual('', btop2cigarStr('', concise=True))
def testMixedMatch(self):
"""
If a BTOP string specifies that some characters match and some do
not, the CIGAR string must be specific about the matches / mismatches.
"""
self.assertEqual('2=1X3=1X', btop2cigarStr('2GC3AT', concise=True))
    def testReferenceInsertion(self):
        """
        If a BTOP string specifies that the query has a character but the
        subject (reference) does not, the CIGAR string must indicate an
        insertion to the reference.
"""
self.assertEqual('1I', btop2cigarStr('A-', concise=True))
def testQueryInsertion(self):
"""
        If a BTOP string specifies that the subject (reference) has a
        character but the query does not, the CIGAR string must indicate a
        deletion in the reference.
"""
self.assertEqual('1D', btop2cigarStr('-A', concise=True))
def testAll(self):
"""
If a BTOP string specifies all possible variations, the CIGAR
string must be correct.
"""
self.assertEqual('2=1X3=1X2I4=2D5=',
btop2cigarStr('2GC3ATC-G-4-T-A5', concise=True))
def testWithAATrue(self):
"""
If concise and aa are both set to True, a ValueError must be raised.
"""
error = '^aa and concise cannot both be True$'
assertRaisesRegex(self, ValueError, error, btop2cigarStr, '',
concise=True, aa=True)
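# Illustrative sketch (not the dark-matter implementation under test):
# minimal versions of parseBtop, countGaps and btop2cigar that satisfy the
# expectations encoded in the tests above. Names ending in "Sketch" are
# hypothetical; the only assumptions are the BTOP conventions the tests
# exercise (digit runs are match counts, two-character tokens are
# query/subject pairs, and '-' marks a gap on that side).
import re
def parseBtopSketch(btop):
    """Yield ints for match runs and (query, subject) tuples otherwise."""
    for token in re.findall(r'\d+|..', btop):
        if token.isdigit():
            yield int(token)
        else:
            yield (token[0], token[1])
def countGapsSketch(btop):
    """Return (queryGaps, subjectGaps) counted from the parsed BTOP."""
    queryGaps = subjectGaps = 0
    for item in parseBtopSketch(btop):
        if isinstance(item, tuple):
            queryGaps += item[0] == '-'
            subjectGaps += item[1] == '-'
    return (queryGaps, subjectGaps)
def btop2cigarSketch(btop, concise=False, aa=False):
    """Yield CIGAR components, merging adjacent runs of the same operation."""
    if concise and aa:
        raise ValueError('aa and concise cannot both be True')
    factor = 3 if aa else 1  # one amino acid covers three nucleotides
    currentOp, count = None, 0
    for item in parseBtopSketch(btop):
        if isinstance(item, int):
            op, n = ('=' if concise else 'M'), item
        elif item[1] == '-':
            op, n = 'I', 1  # query has a residue, the reference does not
        elif item[0] == '-':
            op, n = 'D', 1  # reference has a residue, the query does not
        else:
            op, n = ('X' if concise else 'M'), 1
        if op == currentOp:
            count += n
        else:
            if currentOp is not None and count:
                yield '%d%s' % (count * factor, currentOp)
            currentOp, count = op, n
    if currentOp is not None and count:
        yield '%d%s' % (count * factor, currentOp)
assert list(parseBtopSketch('AG33')) == [('A', 'G'), 33]
assert countGapsSketch('-GG-34-T-T39F-') == (3, 2)
assert ''.join(btop2cigarSketch('2GC3ATC-G-4-T-A5')) == '7M2I4M2D5M'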
|
{
"content_hash": "ade7237ae6e72f5ab561aa84225c41a0",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 34.65423728813559,
"alnum_prop": 0.6052039518732271,
"repo_name": "acorg/dark-matter",
"id": "72d8fbcaa3446593910a7e03d7cb7201caf57791",
"size": "10223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_btop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3390"
},
{
"name": "Python",
"bytes": "2402522"
},
{
"name": "Shell",
"bytes": "11636"
}
],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
class test_raw_pism_truncation(BaseBERITestCase):
def test_addressing(self):
'''Test PISM isn't mangling addresses.'''
self.assertRegisterEqual(self.MIPS.a0, 0x1111, "Load of stored double word failed")
|
{
"content_hash": "abe019235776e290463e3fd8cc8e1dc5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 91,
"avg_line_length": 44.666666666666664,
"alnum_prop": 0.7388059701492538,
"repo_name": "8l/beri",
"id": "e23535c057e944a2be816ff193c26d96afbcc9d4",
"size": "1810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/mem/test_raw_pism_truncation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
from .sink import (
HttpTransportSink
)
from ..core import Scales
from ..loadbalancer.aperture import ApertureBalancerSink
from ..pool import SingletonPoolSink
class _HttpIface(object):
def Get(self, url, **kwargs):
"""Issue an HTTP GET to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Post(self, url, **kwargs):
"""Issue an HTTP POST to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Put(self, url, **kwargs):
"""Issue an HTTP PUT to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Delete(self, url, **kwargs):
"""Issue an HTTP DELETE to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Patch(self, url, **kwargs):
"""Issue an HTTP PATCH to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Head(self, url, **kwargs):
"""Issue an HTTP HEAD to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
def Options(self, url, **kwargs):
"""Issue an HTTP OPTIONS to url.
kwargs aligns with arguments to requests, although timeout is ignored.
"""
pass
class Http(object):
@staticmethod
def NewBuilder():
return Scales.NewBuilder(_HttpIface)\
.WithSink(ApertureBalancerSink.Builder())\
.WithSink(SingletonPoolSink.Builder())\
.WithSink(HttpTransportSink.Builder())
@staticmethod
def NewClient(uri, timeout=60):
return Http.NewBuilder()\
.SetUri(uri)\
.SetTimeout(timeout)\
.Build()
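# A hedged usage sketch: the URI below is hypothetical, but it shows the
# intent of the builder above, which stacks sinks so each request flows
# through the aperture load balancer, then a singleton connection pool, and
# finally the HTTP transport.
#
#   client = Http.NewClient('tcp://localhost:8080', timeout=30)
#   response = client.Get('/health')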
|
{
"content_hash": "f0a53f3dafc3df493ba68c94cda86369",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 23.397260273972602,
"alnum_prop": 0.6662763466042154,
"repo_name": "steveniemitz/scales",
"id": "19ec41c84bb328f4a2807159ad3a2a4ea140bd88",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scales/http/builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223553"
},
{
"name": "Shell",
"bytes": "60"
},
{
"name": "Thrift",
"bytes": "525"
}
],
"symlink_target": ""
}
|
from Tkinter import Tk
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append('i can copy to clipboardz?')
r.destroy()
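# Reading the clipboard back is the mirror operation; a sketch (it must run
# against a live root, i.e. before destroy() is called):
#
#   text = r.clipboard_get()
#
# Caveat: on X11 the clipboard contents set above can vanish once the Tk
# process that owns the selection exits.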
|
{
"content_hash": "cd36f0f3383700e57cb033404b1e2362",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.72,
"repo_name": "wolfdale/10MinutesEmailWrapper",
"id": "09ad503e7348a363551e5dfb55b3ade103da1167",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "768"
}
],
"symlink_target": ""
}
|
__author__ = 'gjp'
from django.test import TestCase
from mock import patch
from fiware_cloto.cloto.manager import RuleManager
from fiware_cloto.cloto.models import RuleModel
class RuleManagerTests(TestCase):
def setUp(self):
self.rule = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"hdd\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"net\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\", \"operation\": \"scaleUp\"}}'
self.rule_without_operation = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\"}}'
self.rule_empty_actionname = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"\", \"operation\": \"scaleUp\"}}'
self.rule_without_actionname = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"operation\": \"scaleUp\"}}'
self.rule_operation_unknown = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 98, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\", \"operation\": \"unknown\"}}'
self.rule_cpu_overlimit = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 101, \"operand\": \"greater\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\", \"operation\": \"scaleUp\"}}'
self.rule_operand_unknown = '{\"name\": \"test Name\", \"condition\": ' \
'{\"cpu\": {\"value\": 101, \"operand\": \"unknown\"},' \
' \"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\", \"operation\": \"scaleUp\"}}'
self.rule_condition_parameter_missing = '{\"name\": \"test Name\", \"condition\": ' \
'{\"mem\": {\"value\": 95, \"operand\": \"greater equal\"}},' \
'\"action\": {\"actionName\": \"notify-scale\", \"operation\": \"scaleUp\"}}'
self.tenantId = "tenantId"
self.serverId = "serverId"
@patch('fiware_cloto.cloto.manager.RuleManager.logger')
    def test_pimp_rule(self, mock_logging):
        """Test if method creates the first rule for a server and fails when updating it with fake information."""
rule = RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule)
self.assertIsInstance(rule, RuleModel)
self.assertIsNotNone(rule.ruleId)
self.assertTrue(mock_logging.info.called)
def test_pimp_rule_error_1(self):
"""Test if method throws error with malformed rule without action name."""
        with self.assertRaises(KeyError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_without_actionname)
def test_pimp_rule_error_2(self):
"""Test if method throws error with malformed rule without operation."""
        with self.assertRaises(KeyError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_without_operation)
def test_pimp_rule_error_3(self):
"""Test if method throws error with malformed rule with an empty action name."""
        with self.assertRaises(ValueError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_empty_actionname)
def test_pimp_rule_error_4(self):
"""Test if method throws error with malformed rule with an unknown operation."""
        with self.assertRaises(ValueError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_operation_unknown)
    def test_pimp_rule_error_5(self):
        """Test if method throws error with malformed rule, with a CPU value over 100."""
        with self.assertRaises(ValueError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_cpu_overlimit)
    def test_pimp_rule_error_6(self):
        """Test if method throws error with malformed rule with an unknown operand."""
        with self.assertRaises(ValueError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId, self.rule_operand_unknown)
def test_pimp_rule_condition_error_1(self):
"""Test if method throws error with malformed rule, with a missing parameter in the condition."""
        with self.assertRaises(KeyError):
            RuleManager.RuleManager().create_specific_rule(self.tenantId, self.serverId,
                                                           self.rule_condition_parameter_missing)
|
{
"content_hash": "8f792a9e888c631504a94845b6ef706a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 118,
"avg_line_length": 56.21153846153846,
"alnum_prop": 0.5398563120082107,
"repo_name": "Fiware/cloud.Cloto",
"id": "bc3b289069af60bf1bd70d26bab1007f14f32709",
"size": "6665",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fiware_cloto/cloto/tests/test_parsing_rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "50363"
},
{
"name": "Gherkin",
"bytes": "66746"
},
{
"name": "Python",
"bytes": "324830"
},
{
"name": "Shell",
"bytes": "9496"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..message_parser import MessageParser
__all__ = ['ClientMessageParser']
class ClientMessageParser(MessageParser):
pass
|
{
"content_hash": "843f7471b2f3a5de68e67b585144cf86",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.7520661157024794,
"repo_name": "foxdog-studios/pyddp",
"id": "1a7055a28521d88d60eb5d67470a2f29802010a9",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ddp/messages/client/client_message_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "182982"
},
{
"name": "Shell",
"bytes": "3484"
}
],
"symlink_target": ""
}
|
import re
from django import forms
from hydroshare.models import HydroShareResource
class MDLRadioButton(forms.RadioSelect):
def render(self, name, value, attrs=None, renderer=None):
"""Adds MDL HTML classes to label and input tags"""
html = super(MDLRadioButton, self).render(name, value, attrs=attrs, renderer=renderer)
html = re.sub(r'</?(ul|li).*?>', '', html)
html = re.sub(r'(<label )', r'\1class="mdl-radio mdl-js-radio mdl-js-ripple-effect" ', html)
html = re.sub(r'(<input )', r'\1class="mdl-radio__button" ', html)
return html
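    # Illustrative before/after for the substitutions above; the markup is
    # shortened and the exact widget HTML varies by Django version, so treat
    # it as an assumption:
    #   before: <ul><li><label for="id_x_0"><input type="radio" ...> A</label></li></ul>
    #   after:  <label class="mdl-radio mdl-js-radio mdl-js-ripple-effect" for="id_x_0">
    #           <input class="mdl-radio__button" type="radio" ...> A</label>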
class HydroShareSettingsForm(forms.Form):
def __init__(self, *args, **kwargs):
super(HydroShareSettingsForm, self).__init__(*args, **kwargs)
# if 'site_registration' in self.initial:
# site = self.initial['site_registration']
# elif len(args) > 0:
# site = args[0]['site_registration']
# self.resources = forms.ModelChoiceField(queryset=HydroShareResource.objects.filter(site_registration=site),
# required=False)
schedule_choices = (
('scheduled', 'Scheduled'),
('manual', 'Manual')
)
data_type_choices = (
('TS', 'Time Series'),
('LP', 'Leaf Pack Experiments'),
# ('SD', 'Stream Data')
)
schedule_freq_choices = (
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly')
)
pause_sharing = forms.BooleanField(initial=False, label='Pause Sharing', required=False)
site_registration = forms.CharField(max_length=255)
schedule_type = forms.ChoiceField(
widget=MDLRadioButton,
choices=schedule_choices,
initial='scheduled'
)
update_freq = forms.ChoiceField(
required=False,
widget=forms.Select,
choices=schedule_freq_choices,
initial='daily'
)
data_types = forms.MultipleChoiceField(
# TODO: When EnviroDIY supports multiple data types, the 'required' attribute should be set to True
required=False,
widget=forms.CheckboxSelectMultiple,
choices=data_type_choices,
initial='TS',
error_messages={'required': 'Please select at least one data type.'}
)
abstract = forms.CharField(
required=False,
widget=forms.Textarea,
label='Abstract'
)
title = forms.CharField(
required=False,
widget=forms.TextInput,
label='Resource Title',
)
resources = forms.ModelChoiceField(queryset=HydroShareResource.objects.all(), required=False)
# TODO: Make this a model form
# class Meta:
# model = HydroShareResource
# fields = ['hs_account', 'ext_id', 'site_registration', 'sync_type', 'update_freq', 'is_enabled', 'data_types']
class HydroShareResourceDeleteForm(forms.Form):
delete_external_resource = forms.BooleanField(
initial=False,
label="Delete connected resource in HydroShare.",
required=False)
|
{
"content_hash": "a9c029887950443b9983fff2ea1c0c9f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 120,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.6151067323481116,
"repo_name": "ODM2/ODM2WebSDL",
"id": "63513528ca4bbe9484e760efcc19a3a593d7b691",
"size": "3070",
"binary": false,
"copies": "1",
"ref": "refs/heads/StreamWatch",
"path": "src/hydroshare/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20704"
},
{
"name": "HTML",
"bytes": "125578"
},
{
"name": "JavaScript",
"bytes": "62876"
},
{
"name": "Python",
"bytes": "199614"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("comms", "0010_auto_20161206_1912")]
operations = [
migrations.AlterField(
model_name="channeldb",
name="db_attributes",
field=models.ManyToManyField(
help_text="attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).",
to="typeclasses.Attribute",
),
),
migrations.AlterField(
model_name="channeldb",
name="db_object_subscriptions",
field=models.ManyToManyField(
blank=True,
db_index=True,
related_name="object_subscription_set",
to="objects.ObjectDB",
verbose_name="subscriptions",
),
),
migrations.AlterField(
model_name="channeldb",
name="db_subscriptions",
field=models.ManyToManyField(
blank=True,
db_index=True,
related_name="subscription_set",
to=settings.AUTH_USER_MODEL,
verbose_name="subscriptions",
),
),
migrations.AlterField(
model_name="channeldb",
name="db_tags",
field=models.ManyToManyField(
help_text="tags on this object. Tags are simple string markers to identify, group and alias objects.",
to="typeclasses.Tag",
),
),
migrations.AlterField(
model_name="msg",
name="db_hide_from_channels",
field=models.ManyToManyField(
blank=True, related_name="hide_from_channels_set", to="comms.ChannelDB"
),
),
migrations.AlterField(
model_name="msg",
name="db_hide_from_objects",
field=models.ManyToManyField(
blank=True, related_name="hide_from_objects_set", to="objects.ObjectDB"
),
),
migrations.AlterField(
model_name="msg",
name="db_hide_from_accounts",
field=models.ManyToManyField(
blank=True, related_name="hide_from_accounts_set", to=settings.AUTH_USER_MODEL
),
),
migrations.AlterField(
model_name="msg",
name="db_receivers_channels",
field=models.ManyToManyField(
blank=True,
                help_text="channel receivers",
related_name="channel_set",
to="comms.ChannelDB",
),
),
migrations.AlterField(
model_name="msg",
name="db_receivers_objects",
field=models.ManyToManyField(
blank=True,
help_text="object receivers",
related_name="receiver_object_set",
to="objects.ObjectDB",
),
),
migrations.AlterField(
model_name="msg",
name="db_receivers_accounts",
field=models.ManyToManyField(
blank=True,
help_text="account receivers",
related_name="receiver_account_set",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="msg",
name="db_sender_objects",
field=models.ManyToManyField(
blank=True,
db_index=True,
related_name="sender_object_set",
to="objects.ObjectDB",
verbose_name="sender(object)",
),
),
migrations.AlterField(
model_name="msg",
name="db_sender_accounts",
field=models.ManyToManyField(
blank=True,
db_index=True,
related_name="sender_account_set",
to=settings.AUTH_USER_MODEL,
verbose_name="sender(account)",
),
),
migrations.AlterField(
model_name="msg",
name="db_tags",
field=models.ManyToManyField(
blank=True,
help_text="tags on this message. Tags are simple string markers to identify, group and alias messages.",
to="typeclasses.Tag",
),
),
]
|
{
"content_hash": "7d3fe6de9b97f6cf6251a0f961972380",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 137,
"avg_line_length": 34.52307692307692,
"alnum_prop": 0.5040106951871658,
"repo_name": "jamesbeebop/evennia",
"id": "8c212a7befbe5e449a629d83766687543c996148",
"size": "4563",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/comms/migrations/0011_auto_20170606_1731.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
}
|
import types
import asyncio
from typing import Optional, Type
from algoliasearch.recommendation_client import RecommendationClient
from algoliasearch.configs import RecommendationConfig
from algoliasearch.helpers_async import _create_async_methods_in
from algoliasearch.http.transporter_async import TransporterAsync
class RecommendationClientAsync(RecommendationClient):
def __init__(self, recommendation_client, transporter, search_config):
# type: (RecommendationClient, TransporterAsync, RecommendationConfig) -> None # noqa: E501
self._transporter_async = transporter
super(RecommendationClientAsync, self).__init__(
recommendation_client._transporter, search_config
)
client = RecommendationClient(transporter, search_config)
_create_async_methods_in(self, client)
@asyncio.coroutine
def __aenter__(self):
# type: () -> RecommendationClientAsync # type: ignore
return self # type: ignore
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb): # type: ignore
# type: (Optional[Type[BaseException]], Optional[BaseException],Optional[types.TracebackType]) -> None # noqa: E501
yield from self.close_async() # type: ignore
@asyncio.coroutine
def close_async(self): # type: ignore
# type: () -> None
super().close()
yield from self._transporter_async.close() # type: ignore
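# A hedged usage sketch; how the synchronous client and async transporter get
# constructed is elided (an assumption), but the context-manager protocol
# defined above is what makes the `async with` form work:
#
#   async def demo(client):  # client: RecommendationClientAsync
#       async with client:
#           ...  # call the *_async methods added by _create_async_methods_in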
|
{
"content_hash": "2bee28711c6a77c916b1c327156d0027",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 123,
"avg_line_length": 33.53488372093023,
"alnum_prop": 0.7004160887656034,
"repo_name": "algolia/algoliasearch-client-python",
"id": "784c64aae45128473c1a92b26933212f8a3b2291",
"size": "1442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algoliasearch/recommendation_client_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "239410"
}
],
"symlink_target": ""
}
|
from ginga.gtkw import ImageViewGtk
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
from ginga.util.toolbox import ModeIndicator
class ImageViewCanvasError(ImageViewGtk.ImageViewGtkError):
pass
class ImageViewCanvas(ImageViewGtk.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewGtk.ImageViewZoom.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
# we are both a viewer and a canvas
self.set_canvas(self, private_canvas=self)
self._mi = ModeIndicator(self)
#END
|
{
"content_hash": "31eac906f1adf2b8b051a7451e22eeaa",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 36.214285714285715,
"alnum_prop": 0.5788954635108481,
"repo_name": "Cadair/ginga",
"id": "b7fa0c071583292e8891ba87760c9c379bc0758c",
"size": "1290",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ginga/gtkw/ImageViewCanvasGtk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1939"
},
{
"name": "JavaScript",
"bytes": "8724"
},
{
"name": "Python",
"bytes": "2458171"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
from fabric.api import local, cd, prefix, env
import os
BASE_DIR = os.path.join(os.path.dirname(__file__))
ENV_DIR = os.path.join(BASE_DIR, 'env')
PROJECT = "shanemartin"
activate = os.path.join(BASE_DIR, 'env', 'bin', 'activate')
@contextmanager
def virtualenv():
with prefix('. {activate}'.format(activate=activate)):
yield
def setup(environ='dev'):
"""
Setup virtual env and python packages.
"""
requirements = os.path.join(
BASE_DIR, 'requirements', '{}.txt'.format(environ))
local("virtualenv env")
with virtualenv():
local("pip install -r {}".format(requirements))
def run():
"""
Runs the application in development mode.
"""
with virtualenv():
local("python manage.py server")
def freeze():
    """
    Freeze the site to static files.
    """
    with virtualenv():
        local("python freeze.py")
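# Typical invocations from a shell (task names are the functions above;
# Fabric 1.x passes positional arguments with the `task:arg` syntax):
#
#   fab setup            # installs requirements/dev.txt into ./env
#   fab setup:prod       # installs requirements/prod.txt
#   fab run
#   fab freeze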
|
{
"content_hash": "e57b2c965f5a3512cd061805bafda2b7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 59,
"avg_line_length": 21,
"alnum_prop": 0.6360544217687075,
"repo_name": "shamrt/sha.nemart.in-flask",
"id": "0c95cdd427da67bdabaa23fd3e09a04e824f11f8",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3149"
},
{
"name": "HTML",
"bytes": "7145"
},
{
"name": "JavaScript",
"bytes": "414"
},
{
"name": "Python",
"bytes": "6698"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from jacket.i18n import _LW
from jacket.compute.scheduler import filters
from jacket.compute import servicegroup
LOG = logging.getLogger(__name__)
class ComputeFilter(filters.BaseHostFilter):
"""Filter on active Compute nodes."""
def __init__(self):
self.servicegroup_api = servicegroup.API()
# Host state does not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, spec_obj):
"""Returns True for only active compute nodes."""
service = host_state.service
if service['disabled']:
LOG.debug("%(host_state)s is disabled, reason: %(reason)s",
{'host_state': host_state,
'reason': service.get('disabled_reason')})
return False
else:
if not self.servicegroup_api.service_is_up(service):
LOG.warning(_LW("%(host_state)s has not been heard from in a "
"while"), {'host_state': host_state})
return False
return True
|
{
"content_hash": "d41ebbab465842610f191b5b257e63a2",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 34.5625,
"alnum_prop": 0.6003616636528029,
"repo_name": "HybridF5/jacket",
"id": "2a99e20341a65f0e3f493af2b80f03c4a5ab3232",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/compute/scheduler/filters/compute_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
import os
import conexiuneBaza
import MySQLdb
import datetime
def afisareDestinatii(bazaDate):
sql="Select * from sejur WHERE data_plecare >= CURDATE() AND ora_plecare >= CURTIME()"
cursor=bazaDate.cursor()
try:
cursor.execute(sql)
a=datetime.datetime.now().strftime("%Y-%m-%d")
datetime.datetime.now().strftime("%H:%M:%S")
nrDate=int(cursor.rowcount)
for x in range(0, nrDate):
row=cursor.fetchone()
            print "Departure ID:", row[0], " Flight number:", row[1], " Departure city:", row[2], "Destination city:", row[3], "Departure date:", row[4], "Departure time:", row[5], "Landing date:", row[6], "Landing time:", row[7]
except Exception, e:
        print "Database query error ->", e
bazaDate.commit()
cursor.close()
bazaDate.commit()
|
{
"content_hash": "312f5bb8cd0418d1be795f4be621e60e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 206,
"avg_line_length": 41.4,
"alnum_prop": 0.6256038647342995,
"repo_name": "acd8/proiectBDOO",
"id": "b6caebf14c3c1d9b105a9fad8e6749b96a094508",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ProiectFinal_BDOO/listaDestinatii.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9607"
}
],
"symlink_target": ""
}
|
"""Sorting helpers for ISY994 device classifications."""
from __future__ import annotations
from typing import Any
from pyisy.constants import (
ISY_VALUE_UNKNOWN,
PROTO_GROUP,
PROTO_INSTEON,
PROTO_PROGRAM,
PROTO_ZWAVE,
TAG_FOLDER,
)
from pyisy.nodes import Group, Node, Nodes
from pyisy.programs import Programs
from pyisy.variables import Variables
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate.const import DOMAIN as CLIMATE
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get_registry
from .const import (
_LOGGER,
DEFAULT_PROGRAM_STRING,
DOMAIN,
FILTER_INSTEON_TYPE,
FILTER_NODE_DEF_ID,
FILTER_STATES,
FILTER_UOM,
FILTER_ZWAVE_CAT,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
ISY_GROUP_PLATFORM,
KEY_ACTIONS,
KEY_STATUS,
NODE_FILTERS,
PLATFORMS,
PROGRAM_PLATFORMS,
SUBNODE_CLIMATE_COOL,
SUBNODE_CLIMATE_HEAT,
SUBNODE_EZIO2X4_SENSORS,
SUBNODE_FANLINC_LIGHT,
SUBNODE_IOLINC_RELAY,
TYPE_CATEGORY_SENSOR_ACTUATORS,
TYPE_EZIO2X4,
UOM_DOUBLE_TEMP,
UOM_ISYV4_DEGREES,
)
BINARY_SENSOR_UOMS = ["2", "78"]
BINARY_SENSOR_ISY_STATES = ["on", "off"]
def _check_for_node_def(
hass_isy_data: dict, node: Group | Node, single_platform: str = None
) -> bool:
"""Check if the node matches the node_def_id for any platforms.
This is only present on the 5.0 ISY firmware, and is the most reliable
way to determine a device's type.
"""
if not hasattr(node, "node_def_id") or node.node_def_id is None:
# Node doesn't have a node_def (pre 5.0 firmware most likely)
return False
node_def_id = node.node_def_id
platforms = PLATFORMS if not single_platform else [single_platform]
for platform in platforms:
if node_def_id in NODE_FILTERS[platform][FILTER_NODE_DEF_ID]:
hass_isy_data[ISY994_NODES][platform].append(node)
return True
return False
def _check_for_insteon_type(
hass_isy_data: dict, node: Group | Node, single_platform: str = None
) -> bool:
"""Check if the node matches the Insteon type for any platforms.
This is for (presumably) every version of the ISY firmware, but only
works for Insteon device. "Node Server" (v5+) and Z-Wave and others will
not have a type.
"""
if not hasattr(node, "protocol") or node.protocol != PROTO_INSTEON:
return False
if not hasattr(node, "type") or node.type is None:
# Node doesn't have a type (non-Insteon device most likely)
return False
device_type = node.type
platforms = PLATFORMS if not single_platform else [single_platform]
for platform in platforms:
if any(
device_type.startswith(t)
for t in set(NODE_FILTERS[platform][FILTER_INSTEON_TYPE])
):
# Hacky special-cases for certain devices with different platforms
# included as subnodes. Note that special-cases are not necessary
# on ISY 5.x firmware as it uses the superior NodeDefs method
subnode_id = int(node.address.split(" ")[-1], 16)
# FanLinc, which has a light module as one of its nodes.
if platform == FAN and subnode_id == SUBNODE_FANLINC_LIGHT:
hass_isy_data[ISY994_NODES][LIGHT].append(node)
return True
            # Thermostats, which have "Heat" and "Cool" sub-nodes on addresses 2 and 3
if platform == CLIMATE and subnode_id in (
SUBNODE_CLIMATE_COOL,
SUBNODE_CLIMATE_HEAT,
):
hass_isy_data[ISY994_NODES][BINARY_SENSOR].append(node)
return True
# IOLincs which have a sensor and relay on 2 different nodes
if (
platform == BINARY_SENSOR
and device_type.startswith(TYPE_CATEGORY_SENSOR_ACTUATORS)
and subnode_id == SUBNODE_IOLINC_RELAY
):
hass_isy_data[ISY994_NODES][SWITCH].append(node)
return True
# Smartenit EZIO2X4
if (
platform == SWITCH
and device_type.startswith(TYPE_EZIO2X4)
and subnode_id in SUBNODE_EZIO2X4_SENSORS
):
hass_isy_data[ISY994_NODES][BINARY_SENSOR].append(node)
return True
hass_isy_data[ISY994_NODES][platform].append(node)
return True
return False
def _check_for_zwave_cat(
hass_isy_data: dict, node: Group | Node, single_platform: str = None
) -> bool:
"""Check if the node matches the ISY Z-Wave Category for any platforms.
This is for (presumably) every version of the ISY firmware, but only
works for Z-Wave Devices with the devtype.cat property.
"""
if not hasattr(node, "protocol") or node.protocol != PROTO_ZWAVE:
return False
if not hasattr(node, "zwave_props") or node.zwave_props is None:
# Node doesn't have a device type category (non-Z-Wave device)
return False
device_type = node.zwave_props.category
platforms = PLATFORMS if not single_platform else [single_platform]
for platform in platforms:
if any(
device_type.startswith(t)
for t in set(NODE_FILTERS[platform][FILTER_ZWAVE_CAT])
):
hass_isy_data[ISY994_NODES][platform].append(node)
return True
return False
def _check_for_uom_id(
hass_isy_data: dict,
node: Group | Node,
single_platform: str = None,
uom_list: list = None,
) -> bool:
"""Check if a node's uom matches any of the platforms uom filter.
This is used for versions of the ISY firmware that report uoms as a single
ID. We can often infer what type of device it is by that ID.
"""
if not hasattr(node, "uom") or node.uom in (None, ""):
# Node doesn't have a uom (Scenes for example)
return False
# Backwards compatibility for ISYv4 Firmware:
node_uom = node.uom
if isinstance(node.uom, list):
node_uom = node.uom[0]
if uom_list:
if node_uom in uom_list:
hass_isy_data[ISY994_NODES][single_platform].append(node)
return True
return False
platforms = PLATFORMS if not single_platform else [single_platform]
for platform in platforms:
if node_uom in NODE_FILTERS[platform][FILTER_UOM]:
hass_isy_data[ISY994_NODES][platform].append(node)
return True
return False
def _check_for_states_in_uom(
hass_isy_data: dict,
node: Group | Node,
single_platform: str = None,
states_list: list = None,
) -> bool:
"""Check if a list of uoms matches two possible filters.
This is for versions of the ISY firmware that report uoms as a list of all
possible "human readable" states. This filter passes if all of the possible
states fit inside the given filter.
"""
if not hasattr(node, "uom") or node.uom in (None, ""):
# Node doesn't have a uom (Scenes for example)
return False
# This only works for ISYv4 Firmware where uom is a list of states:
if not isinstance(node.uom, list):
return False
node_uom = set(map(str.lower, node.uom))
if states_list:
if node_uom == set(states_list):
hass_isy_data[ISY994_NODES][single_platform].append(node)
return True
return False
platforms = PLATFORMS if not single_platform else [single_platform]
for platform in platforms:
if node_uom == set(NODE_FILTERS[platform][FILTER_STATES]):
hass_isy_data[ISY994_NODES][platform].append(node)
return True
return False
def _is_sensor_a_binary_sensor(hass_isy_data: dict, node: Group | Node) -> bool:
"""Determine if the given sensor node should be a binary_sensor."""
if _check_for_node_def(hass_isy_data, node, single_platform=BINARY_SENSOR):
return True
if _check_for_insteon_type(hass_isy_data, node, single_platform=BINARY_SENSOR):
return True
# For the next two checks, we're providing our own set of uoms that
# represent on/off devices. This is because we can only depend on these
# checks in the context of already knowing that this is definitely a
# sensor device.
if _check_for_uom_id(
hass_isy_data, node, single_platform=BINARY_SENSOR, uom_list=BINARY_SENSOR_UOMS
):
return True
if _check_for_states_in_uom(
hass_isy_data,
node,
single_platform=BINARY_SENSOR,
states_list=BINARY_SENSOR_ISY_STATES,
):
return True
return False
def _categorize_nodes(
hass_isy_data: dict, nodes: Nodes, ignore_identifier: str, sensor_identifier: str
) -> None:
"""Sort the nodes to their proper platforms."""
for (path, node) in nodes:
ignored = ignore_identifier in path or ignore_identifier in node.name
if ignored:
# Don't import this node as a device at all
continue
if hasattr(node, "protocol") and node.protocol == PROTO_GROUP:
hass_isy_data[ISY994_NODES][ISY_GROUP_PLATFORM].append(node)
continue
if sensor_identifier in path or sensor_identifier in node.name:
# User has specified to treat this as a sensor. First we need to
# determine if it should be a binary_sensor.
if _is_sensor_a_binary_sensor(hass_isy_data, node):
continue
hass_isy_data[ISY994_NODES][SENSOR].append(node)
continue
# We have a bunch of different methods for determining the device type,
# each of which works with different ISY firmware versions or device
# family. The order here is important, from most reliable to least.
if _check_for_node_def(hass_isy_data, node):
continue
if _check_for_insteon_type(hass_isy_data, node):
continue
if _check_for_zwave_cat(hass_isy_data, node):
continue
if _check_for_uom_id(hass_isy_data, node):
continue
if _check_for_states_in_uom(hass_isy_data, node):
continue
        # Fall back to sensor, e.g. for un-sortable items like NodeServer nodes.
hass_isy_data[ISY994_NODES][SENSOR].append(node)
def _categorize_programs(hass_isy_data: dict, programs: Programs) -> None:
"""Categorize the ISY994 programs."""
for platform in PROGRAM_PLATFORMS:
folder = programs.get_by_name(f"{DEFAULT_PROGRAM_STRING}{platform}")
if not folder:
continue
for dtype, _, node_id in folder.children:
if dtype != TAG_FOLDER:
continue
entity_folder = folder[node_id]
actions = None
status = entity_folder.get_by_name(KEY_STATUS)
if not status or status.protocol != PROTO_PROGRAM:
_LOGGER.warning(
"Program %s entity '%s' not loaded, invalid/missing status program",
platform,
entity_folder.name,
)
continue
if platform != BINARY_SENSOR:
actions = entity_folder.get_by_name(KEY_ACTIONS)
if not actions or actions.protocol != PROTO_PROGRAM:
_LOGGER.warning(
"Program %s entity '%s' not loaded, invalid/missing actions program",
platform,
entity_folder.name,
)
continue
entity = (entity_folder.name, status, actions)
hass_isy_data[ISY994_PROGRAMS][platform].append(entity)
def _categorize_variables(
hass_isy_data: dict, variables: Variables, identifier: str
) -> None:
"""Gather the ISY994 Variables to be added as sensors."""
try:
var_to_add = [
(vtype, vname, vid)
for (vtype, vname, vid) in variables.children
if identifier in vname
]
except KeyError as err:
_LOGGER.error("Error adding ISY Variables: %s", err)
return
for vtype, vname, vid in var_to_add:
hass_isy_data[ISY994_VARIABLES].append((vname, variables[vtype][vid]))
async def migrate_old_unique_ids(
hass: HomeAssistant, platform: str, devices: list[Any] | None
) -> None:
"""Migrate to new controller-specific unique ids."""
registry = await async_get_registry(hass)
for device in devices:
old_entity_id = registry.async_get_entity_id(
platform, DOMAIN, device.old_unique_id
)
if old_entity_id is not None:
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
device.old_unique_id,
device.unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=device.unique_id)
old_entity_id_2 = registry.async_get_entity_id(
platform, DOMAIN, device.unique_id.replace(":", "")
)
if old_entity_id_2 is not None:
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
device.unique_id.replace(":", ""),
device.unique_id,
)
registry.async_update_entity(
old_entity_id_2, new_unique_id=device.unique_id
)
def convert_isy_value_to_hass(
value: int | float | None,
uom: str,
precision: int | str,
fallback_precision: int | None = None,
) -> float | int | None:
"""Fix ISY Reported Values.
ISY provides float values as an integer and precision component.
Correct by shifting the decimal place left by the value of precision.
(e.g. value=2345, prec="2" == 23.45)
Insteon Thermostats report temperature in 0.5-deg precision as an int
by sending a value of 2 times the Temp. Correct by dividing by 2 here.
"""
if value is None or value == ISY_VALUE_UNKNOWN:
return None
if uom in (UOM_DOUBLE_TEMP, UOM_ISYV4_DEGREES):
return round(float(value) / 2.0, 1)
if precision not in ("0", 0):
return round(float(value) / 10 ** int(precision), int(precision))
if fallback_precision:
return round(float(value), fallback_precision)
return value
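# Worked examples for the rules above ("17" is just a stand-in for any uom
# with no special handling):
#   convert_isy_value_to_hass(2345, "17", "2")           -> 23.45
#   convert_isy_value_to_hass(45, UOM_DOUBLE_TEMP, "0")  -> 22.5
#   convert_isy_value_to_hass(45, "17", "0", fallback_precision=1) -> 45.0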
|
{
"content_hash": "9bd1dbaf5e6d1a424613a178e89cb478",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 93,
"avg_line_length": 34.77251184834123,
"alnum_prop": 0.6212348371268911,
"repo_name": "jawilson/home-assistant",
"id": "d1790fcc13cf43785ccd72475d51ccaf20ccd582",
"size": "14674",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/isy994/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
__author__ = 'Sergey Sobko'
__email__ = 'ssobko@rbc.ru'
__copyright__ = 'Copyright 2014, RosBusinessConsulting'
import logging
def initialize_logger(logger=None):
"""Initialize logger"""
if logger:
if isinstance(logger, basestring):
return logging.getLogger(logger)
return logger
return logging.getLogger('rbc.session')
|
{
"content_hash": "05bcb52dd8fa8c06bbdb7286fffe1112",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 24.266666666666666,
"alnum_prop": 0.6648351648351648,
"repo_name": "RosBusinessConsulting/sessionclient-python",
"id": "5c82bde79875b461375587cdf087a82ae60c1c49",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ssclient/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4713"
}
],
"symlink_target": ""
}
|
"""Test Home Assistant yaml loader."""
import io
import logging
import os
import unittest
import pytest
from homeassistant.config import YAML_CONFIG_FILE, load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.yaml as yaml
from homeassistant.util.yaml import loader as yaml_loader
from tests.async_mock import patch
from tests.common import get_test_config_dir, patch_yaml_files
@pytest.fixture(autouse=True)
def mock_credstash():
"""Mock credstash so it doesn't connect to the internet."""
with patch.object(yaml_loader, "credstash") as mock_credstash:
mock_credstash.getSecret.return_value = None
yield mock_credstash
def test_simple_list():
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["config"] == ["simple", "list"]
def test_simple_dict():
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == "value"
def test_unhashable_key():
"""Test an unhashable key."""
files = {YAML_CONFIG_FILE: "message:\n {{ states.state }}"}
with pytest.raises(HomeAssistantError), patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
def test_no_key():
"""Test item without a key."""
files = {YAML_CONFIG_FILE: "a: a\nnokeyhere"}
with pytest.raises(HomeAssistantError), patch_yaml_files(files):
yaml.load_yaml(YAML_CONFIG_FILE)
def test_environment_variable():
"""Test config file with environment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["password"] == "secret_password"
del os.environ["PASSWORD"]
def test_environment_variable_default():
"""Test config file with default value for environment variable."""
conf = "password: !env_var PASSWORD secret_password"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["password"] == "secret_password"
def test_invalid_environment_variable():
    """Test config file with no environment variable set."""
conf = "password: !env_var PASSWORD"
with pytest.raises(HomeAssistantError):
with io.StringIO(conf) as file:
yaml_loader.yaml.safe_load(file)
def test_include_yaml():
"""Test include yaml."""
with patch_yaml_files({"test.yaml": "value"}):
conf = "key: !include test.yaml"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == "value"
with patch_yaml_files({"test.yaml": None}):
conf = "key: !include test.yaml"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == {}
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_list(mock_walk):
"""Test include dir list yaml."""
mock_walk.return_value = [["/test", [], ["two.yaml", "one.yaml"]]]
with patch_yaml_files({"/test/one.yaml": "one", "/test/two.yaml": "two"}):
conf = "key: !include_dir_list /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == sorted(["one", "two"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_list_recursive(mock_walk):
"""Test include dir recursive list yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["zero.yaml"]],
["/test/tmp2", [], ["one.yaml", "two.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/zero.yaml": "zero",
"/test/tmp2/one.yaml": "one",
"/test/tmp2/two.yaml": "two",
}
):
conf = "key: !include_dir_list /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["zero", "one", "two"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_named(mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
["/test", [], ["first.yaml", "second.yaml", "secrets.yaml"]]
]
with patch_yaml_files({"/test/first.yaml": "one", "/test/second.yaml": "two"}):
conf = "key: !include_dir_named /test"
correct = {"first": "one", "second": "two"}
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == correct
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_named_recursive(mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "one",
"/test/tmp2/second.yaml": "two",
"/test/tmp2/third.yaml": "three",
}
):
conf = "key: !include_dir_named /test"
correct = {"first": "one", "second": "two", "third": "three"}
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert doc["key"] == correct
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_list(mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [["/test", [], ["first.yaml", "second.yaml"]]]
with patch_yaml_files(
{"/test/first.yaml": "- one", "/test/second.yaml": "- two\n- three"}
):
conf = "key: !include_dir_merge_list /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert sorted(doc["key"]) == sorted(["one", "two", "three"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_list_recursive(mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "- one",
"/test/tmp2/second.yaml": "- two",
"/test/tmp2/third.yaml": "- three\n- four",
}
):
conf = "key: !include_dir_merge_list /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["one", "two", "three", "four"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_named(mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [["/test", [], ["first.yaml", "second.yaml"]]]
files = {
"/test/first.yaml": "key1: one",
"/test/second.yaml": "key2: two\nkey3: three",
}
with patch_yaml_files(files):
conf = "key: !include_dir_merge_named /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == {"key1": "one", "key2": "two", "key3": "three"}
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_named_recursive(mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "key1: one",
"/test/tmp2/second.yaml": "key2: two",
"/test/tmp2/third.yaml": "key3: three\nkey4: four",
}
):
conf = "key: !include_dir_merge_named /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three",
"key4": "four",
}
@patch("homeassistant.util.yaml.loader.open", create=True)
def test_load_yaml_encoding_error(mock_open):
"""Test raising a UnicodeDecodeError."""
mock_open.side_effect = UnicodeDecodeError("", b"", 1, 0, "")
with pytest.raises(HomeAssistantError):
yaml_loader.load_yaml("test")
def test_dump():
    """Test that the dump method renders None values as empty."""
    assert yaml.dump({"a": None, "b": "b"}) == "a:\nb: b\n"
def test_dump_unicode():
    """Test that the dump method handles unicode and renders None as empty."""
    assert yaml.dump({"a": None, "b": "привет"}) == "a:\nb: привет\n"
FILES = {}
def load_yaml(fname, string):
"""Write a string to file and return the parsed yaml."""
FILES[fname] = string
with patch_yaml_files(FILES):
return load_yaml_config_file(fname)
class FakeKeyring:
"""Fake a keyring class."""
def __init__(self, secrets_dict):
"""Store keyring dictionary."""
self._secrets = secrets_dict
# pylint: disable=protected-access
def get_password(self, domain, name):
"""Retrieve password."""
assert domain == yaml._SECRET_NAMESPACE
return self._secrets.get(name)
class TestSecrets(unittest.TestCase):
"""Test the secrets parameter in the yaml utility."""
# pylint: disable=protected-access,invalid-name
def setUp(self):
"""Create & load secrets file."""
config_dir = get_test_config_dir()
yaml.clear_secret_cache()
self._yaml_path = os.path.join(config_dir, YAML_CONFIG_FILE)
self._secret_path = os.path.join(config_dir, yaml.SECRET_YAML)
self._sub_folder_path = os.path.join(config_dir, "subFolder")
self._unrelated_path = os.path.join(config_dir, "unrelated")
load_yaml(
self._secret_path,
"http_pw: pwhttp\n"
"comp1_un: un1\n"
"comp1_pw: pw1\n"
"stale_pw: not_used\n"
"logger: debug\n",
)
self._yaml = load_yaml(
self._yaml_path,
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
def tearDown(self):
"""Clean up secrets."""
yaml.clear_secret_cache()
FILES.clear()
def test_secrets_from_yaml(self):
"""Did secrets load ok."""
expected = {"api_password": "pwhttp"}
assert expected == self._yaml["http"]
expected = {"username": "un1", "password": "pw1"}
assert expected == self._yaml["component"]
    def test_secrets_from_parent_folder(self):
        """Test loading secrets from parent folder."""
expected = {"api_password": "pwhttp"}
self._yaml = load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
assert expected == self._yaml["http"]
def test_secret_overrides_parent(self):
"""Test loading current directory secret overrides the parent."""
expected = {"api_password": "override"}
load_yaml(
os.path.join(self._sub_folder_path, yaml.SECRET_YAML), "http_pw: override"
)
self._yaml = load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
assert expected == self._yaml["http"]
def test_secrets_from_unrelated_fails(self):
"""Test loading secrets from unrelated folder fails."""
load_yaml(os.path.join(self._unrelated_path, yaml.SECRET_YAML), "test: failure")
with pytest.raises(HomeAssistantError):
load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n api_password: !secret test",
)
def test_secrets_keyring(self):
"""Test keyring fallback & get_password."""
        yaml_loader.keyring = None  # Ensure it's not there
yaml_str = "http:\n api_password: !secret http_pw_keyring"
with pytest.raises(HomeAssistantError):
load_yaml(self._yaml_path, yaml_str)
yaml_loader.keyring = FakeKeyring({"http_pw_keyring": "yeah"})
_yaml = load_yaml(self._yaml_path, yaml_str)
assert {"http": {"api_password": "yeah"}} == _yaml
@patch.object(yaml_loader, "credstash")
def test_secrets_credstash(self, mock_credstash):
"""Test credstash fallback & get_password."""
mock_credstash.getSecret.return_value = "yeah"
yaml_str = "http:\n api_password: !secret http_pw_credstash"
_yaml = load_yaml(self._yaml_path, yaml_str)
log = logging.getLogger()
log.error(_yaml["http"])
assert {"api_password": "yeah"} == _yaml["http"]
def test_secrets_logger_removed(self):
"""Ensure logger: debug was removed."""
with pytest.raises(HomeAssistantError):
load_yaml(self._yaml_path, "api_password: !secret logger")
@patch("homeassistant.util.yaml.loader._LOGGER.error")
    def test_bad_logger_value(self, mock_error):
        """Ensure an error is logged for an invalid logger: value."""
yaml.clear_secret_cache()
load_yaml(self._secret_path, "logger: info\npw: abc")
load_yaml(self._yaml_path, "api_password: !secret pw")
assert mock_error.call_count == 1, "Expected an error about logger: value"
def test_secrets_are_not_dict(self):
"""Did secrets handle non-dict file."""
FILES[
self._secret_path
] = "- http_pw: pwhttp\n comp1_un: un1\n comp1_pw: pw1\n"
yaml.clear_secret_cache()
with pytest.raises(HomeAssistantError):
load_yaml(
self._yaml_path,
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
def test_representing_yaml_loaded_data():
"""Test we can represent YAML loaded data."""
files = {YAML_CONFIG_FILE: 'key: [1, "2", 3]'}
with patch_yaml_files(files):
data = load_yaml_config_file(YAML_CONFIG_FILE)
assert yaml.dump(data) == "key:\n- 1\n- '2'\n- 3\n"
def test_duplicate_key(caplog):
"""Test duplicate dict keys."""
files = {YAML_CONFIG_FILE: "key: thing1\nkey: thing2"}
with patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
assert "contains duplicate key" in caplog.text
|
{
"content_hash": "264dda370b04d4c94b2fe73aecb4a511",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 88,
"avg_line_length": 35.00215982721382,
"alnum_prop": 0.573306182895224,
"repo_name": "robbiet480/home-assistant",
"id": "4d6f4ce3ac9168a23d6daceb340c04c28594cd01",
"size": "16218",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/util/test_yaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""
based on https://github.com/tomchristie/django-rest-framework/blob/master/runtests.py
"""
from __future__ import print_function
import pytest
import sys
import os
import subprocess
PYTEST_ARGS = {
'default': ['tests'],
'fast': ['tests', '-q'],
}
FLAKE8_ARGS = ['rest_framework_friendly_errors', 'tests', '--ignore=E501']
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
else:
run_flake8 = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = ['tests'] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = ['tests', '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
|
{
"content_hash": "59eceffda5a6b86c185d3ede3247a237",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 85,
"avg_line_length": 26.118279569892472,
"alnum_prop": 0.599011939069576,
"repo_name": "oasiswork/drf-friendly-errors",
"id": "311e57bf9fca0e0f94a74e11f43fa8475517054f",
"size": "2452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71615"
}
],
"symlink_target": ""
}
|
'''Runner for debugging with J-Link.'''
import argparse
import os
import platform
import re
import shlex
from subprocess import TimeoutExpired
import sys
import tempfile
from runners.core import ZephyrBinaryRunner, RunnerCaps, \
BuildConfiguration
try:
from packaging import version
MISSING_REQUIREMENTS = False
except ImportError:
MISSING_REQUIREMENTS = True
DEFAULT_JLINK_EXE = 'JLink.exe' if sys.platform == 'win32' else 'JLinkExe'
DEFAULT_JLINK_GDB_PORT = 2331
class ToggleAction(argparse.Action):
def __call__(self, parser, args, ignored, option):
setattr(args, self.dest, not option.startswith('--no-'))
class JLinkBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for the J-Link GDB server.'''
def __init__(self, cfg, device,
commander=DEFAULT_JLINK_EXE,
flash_addr=0x0, erase=True, reset_after_load=False,
iface='swd', speed='auto',
gdbserver='JLinkGDBServer',
gdb_host='',
gdb_port=DEFAULT_JLINK_GDB_PORT,
tui=False, tool_opt=[]):
super().__init__(cfg)
self.hex_name = cfg.hex_file
self.bin_name = cfg.bin_file
self.elf_name = cfg.elf_file
self.gdb_cmd = [cfg.gdb] if cfg.gdb else None
self.device = device
self.commander = commander
self.flash_addr = flash_addr
self.erase = erase
self.reset_after_load = reset_after_load
self.gdbserver = gdbserver
self.iface = iface
self.speed = speed
self.gdb_host = gdb_host
self.gdb_port = gdb_port
self.tui_arg = ['-tui'] if tui else []
self.tool_opt = []
for opts in [shlex.split(opt) for opt in tool_opt]:
self.tool_opt += opts
@classmethod
def name(cls):
return 'jlink'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash', 'debug', 'debugserver', 'attach'},
flash_addr=True, erase=True)
@classmethod
def do_add_parser(cls, parser):
# Required:
parser.add_argument('--device', required=True, help='device name')
# Optional:
parser.add_argument('--iface', default='swd',
help='interface to use, default is swd')
parser.add_argument('--speed', default='auto',
help='interface speed, default is autodetect')
parser.add_argument('--tui', default=False, action='store_true',
help='if given, GDB uses -tui')
parser.add_argument('--gdbserver', default='JLinkGDBServer',
help='GDB server, default is JLinkGDBServer')
parser.add_argument('--gdb-host', default='',
help='custom gdb host, defaults to the empty string '
'and runs a gdb server')
parser.add_argument('--gdb-port', default=DEFAULT_JLINK_GDB_PORT,
help='pyocd gdb port, defaults to {}'.format(
DEFAULT_JLINK_GDB_PORT))
parser.add_argument('--tool-opt', default=[], action='append',
help='''Additional options for JLink Commander,
e.g. \'-autoconnect 1\' ''')
parser.add_argument('--commander', default=DEFAULT_JLINK_EXE,
help='J-Link Commander, default is JLinkExe')
parser.add_argument('--reset-after-load', '--no-reset-after-load',
dest='reset_after_load', nargs=0,
action=ToggleAction,
help='reset after loading? (default: no)')
parser.set_defaults(reset_after_load=False)
@classmethod
def do_create(cls, cfg, args):
build_conf = BuildConfiguration(cfg.build_dir)
flash_addr = cls.get_flash_address(args, build_conf)
return JLinkBinaryRunner(cfg, args.device,
commander=args.commander,
flash_addr=flash_addr, erase=args.erase,
reset_after_load=args.reset_after_load,
iface=args.iface, speed=args.speed,
gdbserver=args.gdbserver,
gdb_host=args.gdb_host,
gdb_port=args.gdb_port,
tui=args.tui, tool_opt=args.tool_opt)
def print_gdbserver_message(self):
self.logger.info('J-Link GDB server running on port {}'.
format(self.gdb_port))
def read_version(self):
'''Read the J-Link Commander version output.
        J-Link Commander provides neither a stand-alone version string
        output nor command-line parameter help output. To find the version, we
launch it using a bogus command line argument (to get it to fail) and
read the version information provided to stdout.
A timeout is used since the J-Link Commander takes up to a few seconds
to exit upon failure.'''
if platform.system() == 'Windows' or "microsoft" in platform.release().lower():
# The check below does not work on Microsoft Windows or in WSL
return ''
self.require(self.commander)
# Match "Vd.dd" substring
ver_re = re.compile(r'\s+V([.0-9]+)[a-zA-Z]*\s+', re.IGNORECASE)
cmd = ([self.commander] + ['-bogus-argument-that-does-not-exist'])
try:
self.check_output(cmd, timeout=1)
except TimeoutExpired as e:
ver_m = ver_re.search(e.output.decode('utf-8'))
if ver_m:
return ver_m.group(1)
else:
return ''
def supports_nogui(self):
ver = self.read_version()
# -nogui was introduced in J-Link Commander v6.80
return version.parse(ver) >= version.parse("6.80")
def do_run(self, command, **kwargs):
if MISSING_REQUIREMENTS:
raise RuntimeError('one or more Python dependencies were missing; '
"see the getting started guide for details on "
"how to fix")
server_cmd = ([self.gdbserver] +
['-select', 'usb', # only USB connections supported
'-port', str(self.gdb_port),
'-if', self.iface,
'-speed', self.speed,
'-device', self.device,
'-silent',
'-singlerun'] +
self.tool_opt)
if command == 'flash':
self.flash(**kwargs)
elif command == 'debugserver':
if self.gdb_host:
raise ValueError('Cannot run debugserver with --gdb-host')
self.require(self.gdbserver)
self.print_gdbserver_message()
self.check_call(server_cmd)
else:
if self.gdb_cmd is None:
raise ValueError('Cannot debug; gdb is missing')
if self.elf_name is None:
raise ValueError('Cannot debug; elf is missing')
client_cmd = (self.gdb_cmd +
self.tui_arg +
[self.elf_name] +
['-ex', 'target remote {}:{}'.format(self.gdb_host, self.gdb_port)])
if command == 'debug':
client_cmd += ['-ex', 'monitor halt',
'-ex', 'monitor reset',
'-ex', 'load']
if self.reset_after_load:
client_cmd += ['-ex', 'monitor reset']
if not self.gdb_host:
self.require(self.gdbserver)
self.print_gdbserver_message()
self.run_server_and_client(server_cmd, client_cmd)
else:
self.run_client(client_cmd)
def flash(self, **kwargs):
self.require(self.commander)
lines = ['r'] # Reset and halt the target
if self.erase:
lines.append('erase') # Erase all flash sectors
        # Get the build artifact to flash, preferring .hex over .bin
if self.hex_name is not None and os.path.isfile(self.hex_name):
flash_file = self.hex_name
flash_fmt = 'loadfile {}'
elif self.bin_name is not None and os.path.isfile(self.bin_name):
flash_file = self.bin_name
flash_fmt = 'loadfile {} 0x{:x}'
else:
err = 'Cannot flash; no hex ({}) or bin ({}) files found.'
raise ValueError(err.format(self.hex_name, self.bin_name))
# Flash the selected build artifact
lines.append(flash_fmt.format(flash_file, self.flash_addr))
if self.reset_after_load:
lines.append('r') # Reset and halt the target
lines.append('g') # Start the CPU
# Reset the Debug Port CTRL/STAT register
# Under normal operation this is done automatically, but if other
# JLink tools are running, it is not performed.
# The J-Link scripting layer chains commands, meaning that writes are
# not actually performed until after the next operation. After writing
# the register, read it back to perform this flushing.
lines.append('writeDP 1 0')
lines.append('readDP 1')
lines.append('q') # Close the connection and quit
self.logger.debug('JLink commander script:')
self.logger.debug('\n'.join(lines))
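        # For illustration (path and values are hypothetical, not taken from
        # the original file), a generated commander script may look like:
        #   r
        #   erase
        #   loadfile build/zephyr/zephyr.hex
        #   g
        #   writeDP 1 0
        #   readDP 1
        #   q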
# Don't use NamedTemporaryFile: the resulting file can't be
# opened again on Windows.
with tempfile.TemporaryDirectory(suffix='jlink') as d:
fname = os.path.join(d, 'runner.jlink')
with open(fname, 'wb') as f:
f.writelines(bytes(line + '\n', 'utf-8') for line in lines)
if self.supports_nogui():
nogui = ['-nogui', '1']
else:
nogui = []
cmd = ([self.commander] + nogui +
['-if', self.iface,
'-speed', self.speed,
'-device', self.device,
'-CommanderScript', fname] +
self.tool_opt)
self.logger.info('Flashing file: {}'.format(flash_file))
self.check_call(cmd)
|
{
"content_hash": "a788d0130c683ed96f21d3fe2b12f82b",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 94,
"avg_line_length": 40.38996138996139,
"alnum_prop": 0.5396233629672116,
"repo_name": "Vudentz/zephyr",
"id": "2d6e0d77986ee7e3bae50f6ffdfa6992ea3cebaa",
"size": "10539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/west_commands/runners/jlink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "354867"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "23782049"
},
{
"name": "C++",
"bytes": "365645"
},
{
"name": "CMake",
"bytes": "574287"
},
{
"name": "EmberScript",
"bytes": "808"
},
{
"name": "HTML",
"bytes": "1631"
},
{
"name": "Haskell",
"bytes": "542"
},
{
"name": "Makefile",
"bytes": "3313"
},
{
"name": "Objective-C",
"bytes": "19541"
},
{
"name": "Perl",
"bytes": "198126"
},
{
"name": "Python",
"bytes": "1384284"
},
{
"name": "Shell",
"bytes": "75091"
},
{
"name": "SmPL",
"bytes": "19760"
},
{
"name": "Tcl",
"bytes": "3349"
},
{
"name": "VBA",
"bytes": "607"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
}
|
import Tix, os, copy
from Tkconstants import *
TCL_ALL_EVENTS = 0
def RunSample (root):
dirlist = DemoDirList(root)
dirlist.mainloop()
dirlist.destroy()
class DemoDirList:
def __init__(self, w):
self.root = w
self.exit = -1
z = w.winfo_toplevel()
z.wm_protocol("WM_DELETE_WINDOW", lambda self=self: self.quitcmd())
        # Create the tixDirList and the tixLabelEntry widgets on the top
        # of the dialog box
# bg = root.tk.eval('tix option get bg')
# adding bg=bg crashes Windows pythonw tk8.3.3 Python 2.1.0
top = Tix.Frame( w, relief=RAISED, bd=1)
# Create the DirList widget. By default it will show the current
# directory
#
#
top.dir = Tix.DirList(top)
top.dir.hlist['width'] = 40
# When the user presses the ".." button, the selected directory
# is "transferred" into the entry widget
#
top.btn = Tix.Button(top, text = " >> ", pady = 0)
# We use a LabelEntry to hold the installation directory. The user
# can choose from the DirList widget, or he can type in the directory
# manually
#
top.ent = Tix.LabelEntry(top, label="Installation Directory:",
labelside = 'top',
options = '''
entry.width 40
label.anchor w
''')
font = self.root.tk.eval('tix option get fixed_font')
# font = self.root.master.tix_option_get('fixed_font')
top.ent.entry['font'] = font
self.dlist_dir = copy.copy(os.curdir)
# This should work setting the entry's textvariable
top.ent.entry['textvariable'] = self.dlist_dir
top.btn['command'] = lambda dir=top.dir, ent=top.ent, self=self: \
self.copy_name(dir,ent)
# top.ent.entry.insert(0,'tix'+repr(self))
top.ent.entry.bind('<Return>', lambda self=self: self.okcmd () )
top.pack( expand='yes', fill='both', side=TOP)
top.dir.pack( expand=1, fill=BOTH, padx=4, pady=4, side=LEFT)
top.btn.pack( anchor='s', padx=4, pady=4, side=LEFT)
top.ent.pack( expand=1, fill=X, anchor='s', padx=4, pady=4, side=LEFT)
# Use a ButtonBox to hold the buttons.
#
box = Tix.ButtonBox (w, orientation='horizontal')
box.add ('ok', text='Ok', underline=0, width=6,
command = lambda self=self: self.okcmd () )
box.add ('cancel', text='Cancel', underline=0, width=6,
command = lambda self=self: self.quitcmd () )
box.pack( anchor='s', fill='x', side=BOTTOM)
def copy_name (self, dir, ent):
# This should work as it is the entry's textvariable
self.dlist_dir = dir.cget('value')
# but it isn't so I'll do it manually
ent.entry.delete(0,'end')
ent.entry.insert(0, self.dlist_dir)
def okcmd (self):
# tixDemo:Status "You have selected the directory" + self.dlist_dir
self.quitcmd()
def quitcmd (self):
self.exit = 0
def mainloop(self):
while self.exit < 0:
self.root.tk.dooneevent(TCL_ALL_EVENTS)
def destroy (self):
self.root.destroy()
# This "if" statement makes it possible to run this script file inside or
# outside of the main demo program "tixwidgets.py".
#
if __name__== '__main__' :
    import sys, tkMessageBox, traceback  # sys is needed for sys.exc_info() below
try:
root=Tix.Tk()
RunSample(root)
except:
t, v, tb = sys.exc_info()
text = "Error running the demo script:\n"
for line in traceback.format_exception(t,v,tb):
text = text + line + '\n'
d = tkMessageBox.showerror ( 'Tix Demo Error', text)
|
{
"content_hash": "9fa5e48255dbaba9d62d6ba158de677f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 34.3716814159292,
"alnum_prop": 0.5574150360453141,
"repo_name": "mollstam/UnrealPy",
"id": "5fd8c0de7f10b3874602e88d36dffd3cf94b542f",
"size": "4568",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Demo/tix/samples/DirList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
DESCRIPTION = 'Python Mumble for Humans™'
with open('README.md') as f:
LONG_DESCRIPTION = f.read()
VERSION = '0.1.0'
setup(
name='mumble',
version=VERSION,
packages=find_packages(),
author='Stanislav Vishnevskiy',
author_email='vishnevskiy@gmail.com',
url='https://github.com/vishnevskiy/mumblepy',
license='MIT',
include_package_data=True,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=[],
platforms=['any'],
classifiers=[],
test_suite='tests',
)
|
{
"content_hash": "976356ed353e883aa500e24f2e62e936",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 50,
"avg_line_length": 23.48,
"alnum_prop": 0.6729131175468483,
"repo_name": "vishnevskiy/mumblepy",
"id": "e135e9ecb6a8b6110018eb782f6f450ee0d80edb",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13219"
}
],
"symlink_target": ""
}
|
import ConfigParser
import sys
from impala_shell_config_defaults import impala_shell_defaults
from optparse import OptionParser
class ConfigFileFormatError(Exception):
"""Raised when the config file cannot be read by ConfigParser."""
pass
class InvalidOptionValueError(Exception):
"""Raised when an option contains an invalid value."""
pass
def parse_bool_option(value):
"""Returns True for '1' and 'True', and False for '0' and 'False'.
  Raises InvalidOptionValueError for other values.
"""
if value.lower() in ["true", "1"]:
return True
elif value.lower() in ["false", "0"]:
return False
else:
raise InvalidOptionValueError("Unexpected value in configuration file. '" + value
+ "' is not a valid value for a boolean option.")
def parse_shell_options(options, defaults, option_list):
"""Filters unknown options and converts some values from string to their corresponding
python types (booleans and None). 'option_list' contains the list of valid options,
and 'defaults' is used to deduce the type of some options (only bool at the moment).
Returns a dictionary with option names as keys and option values as values.
"""
result = {}
option_dests = dict((opt.dest, opt) for opt in option_list)
for option, value in options:
opt = option_dests.get(option)
if opt is None:
print >> sys.stderr, "WARNING: Unable to read configuration file correctly. " \
"Ignoring unrecognized config option: '%s'\n" % option
elif isinstance(defaults.get(option), bool) or \
opt.action == "store_true" or opt.action == "store_false":
result[option] = parse_bool_option(value)
elif opt.action == "append":
result[option] = value.split(",%s=" % option)
elif value.lower() == "none":
result[option] = None
else:
result[option] = value
return result
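# Illustration of the "append" handling above (using a hypothetical "append"
# option named "var"): a configured value of
#   msg1=hello,var=msg2=world
# is split on ",var=" into ["msg1=hello", "msg2=world"].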
def get_config_from_file(config_filename, option_list):
"""Reads contents of configuration file
Two config sections are supported:
"[impala]":
Overrides the defaults of the shell arguments. Unknown options are filtered
and some values are converted from string to their corresponding python types
(booleans and None).
      Multiple values of an "append" option are joined with ",<option>=" as
      the delimiter. For example: var=msg1=hello,var=msg2=world.
      Setting 'config_file' in the config file would have no effect,
      so its original value is kept.
"[impala.query_options]"
Overrides the defaults of the query options. Not validated here,
because validation will take place after connecting to impalad.
Returns a pair of dictionaries (shell_options, query_options), with option names
as keys and option values as values.
"""
config = ConfigParser.ConfigParser()
try:
config.read(config_filename)
except Exception, e:
raise ConfigFileFormatError(
"Unable to read configuration file correctly. Check formatting: %s" % e)
shell_options = {}
if config.has_section("impala"):
shell_options = parse_shell_options(config.items("impala"), impala_shell_defaults,
option_list)
if "config_file" in shell_options:
print >> sys.stderr, "WARNING: Option 'config_file' can be only set from shell."
shell_options["config_file"] = config_filename
query_options = {}
if config.has_section("impala.query_options"):
# Query option keys must be "normalized" to upper case before updating with
# options coming from command line.
query_options = dict(
[(k.upper(), v) for k, v in config.items("impala.query_options")])
return shell_options, query_options
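# An example configuration file accepted by get_config_from_file (illustrative
# values; shell option names must match the parser's "dest" names):
#   [impala]
#   verbose=true
#   impalad=localhost:21050
#   [impala.query_options]
#   EXPLAIN_LEVEL=2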
def get_option_parser(defaults):
"""Creates OptionParser and adds shell options (flags)
Default values are loaded in initially
"""
parser = OptionParser()
parser.set_defaults(**defaults)
parser.add_option("-i", "--impalad", dest="impalad",
help="<host:port> of impalad to connect to \t\t")
parser.add_option("-b", "--kerberos_host_fqdn", dest="kerberos_host_fqdn",
help="If set, overrides the expected hostname of the Impalad's "
"kerberos service principal. impala-shell will check that "
"the server's principal matches this hostname. This may be "
"used when impalad is configured to be accessed via a "
"load-balancer, but it is desired for impala-shell to talk "
"to a specific impalad directly.")
parser.add_option("-q", "--query", dest="query",
help="Execute a query without the shell")
parser.add_option("-f", "--query_file", dest="query_file",
help="Execute the queries in the query file, delimited by ;."
" If the argument to -f is \"-\", then queries are read from"
" stdin and terminated with ctrl-d.")
parser.add_option("-k", "--kerberos", dest="use_kerberos",
action="store_true", help="Connect to a kerberized impalad")
parser.add_option("-o", "--output_file", dest="output_file",
help=("If set, query results are written to the "
"given file. Results from multiple semicolon-terminated "
"queries will be appended to the same file"))
parser.add_option("-B", "--delimited", dest="write_delimited",
action="store_true",
help="Output rows in delimited mode")
parser.add_option("--print_header", dest="print_header",
action="store_true",
help="Print column names in delimited mode"
" when pretty-printed.")
parser.add_option("--output_delimiter", dest="output_delimiter",
help="Field delimiter to use for output in delimited mode")
parser.add_option("-s", "--kerberos_service_name",
dest="kerberos_service_name",
help="Service name of a kerberized impalad")
parser.add_option("-V", "--verbose", dest="verbose",
action="store_true",
help="Verbose output")
parser.add_option("-p", "--show_profiles", dest="show_profiles",
action="store_true",
help="Always display query profiles after execution")
parser.add_option("--quiet", dest="verbose",
action="store_false",
help="Disable verbose output")
parser.add_option("-v", "--version", dest="version",
action="store_true",
help="Print version information")
parser.add_option("-c", "--ignore_query_failure", dest="ignore_query_failure",
action="store_true", help="Continue on query failure")
parser.add_option("-d", "--database", dest="default_db",
help="Issues a use database command on startup \t")
parser.add_option("-l", "--ldap", dest="use_ldap",
action="store_true",
help="Use LDAP to authenticate with Impala. Impala must be configured"
" to allow LDAP authentication. \t\t")
parser.add_option("-u", "--user", dest="user",
help="User to authenticate with.")
parser.add_option("--ssl", dest="ssl",
action="store_true",
help="Connect to Impala via SSL-secured connection \t")
parser.add_option("--ca_cert", dest="ca_cert",
help=("Full path to "
"certificate file used to authenticate Impala's SSL certificate."
" May either be a copy of Impala's certificate (for self-signed "
"certs) or the certificate of a trusted third-party CA. If not set, "
"but SSL is enabled, the shell will NOT verify Impala's server "
"certificate"))
parser.add_option("--config_file", dest="config_file",
help=("Specify the configuration file to load options. "
"The following sections are used: [impala], "
"[impala.query_options]. Section names are case sensitive. "
"Specifying this option within a config file will have "
"no effect. Only specify this as an option in the commandline."
))
parser.add_option("--history_file", dest="history_file",
help=("The file in which to store shell history. This may also be "
"configured using the IMPALA_HISTFILE environment variable."))
parser.add_option("--live_summary", dest="print_summary", action="store_true",
help="Print a query summary every 1s while the query is running.")
parser.add_option("--live_progress", dest="print_progress", action="store_true",
help="Print a query progress every 1s while the query is running.")
parser.add_option("--auth_creds_ok_in_clear", dest="creds_ok_in_clear",
action="store_true", help="If set, LDAP authentication " +
"may be used with an insecure connection to Impala. " +
"WARNING: Authentication credentials will therefore be sent " +
"unencrypted, and may be vulnerable to attack.")
parser.add_option("--ldap_password_cmd", dest="ldap_password_cmd",
help="Shell command to run to retrieve the LDAP password")
parser.add_option("--var", dest="keyval", action="append",
help="Defines a variable to be used within the Impala session."
" Can be used multiple times to set different variables."
" It must follow the pattern \"KEY=VALUE\","
" KEY starts with an alphabetic character and"
" contains alphanumeric characters or underscores.")
parser.add_option("-Q", "--query_option", dest="query_options", action="append",
help="Sets the default for a query option."
" Can be used multiple times to set different query options."
" It must follow the pattern \"KEY=VALUE\","
" KEY must be a valid query option. Valid query options "
" can be listed by command 'set'.")
parser.add_option("-t", "--client_connect_timeout_ms",
help="Timeout in milliseconds after which impala-shell will time out"
" if it fails to connect to Impala server. Set to 0 to disable any"
" timeout.")
# add default values to the help text
for option in parser.option_list:
    # since the quiet flag shares its destination with the verbose flag,
    # we need to print the opposite value for it
    # (quiet defaults to False because verbose defaults to True)
if option == parser.get_option('--quiet'):
option.help += " [default: %s]" % (not defaults['verbose'])
elif option != parser.get_option('--help'):
# don't want to print default value for help
option.help += " [default: %default]"
return parser
|
{
"content_hash": "9c2655baa2856119fd49434e75395560",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 90,
"avg_line_length": 49.523809523809526,
"alnum_prop": 0.609527972027972,
"repo_name": "cloudera/Impala",
"id": "27b371ab7e9196ca542d6ad2f9b81d38d0c970e7",
"size": "12388",
"binary": false,
"copies": "1",
"ref": "refs/heads/cdh6.3.0",
"path": "shell/option_parser.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "442143"
},
{
"name": "C++",
"bytes": "13783954"
},
{
"name": "CMake",
"bytes": "203812"
},
{
"name": "CSS",
"bytes": "148115"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "5599852"
},
{
"name": "JavaScript",
"bytes": "754881"
},
{
"name": "Lex",
"bytes": "26483"
},
{
"name": "PLpgSQL",
"bytes": "3459"
},
{
"name": "Python",
"bytes": "3078349"
},
{
"name": "Roff",
"bytes": "1633"
},
{
"name": "Shell",
"bytes": "147776"
},
{
"name": "TSQL",
"bytes": "9978"
},
{
"name": "Thrift",
"bytes": "287362"
}
],
"symlink_target": ""
}
|
'''
Created on 2016/8/22
:author: hubo
'''
from vlcp.server.module import Module, call_api, depend, api
import vlcp.service.connection.httpserver as httpserver
import vlcp.service.sdn.viperflow as viperflow
from vlcp.utils.connector import TaskPool
from vlcp.utils.http import HttpHandler
from vlcp.config import defaultconfig
from email.message import Message
import json
import functools
import re
from vlcp.utils.ethernet import ip4_addr, mac_addr
from vlcp.utils.dataobject import DataObject, watch_context, dump, updater,\
set_new, ReferenceObject, request_context
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.event.runnable import RoutineContainer
from namedstruct.stdprim import create_binary, uint64
from vlcp.utils.ethernet import mac_addr_bytes
from random import randint
from vlcp.utils.networkmodel import LogicalPort, SubNet, SubNetMap
from vlcp.utils.netutils import parse_ip4_network, network_first, network_last,\
ip_in_network
from uuid import uuid1
import ast
from vlcp.event.lock import Lock
from vlcp.utils.exceptions import WalkKeyNotRetrieved
from contextlib import suppress
from vlcp.utils.walkerlib import ensure_keys
import asyncio.subprocess
class DockerInfo(DataObject):
_prefix = 'viperflow.dockerplugin.portinfo'
_indices = ("id",)
LogicalPort._register_auto_remove('DockerInfo', lambda x: [DockerInfo.default_key(x.id)])
class IPAMReserve(DataObject):
_prefix = 'viperflow.dockerplugin.ipamreserve'
_indices = ("id",)
def __init__(self, prefix=None, deleted=False):
DataObject.__init__(self, prefix=prefix, deleted=deleted)
# Should be dictionary of {IP, timeout}
self.reserved_ips = {}
SubNet._register_auto_remove('IPAMReserve', lambda x: [IPAMReserve.default_key(x.docker_ipam_poolid)] \
if hasattr(x, 'docker_ipam_poolid') else [])
class IPAMPoolReserve(DataObject):
_prefix = 'viperflow.dockerplugin.ipampool'
def __init__(self, prefix=None, deleted=False):
DataObject.__init__(self, prefix=prefix, deleted=deleted)
# Should be dictionary of {PoolId: [CIDR, timeout]}
self.reserved_pools = {}
self.nextalloc = 0
class IPAMReserveMarker(DataObject):
_prefix = 'viperflow.dockerplugin.ipamreservemarker'
_indices = ("cidr",)
def _str(b, encoding = 'ascii'):
if isinstance(b, str):
return b
else:
return b.decode(encoding)
def _routeapi(path):
def decorator(func):
@HttpHandler.route(path, method = [b'POST'])
@functools.wraps(func)
async def handler(self, env):
try:
if b'content-type' in env.headerdict:
m = Message()
m['content-type'] = _str(env.headerdict[b'content-type'])
charset = m.get_content_charset('utf-8')
else:
charset = 'utf-8'
data = await env.inputstream.read(self)
                params = json.loads(_str(data, charset))
self._logger.debug('Call %r with parameters: %r', path, params)
r = func(self, env, params)
if r is not None:
await r
except Exception as exc:
self._logger.warning('Docker API failed with exception', exc_info = True)
env.startResponse(500)
env.outputjson({'Err': str(exc)})
return handler
return decorator
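# Usage sketch for the decorator above (the route below is hypothetical; real
# handlers follow later in this file). The decorated coroutine receives the
# parsed JSON request body, and any exception becomes an HTTP 500 response
# with an {'Err': ...} JSON payload:
#
#   @_routeapi(br'/NetworkDriver\.SomeCall')
#   async def some_call(self, env, params):
#       env.outputjson({})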
import subprocess
import random
def _create_veth(ip_command, prefix, mac_address, mtu):
last_exc = None
for _ in range(0, 3):
device_name = prefix + str(random.randrange(1, 1000000))
try:
subprocess.check_call([ip_command, "link", "add", device_name]
+ (["address", mac_address] if mac_address else [])
+ (["mtu", str(mtu)] if mtu is not None else [])
+ ["type", "veth", "peer", "name", device_name + "-tap"]
+ (["mtu", str(mtu)] if mtu is not None else []))
except Exception as exc:
last_exc = exc
else:
last_exc = None
break
else:
raise last_exc
ip_output = subprocess.check_output([ip_command, "link", "show", "dev", device_name])
m = re.search(b"link/ether ([0-9a-zA-Z:]+)", ip_output)
if not m:
raise ValueError('Cannot create interface')
mac_address = m.group(1)
subprocess.check_call([ip_command, "link", "set", device_name + "-tap", "up"])
return (device_name, mac_address)
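# For reference, a sketch of the shell commands _create_veth issues (device
# name and MAC are hypothetical examples):
#   ip link add vlcp42 address 02:00:00:00:00:01 mtu 1500 \
#       type veth peer name vlcp42-tap mtu 1500
#   ip link show dev vlcp42   # the MAC is parsed from the "link/ether" field
#   ip link set vlcp42-tap up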
def _delete_veth(ip_command, device_name):
subprocess.check_call([ip_command, "link", "del", device_name + "-tap"])
def _plug_ovs(ovs_command, bridge_name, device_name, port_id):
subprocess.check_call([ovs_command, "add-port", bridge_name, device_name+"-tap", "--",
"set", "interface", device_name+"-tap", "external_ids:iface-id=" + port_id])
def _unplug_ovs(ovs_command, bridge_name, device_name):
subprocess.check_call([ovs_command, "del-port", device_name+"-tap"])
class IPAMUsingException(Exception):
pass
class RetryUpdateException(Exception):
pass
_GLOBAL_SPACE = 'VLCPGlobalAddressSpace'
class NetworkPlugin(HttpHandler):
def __init__(self, parent):
HttpHandler.__init__(self, parent.scheduler, False, parent.vhostbind)
self._parent = parent
self._logger = parent._logger
self._macbase = uint64.create(create_binary(mac_addr_bytes(self._parent.mactemplate), 8))
cidrrange = parent.cidrrange
try:
subnet, mask = parse_ip4_network(cidrrange)
if not (0 <= mask <= 24):
raise ValueError
except Exception:
self._logger.warning('Invalid CIDR range: %r. Using default 10.0.0.0/8', cidrrange)
subnet = ip4_addr('10.0.0.0')
mask = 8
self.cidrrange_subnet = subnet
self.cidrrange_mask = mask
self.cidrrange_end = (1 << (24 - mask))
self.pooltimeout = parent.pooltimeout
self.iptimeout = parent.iptimeout
self._reqid = 0
@HttpHandler.route(br'/Plugin\.Activate', method = [b'POST'])
def plugin_activate(self, env):
env.outputjson({"Implements": ["NetworkDriver", "IpamDriver"]})
@HttpHandler.route(br'/IpamDriver\.GetDefaultAddressSpaces', method = [b'POST'])
def ipam_addressspace(self, env):
env.outputjson({'LocalDefaultAddressSpace': 'VLCPLocalAddressSpace',
'GlobalDefaultAddressSpace': _GLOBAL_SPACE})
@HttpHandler.route(br'/IpamDriver\.GetCapabilities', method = [b'POST'])
def ipam_capabilities(self, env):
env.outputjson({"RequiresMACAddress": False,
"RequiresRequestReplay": False})
def _remove_staled_pools(self, reservepool, timestamp):
timeouts = dict((poolid, cidr) for poolid, (cidr, timeout) in reservepool.reserved_pools.items()
if timeout is not None and timeout < timestamp)
for k in timeouts:
del reservepool.reserved_pools[k]
removed_keys = [r for poolid, cidr in timeouts.items()
for r in (IPAMReserve.default_key(poolid), IPAMReserveMarker.default_key(cidr))]
return removed_keys
@_routeapi(br'/IpamDriver\.RequestPool')
async def ipam_requestpool(self, env, params):
if params['AddressSpace'] != _GLOBAL_SPACE:
raise ValueError('Unsupported address space: must use this IPAM driver together with network driver')
if params['V6']:
raise ValueError('IPv6 is not supported')
new_pool = IPAMReserve.create_instance(uuid1().hex)
new_pool.pool = params['Pool']
if new_pool.pool:
subnet, mask = parse_ip4_network(new_pool.pool)
new_pool.pool = ip4_addr.formatter(subnet) + '/' + str(mask)
new_pool.subpool = params['SubPool']
if new_pool.subpool:
subnet, mask = parse_ip4_network(new_pool.subpool)
new_pool.subpool = ip4_addr.formatter(subnet) + '/' + str(mask)
new_pool.options = params['Options']
if new_pool.pool:
l = Lock(('dockerplugin_ipam_request_pool', new_pool.pool), self.scheduler)
await l.lock(self)
else:
l = None
try:
while True:
fail = 0
rets = []
def _updater(keys, values, timestamp):
reservepool = values[0]
reserve_new_pool = set_new(values[1], new_pool)
remove_keys = self._remove_staled_pools(reservepool, timestamp)
used_cidrs = set(cidr for _, (cidr, _) in reservepool.reserved_pools.items())
if not reserve_new_pool.pool:
# pool is not specified
for _ in range(0, self.cidrrange_end):
reservepool.nextalloc += 1
if reservepool.nextalloc >= self.cidrrange_end:
reservepool.nextalloc = 0
new_subnet = self.cidrrange_subnet | (reservepool.nextalloc << 8)
new_cidr = ip4_addr.formatter(new_subnet) + '/24'
if new_cidr not in used_cidrs:
break
reserve_new_pool.pool = new_cidr
reserve_new_pool.subpool = ''
rets[:] = [reserve_new_pool.pool]
if reserve_new_pool.pool in used_cidrs:
# We must wait until this CIDR is released
raise IPAMUsingException
reservepool.reserved_pools[reserve_new_pool.id] = \
[reserve_new_pool.pool,
timestamp + self.pooltimeout * 1000000]
marker = IPAMReserveMarker.create_instance(reserve_new_pool.pool)
if marker.getkey() in remove_keys:
remove_keys.remove(marker.getkey())
return (tuple(keys[0:2]) + tuple(remove_keys),
(reservepool, reserve_new_pool) + (None,) * len(remove_keys))
else:
return (tuple(keys[0:2]) + (marker.getkey(),) + tuple(remove_keys),
(reservepool, reserve_new_pool, marker) + (None,) * len(remove_keys))
try:
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMPoolReserve.default_key(),
new_pool.getkey()),
'updater': _updater,
'withtime': True})
except IPAMUsingException:
# Wait for the CIDR to be released
self._reqid += 1
fail += 1
reqid = ('dockerplugin_ipam', self._reqid)
marker_key = IPAMReserveMarker.default_key(rets[0])
with request_context(reqid, self):
retvalue = await call_api(self, 'objectdb', 'get', {'key': marker_key,
'requestid': reqid,
'nostale': True})
if retvalue is not None and not retvalue.isdeleted():
await self.execute_with_timeout(self.pooltimeout, retvalue.waitif(self, lambda x: x.isdeleted()))
else:
env.outputjson({'PoolID': new_pool.id,
'Pool': rets[0],
'Data': {}})
break
finally:
if l is not None:
l.unlock()
@_routeapi(br'/IpamDriver\.ReleasePool')
async def ipam_releasepool(self, env, params):
poolid = params['PoolID']
def _updater(keys, values, timestamp):
# There are two situations for Release Pool:
# 1. The pool is already used to create a network, in this situation, the pool should be
# released with the network removal
# 2. The pool has not been used for network creation, we should release it from the reservation
reservepool = values[0]
removed_keys = self._remove_staled_pools(reservepool, timestamp)
if poolid in reservepool.reserved_pools:
removed_keys.append(IPAMReserve.default_key(poolid))
removed_keys.append(IPAMReserveMarker.default_key(reservepool.reserved_pools[poolid][0]))
del reservepool.reserved_pools[poolid]
return ((keys[0],) + tuple(removed_keys), (values[0],) + (None,) * len(removed_keys))
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMPoolReserve.default_key(),),
'updater': _updater,
'withtime': True})
env.outputjson({})
def _remove_staled_ips(self, pool, timestamp):
pool.reserved_ips = dict((addr, ts) for addr,ts in pool.reserved_ips.items()
if ts is None or ts >= timestamp)
@_routeapi(br'/IpamDriver\.RequestAddress')
async def ipam_requestaddress(self, env, params):
poolid = params['PoolID']
address = params['Address']
if address:
address = ip4_addr.formatter(ip4_addr(address))
reserve_key = IPAMReserve.default_key(poolid)
rets = []
def walker(walk, write, timestamp):
with suppress(WalkKeyNotRetrieved):
pool = walk(reserve_key)
if pool is None:
raise ValueError('PoolID %r does not exist' % (poolid,))
self._remove_staled_ips(pool, timestamp)
if hasattr(pool, 'subnetmap'):
subnetmap_key = pool.subnetmap.getkey()
subnet_key = SubNetMap._subnet.rightkey(subnetmap_key)
ensure_keys(walk, subnet_key, subnetmap_key)
subnetmap = walk(subnetmap_key)
subnet = walk(subnet_key)
else:
subnetmap = None
subnet = None
if address:
# check ip_address in cidr
if address in pool.reserved_ips:
raise ValueError("IP address " + address + " has been reserved")
if pool.subpool:
cidr = pool.subpool
else:
cidr = pool.pool
network, mask = parse_ip4_network(cidr)
addr_num = ip4_addr(address)
if not ip_in_network(addr_num, network, mask):
raise ValueError('IP address ' + address + " is not in the network CIDR")
if subnetmap is not None:
start = ip4_addr(subnet.allocated_start)
end = ip4_addr(subnet.allocated_end)
try:
assert start <= addr_num <= end
if hasattr(subnet, 'gateway'):
assert addr_num != ip4_addr(subnet.gateway)
except Exception:
raise ValueError("specified IP address " + address + " is not valid")
if str(addr_num) in subnetmap.allocated_ips:
raise ValueError("IP address " + address + " has been used")
new_address = address
else:
                    # allocate an ip_address from the cidr
gateway = None
cidr = pool.pool
if pool.subpool:
cidr = pool.subpool
network, prefix = parse_ip4_network(cidr)
start = network_first(network, prefix)
end = network_last(network, prefix)
if subnetmap is not None:
start = max(start, ip4_addr(subnet.allocated_start))
end = min(end, ip4_addr(subnet.allocated_end))
if hasattr(subnet, "gateway"):
gateway = ip4_addr(subnet.gateway)
for ip_address in range(start,end):
new_address = ip4_addr.formatter(ip_address)
if ip_address != gateway and \
(subnetmap is None or str(ip_address) not in subnetmap.allocated_ips) and \
new_address not in pool.reserved_ips:
break
else:
raise ValueError("No available IP address can be used")
pool.reserved_ips[new_address] = timestamp + self.iptimeout * 1000000
_, mask = parse_ip4_network(pool.pool)
rets[:] = [new_address + '/' + str(mask)]
write(pool.getkey(), pool)
await call_api(self, 'objectdb', 'writewalk', {'keys': (reserve_key,),
'walker': walker,
'withtime': True})
env.outputjson({'Address': rets[0], 'Data': {}})
@_routeapi(br'/IpamDriver\.ReleaseAddress')
async def ipam_releaseaddress(self, env, params):
poolid = params['PoolID']
address = params['Address']
address = ip4_addr.formatter(ip4_addr(address))
def _updater(keys, values, timestamp):
pool = values[0]
if pool is None:
return ((), ())
self._remove_staled_ips(pool, timestamp)
if address in pool.reserved_ips:
del pool.reserved_ips[address]
return ((keys[0],), (pool,))
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMReserve.default_key(poolid),),
'updater': _updater,
'withtime': True})
env.outputjson({})
@HttpHandler.route(br'/NetworkDriver\.GetCapabilities', method = [b'POST'])
def getcapabilities(self, env):
env.outputjson({"Scope": "global"})
@_routeapi(br'/NetworkDriver\.CreateNetwork')
async def createnetwork(self, env, params):
lognet_id = 'docker-' + params['NetworkID'] + '-lognet'
subnet_id = 'docker-' + params['NetworkID'] + '-subnet'
network_params = {'id': lognet_id}
if params['IPv4Data'] and 'Gateway' in params['IPv4Data'][0]:
gateway, _, _ = params['IPv4Data'][0]['Gateway'].partition('/')
else:
gateway = None
if params['IPv4Data'] and params['IPv4Data'][0]['AddressSpace'] == _GLOBAL_SPACE:
request_cidr = params['IPv4Data'][0]['Pool']
docker_ipam_poolid = True
async def _ipam_work():
# Using network driver together with IPAM driver
rets = []
def _ipam_stage(keys, values, timestamp):
reservepool = values[0]
removed_keys = self._remove_staled_pools(reservepool, timestamp)
poolids = [poolid for poolid, (cidr, _) in reservepool.reserved_pools.items()
if cidr == request_cidr]
if not poolids:
raise ValueError('Pool %r is not reserved by VLCP IPAM plugin' % (request_cidr,))
docker_ipam_poolid = poolids[0]
rets[:] = [docker_ipam_poolid]
removed_keys.append(IPAMReserveMarker.default_key(reservepool.reserved_pools[docker_ipam_poolid][0]))
del reservepool.reserved_pools[docker_ipam_poolid]
return ((keys[0],) + tuple(removed_keys), (reservepool,) + (None,) * len(removed_keys))
try:
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMPoolReserve.default_key(),),
'updater': _ipam_stage,
'withtime': True})
return (True, rets[0])
except Exception as exc:
return (False, exc)
else:
docker_ipam_poolid = None
async def _cleanup_ipam(docker_ipam_poolid):
@updater
def _remove_reserve(pool):
return (None,)
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMReserve.default_key(docker_ipam_poolid),),
'updater': _remove_reserve})
if 'Options' in params and 'com.docker.network.generic' in params['Options']:
for k,v in params['Options']['com.docker.network.generic'].items():
if k.startswith('subnet:'):
pass
elif k in ('mtu','vlanid','vni'):
network_params[k] = int(v)
elif v[:1] == '`' and v[-1:] == '`':
try:
network_params[k] = ast.literal_eval(v[1:-1])
except Exception:
network_params[k] = v
else:
network_params[k] = v
async def _create_lognet():
try:
await call_api(self, 'viperflow', 'createlogicalnetwork', network_params)
except Exception as exc:
return exc
else:
return None
if docker_ipam_poolid:
(ipam_succ, ipam_result), create_lognet_exc = \
await self.execute_all([_ipam_work(),
_create_lognet()], self)
if not ipam_succ:
if create_lognet_exc is None:
try:
await call_api(self, 'viperflow', 'deletelogicalnetwork', {'id': lognet_id})
except Exception:
pass
raise ipam_result
elif create_lognet_exc is not None:
self.subroutine(_cleanup_ipam(ipam_result))
raise create_lognet_exc
else:
docker_ipam_poolid = ipam_result
async def _ipam_work2():
def _ipam_stage2(keys, values, timestamp):
pool = values[0]
pool.subnetmap = ReferenceObject(SubNetMap.default_key(subnet_id))
self._remove_staled_ips(pool, timestamp)
if gateway is not None and gateway in pool.reserved_ips:
# Reserve forever
pool.reserved_ips[gateway] = None
return ((keys[0],), (pool,))
await call_api(self, 'objectdb', 'transact', {'keys': (IPAMReserve.default_key(docker_ipam_poolid),),
'updater': _ipam_stage2,
'withtime': True})
else:
await _create_lognet()
subnet_params = {'logicalnetwork': lognet_id,
'cidr': params['IPv4Data'][0]['Pool'],
'id': subnet_id}
if gateway is not None:
subnet_params['gateway'] = gateway
if 'Options' in params and 'com.docker.network.generic' in params['Options']:
for k,v in params['Options']['com.docker.network.generic'].items():
if k.startswith('subnet:'):
subnet_key = k[len('subnet:'):]
if subnet_key == 'disablegateway':
try:
del subnet_params['gateway']
except KeyError:
pass
elif v[:1] == '`' and v[-1:] == '`':
try:
subnet_params[subnet_key] = ast.literal_eval(v[1:-1])
except Exception:
subnet_params[subnet_key] = v
else:
subnet_params[subnet_key] = v
if docker_ipam_poolid is not None:
subnet_params['docker_ipam_poolid'] = docker_ipam_poolid
async def _create_subnet():
await call_api(self, 'viperflow', 'createsubnet', subnet_params)
if docker_ipam_poolid:
routines = [_ipam_work2(), _create_subnet()]
else:
routines = [_create_subnet()]
try:
await self.execute_all(routines)
except Exception as exc:
if docker_ipam_poolid is not None:
self.subroutine(_cleanup_ipam(docker_ipam_poolid))
try:
await call_api(self, 'viperflow', 'deletesubnet', {'id': subnet_id})
except Exception:
pass
try:
await call_api(self, 'viperflow', 'deletelogicalnetwork', {'id': lognet_id})
except Exception:
pass
raise exc
env.outputjson({})
@_routeapi(br'/NetworkDriver\.DeleteNetwork')
async def deletenetwork(self, env, params):
await call_api(self, 'viperflow', 'deletesubnet', {'id': 'docker-' + params['NetworkID'] + '-subnet'})
await call_api(self, 'viperflow', 'deletelogicalnetwork', {'id': 'docker-' + params['NetworkID'] + '-lognet'})
@_routeapi(br'/NetworkDriver\.CreateEndpoint')
async def createendpoint(self, env, params):
lognet_id = 'docker-' + params['NetworkID'] + '-lognet'
subnet_id = 'docker-' + params['NetworkID'] + '-subnet'
logport_id = 'docker-' + params['EndpointID']
logport_params = {}
if 'Options' in params:
logport_params.update(params['Options'])
logport_params['id'] = logport_id
logport_params['logicalnetwork'] = lognet_id
logport_params['subnet'] = subnet_id
mac_address = None
if 'Interface' in params:
interface = params['Interface']
if 'Address' in interface and interface['Address']:
ip,f,prefix = interface['Address'].partition('/')
logport_params['ip_address'] = ip
else:
ip = None
if 'MacAddress' in interface and interface['MacAddress']:
logport_params['mac_address'] = interface['MacAddress']
mac_address = interface['MacAddress']
if mac_address is None:
# Generate a MAC address
if ip:
# Generate MAC address based on IP address
mac_num = self._macbase
mac_num ^= ((hash(subnet_id) & 0xffffffff) << 8)
mac_num ^= ip4_addr(ip)
else:
# Generate MAC address based on Port ID and random number
mac_num = self._macbase
mac_num ^= ((hash(logport_id) & 0xffffffff) << 8)
mac_num ^= randint(0, 0xffffffff)
mac_address = mac_addr_bytes.formatter(create_binary(mac_num, 6))
logport_params['mac_address'] = mac_address
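            # Added note: the XOR mixing above derives the low bytes from the
            # subnet-id hash plus the IP (or a random number when no IP is
            # given) on top of the locally-administered template address, so
            # IP-based MACs are deterministic for a given subnet and IP.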
try:
retvalue = await call_api(self, 'viperflow', 'createlogicalport', logport_params)
except Exception as exc:
# There is an issue that docker daemon may not delete an endpoint correctly
# If autoremoveports is enabled, we remove the logical port automatically
            # Note that created veth and Openvswitch ports are not cleared because they
            # may not be on this server, so you must clean them yourself with vlcp_docker.cleanup
if self._parent.autoremoveports:
retvalue = await call_api(self, 'viperflow', 'listlogicalports', {'logicalnetwork': lognet_id,
'ip_address': ip})
if retvalue:
if retvalue[0]['id'].startswith('docker-'):
dup_pid = retvalue[0]['id']
self._logger.warning('Duplicated ports detected: %s (%s). Will remove it.',
dup_pid,
retvalue[0]['ip_address'])
await call_api(self, 'viperflow', 'deletelogicalport', {'id': dup_pid})
# Retry create logical port
retvalue = await call_api(self, 'viperflow', 'createlogicalport', logport_params)
else:
self._logger.warning('Duplicated with a non-docker port')
raise exc
else:
raise exc
else:
raise exc
ip_address = retvalue[0]['ip_address']
subnet_cidr = retvalue[0]['subnet']['cidr']
mtu = retvalue[0]['network'].get('mtu', self._parent.mtu)
_, _, prefix = subnet_cidr.partition('/')
if 'docker_ipam_poolid' in retvalue[0]['subnet']:
docker_ipam_poolid = retvalue[0]['subnet']['docker_ipam_poolid']
async def _remove_ip_reservation():
try:
# The reservation is completed, remove the temporary reservation
def _ipam_updater(keys, values, timestamp):
pool = values[0]
self._remove_staled_ips(pool, timestamp)
if ip_address in pool.reserved_ips:
del pool.reserved_ips[ip_address]
return ((keys[0],), (pool,))
await call_api(self, 'objectdb', 'transact',
{'keys': (IPAMReserve.default_key(docker_ipam_poolid),),
'updater': _ipam_updater,
'withtime': True})
except Exception:
self._logger.warning('Unexpected exception while removing reservation of IP address %r, will ignore and continue',
ip_address, exc_info = True)
self.subroutine(_remove_ip_reservation())
port_created = False
try:
device_name, _ = await self._parent.taskpool.run_task(self, lambda: _create_veth(self._parent.ipcommand,
self._parent.vethprefix,
mac_address,
mtu))
port_created = True
info = DockerInfo.create_instance(logport_id)
info.docker_port = device_name
@updater
def _updater(dockerinfo):
dockerinfo = set_new(dockerinfo, info)
return (dockerinfo,)
await call_api(self, 'objectdb', 'transact', {'keys': (info.getkey(),),
'updater': _updater})
await self._parent.taskpool.run_task(self, lambda: _plug_ovs(self._parent.ovscommand,
self._parent.ovsbridge,
device_name,
logport_id))
result = {'Interface': {}}
if 'MacAddress' not in interface:
result['Interface']['MacAddress'] = mac_address
if 'Address' not in interface:
result['Interface']['Address'] = ip_address + '/' + prefix
env.outputjson(result)
except Exception as exc:
try:
if port_created:
await self._parent.taskpool.run_task(self, lambda: _delete_veth(self._parent.ipcommand,
device_name))
except Exception:
pass
try:
await call_api(self, 'viperflow', 'deletelogicalport', {'id': logport_id})
except Exception:
pass
raise exc
@_routeapi(br'/NetworkDriver\.EndpointOperInfo')
def operinfo(self, env, params):
env.outputjson({'Value':{}})
@_routeapi(br'/NetworkDriver\.DeleteEndpoint')
async def delete_endpoint(self, env, params):
logport_id = 'docker-' + params['EndpointID']
dockerinfo = await call_api(self, 'dockerplugin', 'getdockerinfo', {'portid': logport_id})
if not dockerinfo or not dockerinfo[0]:
raise KeyError(repr(params['EndpointID']) + ' not found')
dockerinfo_result = dockerinfo[0]
docker_port = dockerinfo_result['docker_port']
def _unplug_port(ovs_command = self._parent.ovscommand,
bridge_name = self._parent.ovsbridge,
device_name = docker_port,
ip_command = self._parent.ipcommand):
try:
_unplug_ovs(ovs_command, bridge_name, device_name)
except Exception:
self._logger.warning('Remove veth from OpenvSwitch failed', exc_info = True)
try:
_delete_veth(ip_command, device_name)
except Exception:
self._logger.warning('Delete veth failed', exc_info = True)
await self._parent.taskpool.run_task(self, _unplug_port)
await call_api(self, 'viperflow', 'deletelogicalport', {'id': logport_id})
env.outputjson({})
@_routeapi(br'/NetworkDriver\.Join')
async def endpoint_join(self, env, params):
logport_id = 'docker-' + params['EndpointID']
logport_results, dockerinfo_results = \
await self.execute_all([call_api(self, 'viperflow', 'listlogicalports', {'id': logport_id}),
call_api(self, 'dockerplugin', 'getdockerinfo', {'portid': logport_id})])
if not logport_results:
raise KeyError(repr(params['EndpointID']) + ' not found')
logport_result = logport_results[0]
if dockerinfo_results:
docker_port = dockerinfo_results[0]['docker_port']
else:
docker_port = logport_result['docker_port']
result = {'InterfaceName': {'SrcName': docker_port,
'DstPrefix': self._parent.dstprefix}}
if 'subnet' in logport_result:
subnet = logport_result['subnet']
if 'gateway' in subnet:
result['Gateway'] = subnet['gateway']
if 'host_routes' in subnet:
try:
def generate_route(r):
r_g = {'Destination': r[0]}
if ip4_addr(r[1]) == 0:
r_g['RouteType'] = 1
else:
r_g['RouteType'] = 0
r_g['NextHop'] = r[1]
return r_g
result['StaticRoutes'] = [generate_route(r)
for r in subnet['host_routes']]
except Exception:
self._logger.warning('Generate static routes failed', exc_info = True)
sandboxkey = params['SandboxKey']
@updater
def _updater(dockerinfo):
if dockerinfo is None:
return ()
else:
dockerinfo.docker_sandbox = sandboxkey
return (dockerinfo,)
await call_api(self, 'objectdb', 'transact', {'keys': [DockerInfo.default_key(logport_id)],
'updater': _updater})
env.outputjson(result)
@_routeapi(br'/NetworkDriver\.Leave')
async def endpoint_leave(self, env, params):
logport_id = 'docker-' + params['EndpointID']
@updater
def _updater(dockerinfo):
if dockerinfo is None:
return ()
else:
dockerinfo.docker_sandbox = None
return (dockerinfo,)
await call_api(self, 'objectdb', 'transact', {'keys': [DockerInfo.default_key(logport_id)],
'updater': _updater})
env.outputjson({})
@_routeapi(br'/NetworkDriver\.DiscoverNew')
def discover_new(self, env, params):
env.outputjson({})
@_routeapi(br'/NetworkDriver\.DiscoverDelete')
def discover_delete(self, env, params):
env.outputjson({})
@defaultconfig
@depend(httpserver.HttpServer, viperflow.ViperFlow, objectdb.ObjectDB)
class DockerPlugin(Module):
'''
Integrate VLCP with Docker
'''
# Bind Docker API EndPoint (a HTTP service) to specified vHost
_default_vhostbind = 'docker'
# OpenvSwitch bridge used in this server
_default_ovsbridge = 'dockerbr0'
# Auto-created veth device prefix
_default_vethprefix = 'vlcp'
# Path to ``ip`` command
_default_ipcommand = 'ip'
# Path to ``ovs-vsctl`` command
_default_ovscommand = 'ovs-vsctl'
# vNIC prefix used in the docker container
_default_dstprefix = 'eth'
    # A template MAC address used when generating MAC addresses
_default_mactemplate = '02:00:00:00:00:00'
# Default MTU used for networks
_default_mtu = 1500
    # Try to remove the old port if it was not cleaned up correctly
_default_autoremoveports = True
    # IPAM pool reserve timeout. If a reserved pool is not used to
    # create a network before the timeout, it is automatically released.
_default_pooltimeout = 60
    # IPAM IP reserve timeout. If an IP address is not used to
    # create an endpoint before the timeout, it is automatically released.
_default_iptimeout = 60
# The default address space used when subnet is not specified.
# A C-class (like 10.0.1.0/24) subnet will be assigned for each
# network.
_default_cidrrange = '10.0.0.0/8'
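    # A minimal sketch of overriding the defaults above in a VLCP configuration
    # file; the ``module.dockerplugin.<name>=<value>`` syntax is assumed from
    # VLCP's configuration conventions and the values are illustrative:
    #   module.dockerplugin.ovsbridge='br-int'
    #   module.dockerplugin.cidrrange='172.16.0.0/12'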
def __init__(self, server):
Module.__init__(self, server)
taskpool = TaskPool(self.scheduler)
self.taskpool = taskpool
self.routines.append(self.taskpool)
self.routines.append(NetworkPlugin(self))
self.apiroutine = RoutineContainer(self.scheduler)
self._reqid = 0
self.createAPI(api(self.getdockerinfo, self.apiroutine))
async def load(self, container):
@updater
def init_ipam(poolreserve):
if poolreserve is None:
return (IPAMPoolReserve(),)
else:
return ()
await call_api(container, 'objectdb', 'transact', {'keys': (IPAMPoolReserve.default_key(),),
'updater': init_ipam})
await Module.load(self, container)
async def _dumpkeys(self, keys):
self._reqid += 1
reqid = ('dockerplugin',self._reqid)
with request_context(reqid, self.apiroutine):
retobjs = await call_api(self.apiroutine,'objectdb','mget',{'keys':keys,'requestid':reqid})
return [dump(v) for v in retobjs]
async def getdockerinfo(self, portid):
"Get docker info for specified port"
if not isinstance(portid, str) and hasattr(portid, '__iter__'):
return await self._dumpkeys([DockerInfo.default_key(p) for p in portid])
else:
return await self._dumpkeys([DockerInfo.default_key(portid)])
|
{
"content_hash": "5281a697b3365ec97f3f11170583fa9c",
"timestamp": "",
"source": "github",
"line_count": 833,
"max_line_length": 134,
"avg_line_length": 48.48859543817527,
"alnum_prop": 0.5171201505285831,
"repo_name": "hubo1016/vlcp-docker-plugin",
"id": "68a10e5f6781e9196e2fd831c1a3e9641400e6ce",
"size": "40391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vlcp_docker/dockerplugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69480"
}
],
"symlink_target": ""
}
|
from ...command import SubCommand
from ...wsgi import WSGIApplication
from ...compat import text_type, raw_input, urlparse
import sys
import json
try:
import readline
except ImportError:
pass
class Email(SubCommand):
"""Manage email"""
help = "manage email"
def add_arguments(self, parser):
def add_common(parser):
parser.add_argument(
"-l",
"--location",
dest="location",
default=None,
metavar="PATH",
help="location of the Moya server code",
)
parser.add_argument(
"-i",
"--ini",
dest="settings",
default=None,
metavar="SETTINGSPATH",
help="relative path to settings file",
)
subparsers = parser.add_subparsers(
title="email sub-commands", dest="email_subcommand", help="sub-command help"
)
add_common(
subparsers.add_parser(
"list",
help="list smtp servers",
description="display a list of smtp servers in the project",
)
)
add_common(
subparsers.add_parser(
"check",
help="check smtp servers",
description="list smtp servers and check connectivity",
)
)
add_common(
subparsers.add_parser(
"test",
help="send a test email",
description="send a test email (useful for debugging)",
)
)
render_parser = subparsers.add_parser(
"render",
help="render an email",
description="render and email to the console",
)
add_common(render_parser)
render_parser.add_argument(
dest="emailelement", metavar="ELEMENTREF", help="email element to render"
)
render_parser.add_argument(
"--text", dest="text", action="store_true", help="render email text"
)
render_parser.add_argument(
"--html", dest="html", action="store_true", help="render email html"
)
render_parser.add_argument(
"-b",
"--open-in-browser",
dest="open",
action="store_true",
help="open the email in the browser",
)
render_parser.add_argument(
"--let", dest="params", nargs="*", help="parameters in the form foo=bar"
)
render_parser.add_argument(
"--data",
dest="datafile",
default=None,
help="path to JSON file containing email template data",
)
render_parser.add_argument(
"--url",
dest="url",
default="http://127.0.0.1:8000",
help="emulate email sent from this URL",
)
return parser
def run(self):
getattr(self, "sub_" + self.args.email_subcommand)()
def sub_list(self):
application = WSGIApplication(
self.location,
self.get_settings(),
disable_autoreload=True,
master_settings=self.master_settings,
)
archive = application.archive
from ...console import Cell
table = [
(
Cell("name", bold=True),
Cell("default?", bold=True),
Cell("host", bold=True),
Cell("port", bold=True),
Cell("username", bold=True),
Cell("password", bold=True),
)
]
for k, server in sorted(archive.mail_servers.items()):
table.append(
[
k,
"yes" if server.default else "no",
server.host,
server.port,
server.username or "",
server.password or "",
]
)
self.console.table(table)
def sub_check(self):
application = WSGIApplication(self.location, self.get_settings())
archive = application.archive
from ...console import Cell
table = [
(
Cell("name", bold=True),
Cell("host", bold=True),
Cell("port", bold=True),
Cell("status", bold=True),
)
]
for k, server in sorted(archive.mail_servers.items()):
try:
server.check()
except Exception as e:
status = Cell(text_type(e), bold=True, fg="red")
else:
status = Cell("OK", bold=True, fg="green")
table.append([k, server.host, server.port, status])
self.console.table(table)
def sub_test(self):
application = WSGIApplication(self.location, self.get_settings())
archive = application.archive
servers = ", ".join(archive.mail_servers.keys())
server_name = raw_input("Which server? ({}) ".format(servers))
if not server_name:
            server_name = list(archive.mail_servers.keys())[0]
to = raw_input("To email: ")
_from = raw_input("From email: ")
subject = raw_input("Subject: ")
body = raw_input("Body: ")
from moya.mail import Email
email = Email()
email.set_from(_from)
email.add_to(to)
email.subject = subject
email.text = body
server = archive.mail_servers[server_name]
self.console.div()
self.console.text(
"Sending mail with server {}".format(server), fg="black", bold=True
)
server.send(email, fail_silently=False)
self.console.text("Email was sent successfully", fg="green", bold=True)
def sub_render(self):
application = WSGIApplication(
self.location, self.get_settings(), disable_autoreload=True
)
archive = application.archive
args = self.args
try:
app, element = archive.get_element(args.emailelement)
except Exception as e:
self.error(text_type(e))
return -1
params = {}
if args.params:
for p in args.params:
if "=" not in p:
sys.stderr.write(
"{} is not in the form <name>=<expression>\n".format(p)
)
return -1
k, v = p.split("=", 1)
params[k] = v
if args.datafile:
try:
with open(args.datafile, "rb") as f:
td_json = f.read()
except IOError as e:
self.error(e)
return -1
td = json.loads(td_json)
params.update(td)
from moya.mail import Email
from moya.context import Context
email = Email(data=params)
email.app = app
email.subject = "Render Email"
email.email_element = element
url_parsed = urlparse(args.url)
host = "{}://{}".format(url_parsed.scheme, url_parsed.netloc)
context = Context()
archive.populate_context(context)
context[".app"] = app
from moya.request import MoyaRequest
request = MoyaRequest.blank(args.url)
application.server._populate_context(archive, context, request)
application.server.set_site(archive, context, request)
context.root["settings"] = archive.settings
email_callable = archive.get_callable_from_element(element, app=app)
try:
email_callable(context, app=email.app, email=email)
except Exception as e:
if hasattr(e, "__moyaconsole__"):
e.__moyaconsole__(self.console)
return -1
raise
if not args.html and not args.text:
table = []
table.append(["text", email.text])
table.append(["html", email.html])
self.console.table(table)
elif args.text:
self.console(email.text)
else:
self.console(email.html)
if args.open:
import webbrowser
import tempfile
path = tempfile.mktemp(prefix="moyaemail", suffix=".html")
with open(path, "wt") as f:
f.write(email.html)
webbrowser.open("file://{}".format(path))
|
{
"content_hash": "7556ea0058670e7c4ce5aefe640eae98",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 88,
"avg_line_length": 30.66906474820144,
"alnum_prop": 0.5025803424818203,
"repo_name": "moyaproject/moya",
"id": "900f3eb007b0e95a3afb416905124c0dc47dfb24",
"size": "8526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moya/command/sub/mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "662"
},
{
"name": "CSS",
"bytes": "98490"
},
{
"name": "Genshi",
"bytes": "949"
},
{
"name": "HTML",
"bytes": "14279826"
},
{
"name": "JavaScript",
"bytes": "369773"
},
{
"name": "Myghty",
"bytes": "774"
},
{
"name": "Python",
"bytes": "1828220"
},
{
"name": "Shell",
"bytes": "165"
},
{
"name": "Smalltalk",
"bytes": "154"
}
],
"symlink_target": ""
}
|
"""Common code tests."""
from datetime import timedelta
from pyHS100 import SmartDeviceException
from homeassistant.components.tplink.common import async_add_entities_retry
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import MagicMock
async def test_async_add_entities_retry(hass: HomeAssistantType):
"""Test interval callback."""
async_add_entities_callback = MagicMock()
# The objects that will be passed to async_add_entities_callback.
objects = ["Object 1", "Object 2", "Object 3", "Object 4"]
# For each call to async_add_entities_callback, the following side effects
    # will be triggered in order. This set of side effects accurately simulates
# 3 attempts to add all entities while also handling several return types.
# To help understand what's going on, a comment exists describing what the
# object list looks like throughout the iterations.
callback_side_effects = [
# OB1, OB2, OB3, OB4
False,
False,
True, # Object 3
False,
# OB1, OB2, OB4
True, # Object 1
SmartDeviceException("My error"),
False,
# OB2, OB4
True, # Object 2
True, # Object 4
]
callback = MagicMock(side_effect=callback_side_effects)
await async_add_entities_retry(
hass,
async_add_entities_callback,
objects,
callback,
interval=timedelta(milliseconds=100),
)
await hass.async_block_till_done()
assert callback.call_count == len(callback_side_effects)
async def test_async_add_entities_retry_cancel(hass: HomeAssistantType):
"""Test interval callback."""
async_add_entities_callback = MagicMock()
callback_side_effects = [
False,
False,
True, # Object 1
False,
True, # Object 2
SmartDeviceException("My error"),
False,
True, # Object 3
True, # Object 4
]
callback = MagicMock(side_effect=callback_side_effects)
objects = ["Object 1", "Object 2", "Object 3", "Object 4"]
cancel = await async_add_entities_retry(
hass,
async_add_entities_callback,
objects,
callback,
interval=timedelta(milliseconds=100),
)
cancel()
await hass.async_block_till_done()
assert callback.call_count == 4
|
{
"content_hash": "59732b762c41126ba6092e1f51e4107a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 28.975609756097562,
"alnum_prop": 0.6443602693602694,
"repo_name": "pschmitt/home-assistant",
"id": "a2bd7ef87ff2b1db9f0154a11f1e4240a9336ada",
"size": "2376",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/tplink/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
}
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test tunneling based on SNI
'''
# Define default ATS
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=True)
server_foo = Test.MakeOriginServer("server_foo", ssl=True)
server_bar = Test.MakeOriginServer("server_bar", ssl=True)
server2 = Test.MakeOriginServer("server2")
dns = Test.MakeDNServer("dns")
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_foo_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "foo ok"}
response_bar_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "bar ok"}
server_foo.addResponse("sessionlog.json", request_foo_header, response_foo_header)
server_bar.addResponse("sessionlog.json", request_bar_header, response_bar_header)
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
dns.addRecords(records={"localhost": ["127.0.0.1"]})
dns.addRecords(records={"one.testmatch": ["127.0.0.1"]})
dns.addRecords(records={"two.example.one": ["127.0.0.1"]})
# Need no remap rules. Everything should be processed by sni
# Make sure the TS server certs are different from the origin certs
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key'
)
# Case 1, global config policy=permissive properties=signature
# override for foo.com policy=enforced properties=all
ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.connect_ports': '{0} {1} {2}'.format(ts.Variables.ssl_port,
server_foo.Variables.SSL_Port,
server_bar.Variables.SSL_Port),
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL'})
# foo.com should not terminate. Just tunnel to server_foo
# bar.com should terminate. Forward its tcp stream to server_bar
# empty SNI should tunnel to server_bar
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: foo.com',
" tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port),
"- fqdn: bob.*.com",
" tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port),
"- fqdn: '*.match.com'",
" tunnel_route: $1.testmatch:{0}".format(server_foo.Variables.SSL_Port),
"- fqdn: '*.ok.*.com'",
" tunnel_route: $2.example.$1:{0}".format(server_foo.Variables.SSL_Port),
"- fqdn: ''", # No SNI sent
" tunnel_route: localhost:{0}".format(server_bar.Variables.SSL_Port)
])
tr = Test.AddTestRun("foo.com Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression(
"Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from bar")
tr = Test.AddTestRun("bob.bar.com Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'bob.bar.com:{0}:127.0.0.1' -k https://bob.bar.com:{0}".format(
ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression(
"Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from bar")
tr = Test.AddTestRun("bar.com no Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'bar.com:{0}:127.0.0.1' -k https://bar.com:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("Not Found on Accelerato", "Terminates on on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("ATS", "Terminate on Traffic Server")
tr = Test.AddTestRun("no SNI Tunnel-test")
tr.Processes.Default.Command = "curl -v -k https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression(
"Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("bar ok", "Should get a response from bar")
tr = Test.AddTestRun("one.match.com Tunnel-test")
tr.Processes.Default.Command = "curl -vvv --resolve 'one.match.com:{0}:127.0.0.1' -k https://one.match.com:{0}".format(
ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression(
"Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from tm")
tr = Test.AddTestRun("one.ok.two.com Tunnel-test")
tr.Processes.Default.Command = "curl -vvv --resolve 'one.ok.two.com:{0}:127.0.0.1' -k https:/one.ok.two.com:{0}".format(
ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression(
"Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from tm")
# Update sni file and reload
tr = Test.AddTestRun("Update config files")
# Update the SNI config
snipath = ts.Disk.sni_yaml.AbsPath
recordspath = ts.Disk.records_config.AbsPath
tr.Disk.File(snipath, id="sni_yaml", typename="ats:config"),
tr.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: bar.com',
' tunnel_route: localhost:{0}'.format(server_bar.Variables.SSL_Port),
])
tr.StillRunningAfter = ts
tr.StillRunningAfter = server_foo
tr.StillRunningAfter = server_bar
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = 'echo Updated configs'
tr.Processes.Default.ReturnCode = 0
trreload = Test.AddTestRun("Reload config")
trreload.StillRunningAfter = ts
trreload.StillRunningAfter = server_foo
trreload.StillRunningAfter = server_bar
trreload.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
trreload.Processes.Default.Env = ts.Env
trreload.Processes.Default.ReturnCode = 0
# Should terminate on traffic_server (not tunnel)
tr = Test.AddTestRun("foo.com no Tunnel-test")
tr.StillRunningAfter = ts
# Wait for the reload to complete by running the sni_reload_done test
tr.Processes.Default.StartBefore(server2, ready=When.FileContains(ts.Disk.diags_log.Name, 'sni.yaml finished loading', 2))
tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port)
tr.Processes.Default.Streams.All += Testers.ContainsExpression("Not Found on Accelerato", "Terminates on on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("ATS", "Terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.TimeOut = 30
# Should tunnel to server_bar
tr = Test.AddTestRun("bar.com Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'bar.com:{0}:127.0.0.1' -k https://bar.com:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Terminates on on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("bar ok", "Should get a response from bar")
|
{
"content_hash": "46a4696b9d69986d7449d880c8292e88",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 130,
"avg_line_length": 58.56872037914692,
"alnum_prop": 0.7219614824405244,
"repo_name": "duke8253/trafficserver",
"id": "f35f554a0d7f932ada28c64d2ba633478ce99cf1",
"size": "12358",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/gold_tests/tls/tls_tunnel.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1439145"
},
{
"name": "C++",
"bytes": "17151214"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "222699"
},
{
"name": "Makefile",
"bytes": "255682"
},
{
"name": "Objective-C",
"bytes": "13042"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1561539"
},
{
"name": "Rust",
"bytes": "2591"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "178570"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim Script",
"bytes": "192"
}
],
"symlink_target": ""
}
|
import os
import frappe
from frappe.core.doctype.data_import.data_import import export_json, import_doc
from frappe.desk.form.save import savedocs
from frappe.model.delete_doc import delete_doc
from frappe.tests.utils import FrappeTestCase
class TestFixtureImport(FrappeTestCase):
def create_new_doctype(self, DocType: str) -> None:
file = frappe.get_app_path("frappe", "custom", "fixtures", f"{DocType}.json")
file = open(file)
doc = file.read()
file.close()
savedocs(doc, "Save")
def insert_dummy_data_and_export(self, DocType: str, dummy_name_list: list[str]) -> str:
for name in dummy_name_list:
doc = frappe.get_doc({"doctype": DocType, "member_name": name})
doc.insert()
path_to_exported_fixtures = os.path.join(os.getcwd(), f"{DocType}_data.json")
export_json(DocType, path_to_exported_fixtures)
return path_to_exported_fixtures
def test_fixtures_import(self):
self.assertFalse(frappe.db.exists("DocType", "temp_doctype"))
self.create_new_doctype("temp_doctype")
dummy_name_list = ["jhon", "jane"]
path_to_exported_fixtures = self.insert_dummy_data_and_export("temp_doctype", dummy_name_list)
frappe.db.truncate("temp_doctype")
import_doc(path_to_exported_fixtures)
delete_doc("DocType", "temp_doctype", delete_permanently=True)
os.remove(path_to_exported_fixtures)
self.assertEqual(frappe.db.count("temp_doctype"), len(dummy_name_list))
data = frappe.get_all("temp_doctype", "member_name")
frappe.db.truncate("temp_doctype")
imported_data = set()
for item in data:
imported_data.add(item["member_name"])
self.assertEqual(set(dummy_name_list), imported_data)
def test_singles_fixtures_import(self):
self.assertFalse(frappe.db.exists("DocType", "temp_singles"))
self.create_new_doctype("temp_singles")
dummy_name_list = ["Phoebe"]
path_to_exported_fixtures = self.insert_dummy_data_and_export("temp_singles", dummy_name_list)
singles_doctype = frappe.qb.DocType("Singles")
truncate_query = (
frappe.qb.from_(singles_doctype).delete().where(singles_doctype.doctype == "temp_singles")
)
truncate_query.run()
import_doc(path_to_exported_fixtures)
delete_doc("DocType", "temp_singles", delete_permanently=True)
os.remove(path_to_exported_fixtures)
data = frappe.db.get_single_value("temp_singles", "member_name")
truncate_query.run()
self.assertEqual(data, dummy_name_list[0])
|
{
"content_hash": "de35e6df8afb26600c264a004405dfbc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 96,
"avg_line_length": 30.705128205128204,
"alnum_prop": 0.7219206680584551,
"repo_name": "StrellaGroup/frappe",
"id": "b9bd4550b25e1920870ae8986f2845eef59bb0ee",
"size": "2395",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_fixture_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250858"
},
{
"name": "JavaScript",
"bytes": "2515308"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3605011"
},
{
"name": "SCSS",
"bytes": "261492"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
}
|
import statistics
import math
import os
def convert_ipaddress(ipint):
"""Function for converting a 32 bit integer to a human readable ip address
https://geekdeck.wordpress.com/2010/01/19/converting-a-decimal-number-to-ip-address-in-python/
:param ipint: 32 bit int ip address
:type ipint: integer
:return: human readable ip address
"""
ipint = int(ipint)
ip=""
for i in range(4):
ip1 = ""
for j in range(8):
ip1=str(ipint % 2)+ip1
ipint = ipint >> 1
ip = str(int(ip1,2)) + "." + ip
ip = ip.strip(".")
return ip
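# Example (illustrative value): 3232235777 is 0xC0A80101, so
# convert_ipaddress(3232235777) returns '192.168.1.1'.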
def filter(line):
"""Function to filter bytes from a given line.
:param line: a given string
:type line: bytes
"""
line = line.replace(b'\xff',bytes('','utf-8')).replace(b'\xfe',bytes('','utf-8'))
return line
def data_statistics(data):
"""Calculates mean and stdev for the given data.
    :param data: sequence of numeric values
    :type data: list
    :return: mean and standard deviation
"""
mean = statistics.mean(data)
if len(data) >= 2:
stdev = statistics.stdev(data)
else:
stdev = 0
return mean, stdev
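# Example: data_statistics([1, 2, 3]) returns (2, 1.0).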
def pythagoras(x1, y1, x2, y2):
"""Calculates the distance between two points.
:param x1: x coordinate
:type x1: integer
:param y1: y coordinate
:type y1: integer
:param x2: x coordinate
:type x2: integer
:param y2: y coordinate
:type y2: integer
:return: distance between the two points
"""
x_diff = x1 - x2
y_diff = y1 - y2
distance = math.pow(x_diff,2) + math.pow(y_diff, 2)
distance = math.sqrt(distance)
return distance
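# Example: pythagoras(0, 0, 3, 4) returns 5.0 (the classic 3-4-5 right triangle).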
def check_accept(flags, signatures, signature, x, y):
"""Function to check if the given x and y falls inside of the given signature.
:param signatures: dictionary of signatures
:type signatures: dictionary
:param signature: selected signature
:type signature: string
:param x: x coordinate
:type x: int
:param y: y coordinate
:type y: int
:return: accepted boolean
"""
accepted = False
if flags['packets'] == True and flags['bytes'] == True and\
(x >= float(signatures[signature]['packets_low']) and x <= float(signatures[signature]['packets_high'])) and\
(y >= float(signatures[signature]['bytes_low']) and y <= float(signatures[signature]['bytes_high'])):
accepted = True
elif flags['packets'] == True and flags['bytes'] == False and\
(x >= float(signatures[signature]['packets_low']) and x <= float(signatures[signature]['packets_high'])):
accepted = True
elif flags['packets'] == False and flags['bytes'] == True and\
(y >= float(signatures[signature]['bytes_low']) and y <= float(signatures[signature]['bytes_high'])):
accepted = True
elif flags['packets'] == False and flags['bytes'] == False and\
(x >= float(signatures[signature]['packets_low']) and x <= float(signatures[signature]['packets_high'])):
accepted = True
return accepted
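# Example (illustrative): with flags = {'packets': True, 'bytes': False} only the
# x value has to fall inside [packets_low, packets_high]; the y value is ignored.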
def nfdump_file_notation(nfdump_files):
"""Converts our file notation to nfdump file notation.
:param nfdump_files: specifies either a single file or a range of files in a directory
:type nfdump_files: string
:return: nfdump file notation
"""
nfdump_files = sorted(nfdump_files)
if len(nfdump_files) > 1:
begin = nfdump_files[0]
end = nfdump_files[-1]
begin = os.path.split(begin)
basedir = begin[0]
begin = begin[1]
end = os.path.split(end)[1]
nfdump_notation = "-R {0}/{1}:{2}".format(basedir, begin, end)
else:
nfdump_notation = "-r {0}".format(nfdump_files[0])
return nfdump_notation
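# Example (illustrative file names):
#   nfdump_file_notation(['/data/nfcapd.202001011200', '/data/nfcapd.202001011205'])
#   returns '-R /data/nfcapd.202001011200:nfcapd.202001011205', while a single
#   file returns '-r /data/nfcapd.202001011200'.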
def time_statistics(*time):
"""Calculates time statistics. Time is a list of variable length of unix timestamps.
This function calculates the total duration (first value is assumed beginning, last value is assumed end).
And from there it calculates the time spent in each phase.
:param *time: unit time stamps
:type *time: int
:return: pre-formatted line
"""
diff = []
percentages = []
total_time = time[-1] - time[0]
for i, item in enumerate(time):
if i > 0:
diff_time = item - time[i-1]
diff.append(diff_time)
percent = (diff_time / total_time)*100
percentages.append(percent)
total = "Total run time: {0} seconds.".format(total_time)
percentage = ""
for i, item in enumerate(percentages):
if percentage == "":
percentage = "{0:.3f}% of the time was in phase {{{1}}}".format(item, i)
else:
percentage = "{0}, {1:.3f}% of the time was in {{{2}}} phase".format(percentage, item, i)
line = "\n".join([total, percentage])
return line
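# Example (hypothetical timestamps): time_statistics(0, 30, 100) reports a total
# run time of 100 seconds, with 30.000% of the time in phase {0} and 70.000% in
# the {1} phase.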
def automation_signatures(signatures, config):
"""Function for converting configured signatures into numbers 'main.py' understands.
:param signatures: dictionary of available signatures
:type signatures: dictionary
:param config: configured signatures
:type config: string
:return: a string of signatures main.py understands
"""
config = config.replace(" ", "").split(",")
numbers = []
for i,signature in enumerate(sorted(signatures)):
if signature in config:
numbers.append(str(i+1))
numbers = ",".join(numbers)
return numbers
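# Example (illustrative): with signatures {'portscan': ..., 'synflood': ...},
# automation_signatures(signatures, 'synflood') returns '2', since sorted
# signature names are numbered from 1.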
|
{
"content_hash": "35c131e5f9569fe68e09a4158e2a48ff",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 117,
"avg_line_length": 30.813253012048193,
"alnum_prop": 0.6566959921798632,
"repo_name": "ut-dacs/https-ids",
"id": "51e57e129b8fe5b27f57213e88ff693672b77657",
"size": "5256",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7095"
},
{
"name": "Python",
"bytes": "187118"
},
{
"name": "Shell",
"bytes": "6716"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .diffusion import create_mrtrix_dti_pipeline
from .connectivity_mapping import create_connectivity_pipeline
from .group_connectivity import (create_group_connectivity_pipeline)
|
{
"content_hash": "c5e15f115e7b011a22f409656beb1550",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 68,
"avg_line_length": 55.25,
"alnum_prop": 0.8371040723981901,
"repo_name": "FCP-INDI/nipype",
"id": "044007abb1d402dbe98deed3871def2046cb3397",
"size": "221",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nipype/workflows/dmri/mrtrix/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5280923"
},
{
"name": "Shell",
"bytes": "1958"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
"""Tests for numpy_utils."""
from etils import enp
import jax
import jax.numpy as jnp
import numpy as np
import pytest
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
# Activate the fixture
set_tnp = enp.testing.set_tnp
def fn(x):
xnp = enp.get_np_module(x)
y = xnp.sum(x) + x.mean()
return x + y
def test_lazy():
lazy = enp.numpy_utils.lazy
assert lazy.has_tf
assert lazy.has_jax
assert lazy.tf is tf
assert lazy.jax is jax
assert lazy.jnp is jnp
assert lazy.np is np
assert lazy.is_array(np.array([123]))
assert lazy.is_np(np.array([123]))
assert not lazy.is_np(jnp.array([123]))
assert lazy.is_array(tf.constant([123]))
assert lazy.is_tf(tf.constant([123]))
assert not lazy.is_tf(np.array([123]))
assert lazy.is_array(jnp.array([123]))
assert lazy.is_jax(jnp.array([123]))
assert not lazy.is_jax(np.array([123]))
assert lazy.get_xnp(jnp.array([123])) is jnp
assert lazy.get_xnp(tf.constant([123])) is tnp
assert lazy.get_xnp(np.array([123])) is np
assert lazy.get_xnp([123], strict=False) is np
with pytest.raises(TypeError, match='Cannot infer the numpy'):
lazy.get_xnp([123])
with pytest.raises(TypeError, match='Cannot infer the numpy'):
lazy.get_xnp(None, strict=False)
def test_lazy_dtype():
lazy = enp.numpy_utils.lazy
assert lazy.is_np_dtype(np.int32)
assert lazy.is_np_dtype(np.dtype('int32'))
assert lazy.is_jax_dtype(jnp.int32)
assert lazy.is_jax_dtype(np.int32)
assert lazy.is_tf_dtype(tf.int32)
assert not lazy.is_np_dtype(tf.int32)
assert not lazy.is_jax_dtype(tf.int32)
assert not lazy.is_tf_dtype(np.int32)
assert not lazy.is_np_dtype(int)
assert not lazy.is_jax_dtype(int)
assert not lazy.is_tf_dtype(int)
assert lazy.as_dtype(tf.int32) == np.dtype('int32')
assert lazy.as_dtype(jnp.int32) == np.dtype('int32')
assert lazy.as_dtype(np.int32) == np.dtype('int32')
assert lazy.as_dtype(np.dtype('int32')) == np.dtype('int32')
with pytest.raises(TypeError, match='Invalid dtype'):
lazy.as_dtype(123)
def test_dtype_from_array_builtins():
lazy = enp.numpy_utils.lazy
assert lazy.dtype_from_array(True, strict=False) == np.dtype('bool')
assert lazy.dtype_from_array(123, strict=False) is None
assert lazy.dtype_from_array(123.0, strict=False) is None
assert lazy.dtype_from_array([123.0], strict=False) is None
with pytest.raises(TypeError, match='Cannot extract dtype'):
lazy.dtype_from_array(123)
with pytest.raises(TypeError, match='Cannot extract dtype'):
lazy.dtype_from_array(True)
with pytest.raises(TypeError, match='Cannot extract dtype'):
lazy.dtype_from_array(123.0)
with pytest.raises(TypeError, match='Cannot extract dtype'):
lazy.dtype_from_array([123.0])
@pytest.mark.parametrize(
'dtype',
[
np.uint8,
np.int32,
np.int64,
np.float32,
np.float64,
np.bool_,
jnp.bfloat16,
],
)
@enp.testing.parametrize_xnp()
def test_dtype_from_array_xnp(xnp, dtype):
lazy = enp.numpy_utils.lazy
# jnp auto-cast float64 -> float32
assert not jax.config.jax_enable_x64
target_dtype = dtype
if xnp is jnp:
target_dtype = (
{
np.float64: np.float32,
np.int64: np.int32,
}
).get(dtype, dtype)
x = xnp.array([1, 2], dtype=dtype)
assert lazy.dtype_from_array(x) == target_dtype
@enp.testing.parametrize_xnp()
def test_get_array_module(xnp):
y = fn(xnp.array([123]))
assert isinstance(y, xnp.ndarray)
def test_get_array_module_tf():
y = fn(tf.constant([123]))
assert isinstance(y, tnp.ndarray)
@pytest.mark.parametrize('xnp', [np, jnp])
def test_not_array_str(xnp):
x = xnp.array([123])
assert enp.is_array(x)
assert not enp.is_array_str(x)
assert not enp.is_dtype_str(x.dtype)
_STR_DTYPES = [
np.dtype('<U3'),
np.dtype('<S3'),
np.str_,
np.bytes_,
str,
bytes,
object,
]
_STR_ARRAYS = [
np.array(['abc', 'def']),
np.array([b'abc', b'def']),
np.array(['abc', 'def'], dtype=object),
np.array([b'abc', b'def'], dtype=object),
]
@pytest.mark.parametrize('array', _STR_ARRAYS)
def test_array_str(array):
assert enp.is_array(array)
assert enp.is_array_str(array)
assert enp.is_dtype_str(array.dtype)
def test_array_str_scalar():
assert enp.is_array_str('abc')
assert enp.is_array_str(b'abc')
@pytest.mark.parametrize('dtype', _STR_DTYPES)
def test_is_dtype_str(dtype):
assert enp.is_dtype_str(dtype)
@pytest.mark.parametrize(
'dtype',
[
np.dtype(int),
np.int64,
int,
],
)
def test_is_not_dtype_str(dtype):
assert not enp.is_dtype_str(dtype)
@pytest.mark.parametrize('array', _STR_ARRAYS)
def test_normalize_bytes2str(array):
assert np.array_equal(
enp.normalize_bytes2str(array),
np.array(['abc', 'def']),
)
def test_normalize_bytes2str_static():
assert enp.normalize_bytes2str('abc') == 'abc'
assert enp.normalize_bytes2str(b'abc') == 'abc'
assert isinstance(enp.normalize_bytes2str('abc'), str)
assert isinstance(enp.normalize_bytes2str(b'abc'), str)
assert enp.normalize_bytes2str(123) == 123
assert np.array_equal(
enp.normalize_bytes2str(np.array([123, 456])),
np.array([123, 456]),
)
assert isinstance(enp.normalize_bytes2str(jnp.array([1, 2, 3])), jnp.ndarray)
|
{
"content_hash": "844cd1801330db259592367142ee3b01",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 24.438356164383563,
"alnum_prop": 0.6631165919282511,
"repo_name": "google/etils",
"id": "cccb0a1ed6320a36f4a635344853e454e27e9ab8",
"size": "5935",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "etils/enp/numpy_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "304785"
}
],
"symlink_target": ""
}
|
"""Utilities for Keras classes with v1 and v2 versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.util import lazy_loader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
# pylint: disable=g-inconsistent-quotes
training = lazy_loader.LazyLoader(
"training", globals(),
"tensorflow.python.keras.engine.training")
training_v1 = lazy_loader.LazyLoader(
"training_v1", globals(),
"tensorflow.python.keras.engine.training_v1")
base_layer = lazy_loader.LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
base_layer_v1 = lazy_loader.LazyLoader(
"base_layer_v1", globals(),
"tensorflow.python.keras.engine.base_layer_v1")
callbacks = lazy_loader.LazyLoader(
"callbacks", globals(),
"tensorflow.python.keras.callbacks")
callbacks_v1 = lazy_loader.LazyLoader(
"callbacks_v1", globals(),
"tensorflow.python.keras.callbacks_v1")
# pylint: enable=g-inconsistent-quotes
class ModelVersionSelector(object):
"""Chooses between Keras v1 and v2 Model class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
eager_enabled = ops.executing_eagerly_outside_functions()
cls = swap_class(cls, training.Model, training_v1.Model, eager_enabled)
return super(ModelVersionSelector, cls).__new__(cls)
class LayerVersionSelector(object):
"""Chooses between Keras v1 and v2 Layer class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
eager_enabled = ops.executing_eagerly_outside_functions()
cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, eager_enabled)
return super(LayerVersionSelector, cls).__new__(cls)
class TensorBoardVersionSelector(object):
"""Chooses between Keras v1 and v2 TensorBoard callback class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
eager_enabled = ops.executing_eagerly_outside_functions()
start_cls = cls
cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard,
eager_enabled)
if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:
# Since the v2 class is not a subclass of the v1 class, __init__ has to
# be called manually.
return cls(*args, **kwargs)
return super(TensorBoardVersionSelector, cls).__new__(cls)
def swap_class(cls, v2_cls, v1_cls, eager_enabled):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
if eager_enabled:
return v2_cls
return v1_cls
# Recursively search superclasses to swap in the right Keras class.
cls.__bases__ = tuple(
swap_class(base, v2_cls, v1_cls, eager_enabled) for base in cls.__bases__)
return cls
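# Example (sketch): for a subclass whose bases include training.Model or
# training_v1.Model, swap_class rewrites those bases to match the execution
# mode, so a single user-facing class serves both graph and eager code paths.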
def disallow_legacy_graph(cls_name, method_name):
if not ops.executing_eagerly_outside_functions():
error_msg = (
"Calling `{cls_name}.{method_name}` in graph mode is not supported "
"when the `{cls_name}` instance was constructed with eager mode "
"enabled. Please construct your `{cls_name}` instance in graph mode or"
" call `{cls_name}.{method_name}` with eager mode enabled.")
error_msg = error_msg.format(cls_name=cls_name, method_name=method_name)
raise ValueError(error_msg)
def is_v1_layer_or_model(obj):
return isinstance(obj, (base_layer_v1.Layer, training_v1.Model))
|
{
"content_hash": "829934b2e61f3fbe88556b2a980f30c6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 37.145833333333336,
"alnum_prop": 0.7002243409983174,
"repo_name": "gunan/tensorflow",
"id": "551a07d242285a80537381dc04cb5322744c1838",
"size": "4290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/version_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
'''This module is the foundation that allows users to write PyVows-style tests.
'''
# pyVows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
class VowsInternalError(Exception):
'''Raised whenever PyVows internal code does something unexpected.'''
def __init__(self, *args):
if not isinstance(args[0], str):
raise TypeError('VowsInternalError must be instantiated with a string as the first argument')
if not len(args) >= 2:
raise IndexError('VowsInternalError must receive at least 2 arguments')
self.raw_msg = args[0]
self.args = args[1:]
def __str__(self):
msg = self.raw_msg.format(*self.args)
msg += '''
Help PyVows fix this issue! Tell us what happened:
https://github.com/heynemann/pyvows/issues/new
'''
return msg
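# Example (illustrative message and arguments):
#   raise VowsInternalError('Unexpected result from {0}: {1}', context_name, result)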
|
{
"content_hash": "e820a34305237dcbc4ffefe6bc2e5e2a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 105,
"avg_line_length": 31.09375,
"alnum_prop": 0.6562814070351759,
"repo_name": "heynemann/pyvows",
"id": "3fc1d3bf3482f5fe88719ed46d4c4b344b801270",
"size": "1019",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyvows/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "401"
},
{
"name": "Python",
"bytes": "193545"
}
],
"symlink_target": ""
}
|
{
"name":u'CoMPS',
'version': '1.0',
'category': u'Saúde',
'depends':[],
'data':[
'views/qweb/comps_template.xml',
'security/comps_security.xml',
'security/ir.model.access.csv',
'views/cadastro_usuarios_view.xml',
'views/cadastro_escola_view.xml',
'views/cadastro_avaliador_view.xml',
'views/cadastro_aluno_view.xml',
'views/avaliacao_imc_view.xml',
"views/avaliacao_abdominal_view.xml",
"views/avaliacao_perimetro_view.xml",
"views/avaliacao_dobras_cutaneas_view.xml",
"views/avaliacao_impulsao_horizontal_view.xml",
"views/avaliacao_sentar_alcancar_view.xml",
"views/avaliacao_preensao_manual_view.xml",
"views/avaliacao_lancamento_unilateral_view.xml",
"views/avaliacao_lancamento_simultaneo_view.xml",
"views/avaliacao_corrida_25m_view.xml",
"views/avaliacao_shutllerun_10x5m_view.xml",
"views/avaliacao_shutllerun_20m_view.xml",
"views/avaliacao_equilibrio_retaguarda_view.xml",
"views/avaliacao_saltos_laterais_view.xml",
"views/avaliacao_transposicao_lateral_view.xml",
"views/avaliacao_saltos_monopedais_perna_direita_view.xml",
"views/avaliacoes_pendentes_view.xml",
],
'css': [
'static/src/css/*.css',
],
'js': [
'static/src/js/*.js'
],
'installable': True,
'application': True,
'auto_install': False,
}
|
{
"content_hash": "eedde612e72bb4cbc766e36d91d1d0fe",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 71,
"avg_line_length": 38.829268292682926,
"alnum_prop": 0.5722361809045227,
"repo_name": "diogocs1/comps",
"id": "33340fe0a83bf5f0537db77d36796931d6943b18",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/addons/COMPS/__openerp__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
import os
import cv2
from gxic_rit.konan import Detector
TEST_IMAGE = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'example.jpg'
)
if __name__ == '__main__':
detector = Detector(None, 0.2)
image = cv2.imread(TEST_IMAGE)
frame, peoples = detector.detect(image)
cv2.imwrite('example.result.jpg', frame)
|
{
"content_hash": "9284ca5545647459e701a7179b47beb9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 48,
"avg_line_length": 19.166666666666668,
"alnum_prop": 0.6492753623188405,
"repo_name": "GXIC-Real-Intelligence-Team/surveillance-core",
"id": "ba2d90ef4082c8f3df15616ceb0a65ee04a101e4",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gxic_rit/konan/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "29799"
},
{
"name": "Python",
"bytes": "28602"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
}
|
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is an Edge Ratio calculator for single issue
#May be deprecated see ERatioSingleIssueDonchianTrendIII.py
#N Period Edge Ratio Computation
#Import modules
from YahooGrabber import YahooGrabber
import numpy as np
import time as t
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
#Empty data structures
tempdf = pd.DataFrame()
edgelist = []
#Variable assignment
ticker = 'UVXY'
#For ATR + MFE/MFA
atrwindow = 20
#For signal generation
lag = 5
#How many days to calculate e-ratio for
LengthOfTest = range(2, 120) #(2,3) = 1 day Eratio // assuming fill on close
#Request data
Asset = YahooGrabber(ticker)
#In sample trimmer
Asset = Asset[:]
#Make column that represents X axis
Asset['Index'] = Asset.index
#Format for mpl
Asset['IndexToNumber'] = Asset['Index'].apply(mdates.date2num)
#Format Dataframe to feed candlestick_ohlc()
AssetCopy = Asset[['IndexToNumber', 'Open', 'High', 'Low', 'Close', 'Adj Close']].copy()
#Timer begin
start = t.time()
#Calculate log returns
Asset['LogRet'] = np.log(Asset['Adj Close']/Asset['Adj Close'].shift(1))
Asset['LogRet'] = Asset['LogRet'].fillna(0)
#Index copies
Asset['Index'] = Asset.index
Asset['RangeIndex'] = range(1, len(Asset.index) + 1)
#ROC calculation
Asset['RateOfChange'] = (Asset['Adj Close'] - Asset['Adj Close'].shift(lag)
) / Asset['Adj Close'].shift(lag)
#ATR calculation
Asset['Method1'] = Asset['High'] - Asset['Low']
Asset['Method2'] = abs((Asset['High'] - Asset['Close'].shift(1)))
Asset['Method3'] = abs((Asset['Low'] - Asset['Close'].shift(1)))
Asset['Method1'] = Asset['Method1'].fillna(0)
Asset['Method2'] = Asset['Method2'].fillna(0)
Asset['Method3'] = Asset['Method3'].fillna(0)
Asset['TrueRange'] = Asset[['Method1','Method2','Method3']].max(axis = 1)
#ATR in points not %
Asset['AverageTrueRangePoints'] = Asset['TrueRange'].rolling(window = atrwindow,
center=False).mean()
#ATR in percent
Asset['AverageTrueRangePercent'] = Asset['AverageTrueRangePoints'] / Asset['Close']
#Signal generation; if ROC is not calculated stay out of market
Asset['Regime'] = np.where(Asset['RateOfChange'] > .15, -1, 0)
#If ATR is not calculated stay out of market
Asset['Regime'] = np.where(Asset['AverageTrueRangePercent'] > 0, Asset['Regime'], 0)
#Find trade date when regime changes
Asset['OriginalTrade'] = 0
Asset['OriginalTrade'].loc[(Asset['Regime'].shift(1) != Asset['Regime']) & (Asset['Regime'] == -1)] = -1
Asset['OriginalTrade'].loc[(Asset['Regime'].shift(1) != Asset['Regime']) & (Asset['Regime'] == 1)] = 1
#Make list of Original Trade DATES
tradedates = Asset[['OriginalTrade', 'Index', 'RangeIndex', 'Adj Close', 'AverageTrueRangePoints']].loc[(
Asset['OriginalTrade'] != 0)]
#Number of signals generated
numsignals = len(tradedates)
#Column assignment
tradedates['MFEpoints'] = 0
tradedates['MAEpoints'] = 0
#For number of e-ratio days to compute
for z in LengthOfTest:
#For each value of RangeIndex on Tradedate
for i in tradedates.RangeIndex:
#Assign computation space
tempdf = pd.DataFrame()
#Assign entry price
entryprice = tradedates['Adj Close'].loc[tradedates['RangeIndex'] == i][0]
#Take H, L, C, sample data for number of days under study post trade
tempdf['Close'] = Asset['Close'].loc[Asset.index[i:i+z]]
tempdf['High'] = Asset['High'].loc[Asset.index[i:i+z]]
tempdf['Low'] = Asset['Low'].loc[Asset.index[i:i+z]]
#For long trades
if tradedates['OriginalTrade'].loc[tradedates['RangeIndex'] == i][0] == 1:
# print('Long entry at ', entryprice)
#Check status
# print(tempdf)
#MFE
maxup = max(tempdf['High'] - entryprice)
#MAE
maxdown = max(entryprice - tempdf['Low'])
# print('MFE in points = ', maxup)
# print('MAE in points = ', maxdown)
# print(atrwindow, ' day ATR = ', tradedates['AverageTrueRangePoints'].loc[tradedates['RangeIndex'] == i][0])
#MFE assignment to trade dates
tradedates['MFEpoints'].loc[tradedates['RangeIndex'] == i] = maxup
#MAE assignment to trade dates
tradedates['MAEpoints'].loc[tradedates['RangeIndex'] == i] = maxdown
#For short trades
if tradedates['OriginalTrade'].loc[tradedates['RangeIndex'] == i][0] == -1:
# print('Short entry at ', entryprice)
#Check status
# print(tempdf)
#MAE
maxup = max(tempdf['High'] - entryprice)
#MFE
maxdown = max(entryprice - tempdf['Low'])
# print('MFE in points = ', maxdown)
# print('MAE in points = ', maxup)
# print(atrwindow, ' day ATR = ', tradedates['AverageTrueRangePoints'].loc[tradedates['RangeIndex'] == i][0])
#MFE assignment to trade dates
tradedates['MFEpoints'].loc[tradedates['RangeIndex'] == i] = maxdown
#MAE assignment to trade dates
tradedates['MAEpoints'].loc[tradedates['RangeIndex'] == i] = maxup
# print('--------------------------------------------')
# print('--------------------------------------------')
# print('--------------------------------------------')
#Adjust MFE and MAE for volatility - normalization
tradedates['VolAdjMFE'] = tradedates['MFEpoints']/tradedates['AverageTrueRangePoints']
tradedates['VolAdjMAE'] = tradedates['MAEpoints']/tradedates['AverageTrueRangePoints']
#Add MFE and MAE values
sumMFE = sum(tradedates['VolAdjMFE'])
sumMAE = sum(tradedates['VolAdjMAE'])
#Divide by number of signals
AvgVolAdjMFE = sumMFE/numsignals
AvgVolAdjMAE = sumMAE/numsignals
#Edge ratio calculation for period
edgeratio = AvgVolAdjMFE/AvgVolAdjMAE
#Display results
print('The ', z, ' day edge ratio is', edgeratio)
#Add data to list
edgelist.append(edgeratio)
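    #Interpretation note: an edge ratio above 1.0 means the average volatility-
    #adjusted favorable excursion (MFE) exceeds the adverse excursion (MAE) for
    #this holding period, i.e. the signal shows positive edge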
#Create dataframe to store data
edgeratioframe = pd.DataFrame(index = LengthOfTest)
edgeratioframe['EdgeRatio'] = edgelist
#Display
edgeratioframe['EdgeRatio'].plot(grid=True, figsize=(8,5))
#End timer
#end = t.time()
#Timer stats
#print((end - start), ' seconds later.')
#Highest Eratio
#print('Max eRatio is', max(edgeratioframe['EdgeRatio']))
#Graphics
#X and Y axis scale figure
figure, axe = plt.subplots(figsize = (10,5))
#Assign axis labels
plt.ylabel(ticker + ' Price')
plt.xlabel('Date')
#Overlay
#axe.plot(AssetCopy['IndexToNumber'], AssetCopy['RollingMax'], color = 'green', label = 'RollingMax')
#axe.plot(AssetCopy['IndexToNumber'], AssetCopy['RollingMin'], color = 'red', label = 'RollingMin')
#axe.plot(Asset['IndexToNumber'], Asset['SMA'], color = 'black', label = 'SMA')
#Signal triangles..
axe.scatter(Asset.loc[Asset['OriginalTrade'] == 1, 'IndexToNumber'].values,
Asset.loc[Asset['OriginalTrade'] == 1, 'Adj Close'].values, label='skitscat', color='green', s=75, marker="^")
axe.scatter(Asset.loc[Asset['OriginalTrade'] == -1, 'IndexToNumber'].values,
Asset.loc[Asset['OriginalTrade'] == -1, 'Adj Close'].values, label='skitscat', color='red', s=75, marker="v")
#Plot the DF values with the figure, object
candlestick_ohlc(axe, AssetCopy.values, width=.6, colorup='green', colordown='red')
#Format dates
axe.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
#For ATR
figure2, axe2 = plt.subplots(figsize = (10,2))
#Labels
plt.ylabel(ticker + ' ATR Percent')
plt.xlabel('Date')
#ATR line graph
axe2.plot(AssetCopy['IndexToNumber'], Asset['AverageTrueRangePercent'], color = 'black', label = '4wkATRPercent')
#axe2.plot(AssetCopy['IndexToNumber'], AssetCopy['ATRRollingMax'], color = 'green', label = 'ATRRollingMax')
#axe2.plot(AssetCopy['IndexToNumber'], AssetCopy['ATRRollingMin'], color = 'red', label = 'ATRRollingMin')
axe2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
|
{
"content_hash": "af95a85a5f735cb7c7d6ed3860c58cf5",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 122,
"avg_line_length": 40.67307692307692,
"alnum_prop": 0.6200945626477541,
"repo_name": "adamrvfisher/TechnicalAnalysisLibrary",
"id": "643cf658ff3190248bcf771e4a96c38a45e8559c",
"size": "8485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ERatioSingleIssueROCII.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15514"
}
],
"symlink_target": ""
}
|