| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 GiovanniMCMXCIX
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import unittest
import async_connect as connect
class TestGetAllCatalog(unittest.TestCase):
def setUp(self):
if sys.version_info[1] == 6:
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.get_event_loop()
self.connect = connect.Client(loop=self.loop)
else:
self.connect = connect.Client()
self.loop = self.connect.loop
def test_release(self):
async def test():
print('\n[connect.Client.get_all_releases]')
releases = []
data = await self.connect.get_all_releases()
for release in data:
releases.append((str(release), len(await release.tracks.values())))
print(f'There are {len(releases)} total releases.')
self.loop.run_until_complete(test())
def test_track(self):
async def test():
print('\n[connect.Client.get_all_tracks]')
tracks = []
data = await self.connect.get_all_tracks()
for track in data:
tracks.append((str(track), len(track.albums)))
print(f'There are {len(tracks)} total tracks.')
self.loop.run_until_complete(test())
def test_artist(self):
async def test():
print('\n[connect.Client.get_all_artists]')
artists = []
data = await self.connect.get_all_artists()
for artist in data:
artists.append((str(artist), len(await artist.releases.values())))
print(f'There are {len(artists)} total artists.')
self.loop.run_until_complete(test())
def tearDown(self):
self.loop.run_until_complete(self.connect.close())
| GiovanniMCMXCIX/async-connect.py | tests/test_get_all_catalog.py | Python | mit | 2,905 |
#!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Xiaohui Zhang
# Apache 2.0.
# We're using Python 3.x style print but want it to work in Python 2.x.
from __future__ import print_function
import argparse
import sys
class StrToBoolAction(argparse.Action):
""" A custom action to convert bools from shell format i.e., true/false
to python format i.e., True/False """
def __call__(self, parser, namespace, values, option_string=None):
if values == "true":
setattr(namespace, self.dest, True)
elif values == "false":
setattr(namespace, self.dest, False)
else:
raise Exception("Unknown value {0} for --{1}".format(values, self.dest))
def GetArgs():
parser = argparse.ArgumentParser(description = "Converts pronunciation statistics (from phone level decoding) "
"into a lexicon for lexicon learning. We prune the pronunciations "
"based on a provided stats file, and optionally filter out entries which are present "
"in a filter lexicon.",
epilog = "e.g. steps/dict/prons_to_lexicon.py --min-prob=0.4 \\"
"--filter-lexicon=exp/tri3_lex_0.4_work/phone_decode/filter_lexicon.txt \\"
"exp/tri3_lex_0.4_work/phone_decode/prons.txt \\"
"exp/tri3_lex_0.4_work/lexicon_phone_decoding.txt"
"See steps/dict/learn_lexicon.sh for examples in detail.")
parser.add_argument("--set-sum-to-one", type = str, default = False,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the sum of "
"probabilities is 1.")
parser.add_argument("--set-max-to-one", type = str, default = True,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the max "
"probability is 1.")
parser.add_argument("--min-prob", type = float, default = 0.1,
help = "Remove pronunciation with probabilities less "
"than this value after normalization.")
parser.add_argument("--filter-lexicon", metavar='<filter-lexicon>', type = str, default = '',
help = "Exclude entries in this filter lexicon from the output lexicon."
"each line must be <word> <phones>")
parser.add_argument("stats_file", metavar='<stats-file>', type = str,
help = "Input file containing pronunciation statistics, representing how many times "
"each word-pronunciation appear in the phonetic decoding results."
"each line must be <counts> <word> <phones>")
parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
help = "Output lexicon.")
print (' '.join(sys.argv), file = sys.stderr)
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
if args.stats_file == "-":
args.stats_file_handle = sys.stdin
else:
args.stats_file_handle = open(args.stats_file)
if args.filter_lexicon != '':
if args.filter_lexicon == "-":
# The filter lexicon is an input, so "-" means stdin (not stdout).
args.filter_lexicon_handle = sys.stdin
else:
args.filter_lexicon_handle = open(args.filter_lexicon)
else:
# Leave the handle defined (but falsy) so that ReadLexicon() returns an
# empty set when no filter lexicon is given.
args.filter_lexicon_handle = None
if args.out_lexicon == "-":
args.out_lexicon_handle = sys.stdout
else:
args.out_lexicon_handle = open(args.out_lexicon, "w")
if args.set_max_to_one == args.set_sum_to_one:
raise Exception("Exactly one of --set-max-to-one and "
"--set-sum-to-one must be true.")
return args
def ReadStats(args):
lexicon = {}
word_count = {}
for line in args.stats_file_handle:
splits = line.strip().split()
if len(splits) < 3:
continue
word = splits[1]
count = float(splits[0])
phones = ' '.join(splits[2:])
lexicon[(word, phones)] = lexicon.get((word, phones), 0) + count
word_count[word] = word_count.get(word, 0) + count
return [lexicon, word_count]
def ReadLexicon(lexicon_file_handle):
lexicon = set()
if lexicon_file_handle:
for line in lexicon_file_handle.readlines():
splits = line.strip().split()
if len(splits) == 0:
continue
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in lexicon file.')
word = splits[0]
phones = ' '.join(splits[1:])
lexicon.add((word, phones))
return lexicon
def ConvertWordCountsToProbs(args, lexicon, word_count):
word_probs = {}
for entry, count in lexicon.items():
word = entry[0]
phones = entry[1]
prob = float(count) / float(word_count[word])
if word in word_probs:
word_probs[word].append((phones, prob))
else:
word_probs[word] = [(phones, prob)]
return word_probs
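# Worked example (illustrative, not from the original file): if word "w" was
# counted 8 times with phones "a b" and 2 times with "a c", then
# word_probs["w"] == [("a b", 0.8), ("a c", 0.2)].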
def ConvertWordProbsToLexicon(word_probs):
lexicon = {}
for word, entry in word_probs.items():
for x in entry:
lexicon[(word, x[0])] = lexicon.get((word,x[0]), 0) + x[1]
return lexicon
def NormalizeLexicon(lexicon, set_max_to_one = True,
set_sum_to_one = False, min_prob = 0):
word_probs = {}
for entry, prob in lexicon.items():
t = word_probs.get(entry[0], (0,0))
word_probs[entry[0]] = (t[0] + prob, max(t[1], prob))
for entry, prob in lexicon.items():
if set_max_to_one:
prob = prob / word_probs[entry[0]][1]
elif set_sum_to_one:
prob = prob / word_probs[entry[0]][0]
if prob < min_prob:
prob = 0
lexicon[entry] = prob
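# Worked example (illustrative): per-word probs {0.8, 0.2} become {1.0, 0.25}
# with set_max_to_one=True (division by the max); with min_prob=0.3 the 0.25
# entry is zeroed here and later dropped by WriteLexicon.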
def WriteLexicon(args, lexicon, filter_lexicon):
words = set()
num_removed = 0
num_filtered = 0
for entry, prob in lexicon.items():
if prob == 0:
num_removed += 1
continue
if entry in filter_lexicon:
num_filtered += 1
continue
words.add(entry[0])
print("{0} {1}".format(entry[0], entry[1]),
file = args.out_lexicon_handle)
print ("Before pruning, the total num. pronunciations is: {}".format(len(lexicon)), file=sys.stderr)
print ("Removed {0} pronunciations by setting min_prob {1}".format(num_removed, args.min_prob), file=sys.stderr)
print ("Filtered out {} pronunciations in the filter lexicon.".format(num_filtered), file=sys.stderr)
num_prons_from_phone_decoding = len(lexicon) - num_removed - num_filtered
print ("Num. pronunciations in the output lexicon, which solely come from phone decoding"
"is {0}. num. words is {1}".format(num_prons_from_phone_decoding, len(words)), file=sys.stderr)
def Main():
args = GetArgs()
[lexicon, word_count] = ReadStats(args)
word_probs = ConvertWordCountsToProbs(args, lexicon, word_count)
lexicon = ConvertWordProbsToLexicon(word_probs)
filter_lexicon = ReadLexicon(args.filter_lexicon_handle)
NormalizeLexicon(lexicon, set_max_to_one = args.set_max_to_one,
set_sum_to_one = args.set_sum_to_one,
min_prob = args.min_prob)
WriteLexicon(args, lexicon, filter_lexicon)
args.out_lexicon_handle.close()
if __name__ == "__main__":
Main()
| michellemorales/OpenMM | kaldi/egs/wsj/s5/steps/dict/prons_to_lexicon.py | Python | gpl-2.0 | 7,717 |
from helpers import NavItemTypeError, AbstractNavItem, LinkItem
class Div(object):
children = []
def add_children(self, children):
# A list comprehension (rather than filter()) keeps the truthiness check
# correct on Python 3, where filter() returns a lazy, always-truthy iterator.
not_allowed = [child for child in children if not isinstance(child, self.child_type)]
if not_allowed:
raise NavItemTypeError("Unsupported Navigation Item.")
self.children += children
class Navbar(Div):
def __init__(self, children, nav_type="pills", fixed_to_top=True, inverted=False, justified=False):
self.children = children
self.nav_type = nav_type
self.fixed_to_top = fixed_to_top
self.inverted = inverted
self.justified = justified
self.child_type = AbstractNavItem
class Content(Div):
def __init__(self, children, width=9, align='left'):
self.children = children
self.align = align
self.child_type = LinkItem
self.width = width
class Sidebar(Div):
def __init__(self, children, affix=False, width=3, align='left'):
self.children = children
self.affix = affix
self.align = align
self.child_type = LinkItem
self.width = width
class Footer(Div):
def __init__(self, children, align='center'):
self.children = children
self.align = align
| bingorabbit/django-toffee | toffee/ui.py | Python | bsd-3-clause | 1,280 |
"""
Config module
.. versionchanged:: 0.5
The function set_default_options was removed. To achieve the same behaviour,
set a module-level dict called "DEFAULT_OPTIONS" where the keys are the
option names and the values are the default values in your plugin.
"""
import logging
from appdirs import user_config_dir
from inspect import getframeinfo, stack
from os import getenv
from os.path import basename, expanduser, join
from six import iteritems, string_types
from six.moves import configparser
_CFG = None
_FILENAME = None
#: Used as a separator when storing lists of values in the config file
_LIST_SEPARATOR = ","
#: Default settings
_CONFIG_DEFAULTS = {
"channels": "",
"plugins": "",
"nickserv_password": None,
"log_folder": expanduser("~/.lala/logs"),
"log_file": expanduser("~/.lala/lala.log"),
"encoding": "utf-8",
"fallback_encoding": "utf-8",
"max_log_days": 2,
"nickserv_admin_tracking": "false"
}
def _initialize(filename=None):
global _CFG
global _FILENAME
cfg = configparser.RawConfigParser(_CONFIG_DEFAULTS)
if filename is None:
configfiles = [join(user_config_dir(appname="lala"),
"config"),
join(getenv("HOME"), ".lala", "config"),
"/etc/lala/config"]
else:
configfiles = [filename]
files = cfg.read(configfiles)
if not cfg.has_section("base"):
cfg.add_section("base")
if not files:
raise RuntimeError(
f"Unable to read any config file. Tried: {configfiles}")
logging.info("Read config files %s", files)
logging.info("Using %s to save setting", files[0])
_CFG = cfg
_FILENAME = files[0]
def _find_current_plugin_name():
"""Tries to find the filename of the current plugin. This is essentially
the first filename different from the filename of this file ("config.py")
on the stack
"""
for elem in stack():
frameinfo = getframeinfo(elem[0])
filename = frameinfo.filename
if not __file__.startswith(filename):
return basename(filename.replace(".py", ""))
def _set(section, key, value):
if _CFG.has_section(section):
_CFG.set(section, key, value)
else:
_CFG.add_section(section)
_CFG.set(section, key, value)
if _FILENAME is not None:
with open(_FILENAME, "w") as fp:
_CFG.write(fp)
def get(key, converter=None):
"""Returns the value of a config option.
The section is the name of the calling file.
Default values for all keys can be set via a module-level ``DEFAULT_OPTIONS``
dict in the plugin (see the module docstring).
:param key: The key to lookup
"""
plugin = _find_current_plugin_name()
logging.info("%s wants to get the value of %s" % (plugin, key))
value = _CFG.get(plugin, key)
if converter is not None:
value = converter(value)
return value
def _get(section, key):
return _CFG.get(section, key)
def get_int(*args):
"""Returns the value of a config option as an int.
:param *args: See :meth:`lala.config.get`
:rtype: int
"""
return get(*args, converter=int)
def set(key, value, plugin=None):
"""Sets the ``value`` of ``key``.
The section is ``plugin`` if given, otherwise the name of the calling file."""
if plugin is None:
plugin = _find_current_plugin_name()
if not isinstance(value, string_types):
value = str(value)
logging.info("%s wants to set the value of %s to %s" % (plugin, key, value))
_set(plugin, key, value)
def _list_converter(value):
"""Converts a list of values into a string in which the values will be
separated by :data:`_LIST_SEPARATOR`."""
if not isinstance(value, string_types):
value = map(str, value)
value = _LIST_SEPARATOR.join(value)
return value
def get_list(*args):
"""Gets a list option.
:param *args: See :meth:`lala.config.get`
:rtype: list of strings
"""
value = get(*args, converter=_list_converter)
return value.split(_LIST_SEPARATOR)
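# Example (illustrative): with 'channels = #foo,#bar' stored in the config,
# get_list("channels") returns ["#foo", "#bar"].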
def set_list(key, value, *args):
"""Sets option ``key`` to ``value`` where ``value`` is a list of values.
None of the values in ``value`` are allowed to contain
:data:`lala.config._LIST_SEPARATOR`.
This method does *not* preserve the type of the items in the list, they're
all passed through :meth:`str`.
:param key: See :meth:`lala.config.set`
:param value: A list of values for ``key``.
"""
value = _list_converter(value)
set(key, value, *args)
def _set_default_options(plugin, opts):
"""Sets the default options for a plugin.
The keys of ``opts`` will be used as the option names,
the values as the default values of the options.
"""
for key, value in iteritems(opts):
if not _CFG.has_option(plugin, key):
if not isinstance(value, list):
_set(plugin, key, value)
else:
set_list(key, value, plugin)
| mineo/lala | lala/config.py | Python | mit | 4,962 |
from patapy import render_text_table
from methods import Method, MethodResult
class MyMethod(Method):
name = 'my analysis method'
help = ''
def run(self, experiment):
pass
class MyResult(MethodResult):
columns = ['score', 'p_value']
class ResultRow:
def __init__(self, score, p_value):
self.score = score
self.p_value = p_value
def test_render_text_table(capsys):
method = MyMethod()
capsys.readouterr() # clean buffers
results = MyResult(
[ResultRow(1, 0.1), ResultRow(2, 0.04)],
files=['more-info.html'],
description='Some important message'
)
render_text_table(method, results)
std, err = capsys.readouterr()
# message
assert 'Some important message' in std
# table
for row in ['score p_value', '1 0.1', '2 0.04']:
assert row in std
# files
assert 'more-info.html' in std
# no errors
assert not err
| kn-bibs/pathways-analysis | tests/test_run.py | Python | mit | 955 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Condition.condtion_data'
db.delete_column(u'server_condition', 'condtion_data')
# Adding field 'Condition.condition_data'
db.add_column(u'server_condition', 'condition_data',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
def backwards(self, orm):
# Adding field 'Condition.condtion_data'
db.add_column(u'server_condition', 'condtion_data',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
# Deleting field 'Condition.condition_data'
db.delete_column(u'server_condition', 'condition_data')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'server.businessunit': {
'Meta': {'ordering': "['name']", 'object_name': 'BusinessUnit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'server.condition': {
'Meta': {'ordering': "['condition_name']", 'object_name': 'Condition'},
'condition_data': ('django.db.models.fields.TextField', [], {}),
'condition_name': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.Machine']"})
},
u'server.fact': {
'Meta': {'ordering': "['fact_name']", 'object_name': 'Fact'},
'fact_data': ('django.db.models.fields.TextField', [], {}),
'fact_name': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.Machine']"})
},
u'server.machine': {
'Meta': {'ordering': "['hostname']", 'object_name': 'Machine'},
'activity': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'console_user': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'cpu_speed': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'cpu_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hd_percent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hd_space': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hd_total': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'machine_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.MachineGroup']"}),
'machine_model': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manifest': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'memory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'memory_kb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'munki_version': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'report': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'warnings': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'server.machinegroup': {
'Meta': {'ordering': "['name']", 'object_name': 'MachineGroup'},
'business_unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.BusinessUnit']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'server.pendingappleupdate': {
'Meta': {'ordering': "['display_name']", 'unique_together': "(('machine', 'update'),)", 'object_name': 'PendingAppleUpdate'},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.Machine']"}),
'update': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'update_version': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'server.pendingupdate': {
'Meta': {'ordering': "['display_name']", 'unique_together': "(('machine', 'update'),)", 'object_name': 'PendingUpdate'},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['server.Machine']"}),
'update': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'update_version': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'server.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'default': "'SO'", 'max_length': '2'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['server']
| salsoftware/sal | server/migrations/0015_auto__del_field_condition_condtion_data__add_field_condition_condition.py | Python | gpl-3.0 | 10,306 |
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import sys
from fabric.api import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--storage', type=str, default='')
parser.add_argument('--cloud', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--azure_storage_account', type=str, default='')
parser.add_argument('--azure_datalake_account', type=str, default='')
args = parser.parse_args()
def prepare_templates():
try:
local('/bin/bash -c "source /etc/profile && wget http://files.fast.ai/data/dogscats.zip -O /tmp/dogscats.zip"')
local('unzip -q /tmp/dogscats.zip -d /tmp')
local('/bin/bash -c "mkdir -p /home/{0}/{1}"'.format(args.os_user, "{test,train}"))
local('mv /tmp/dogscats/test1/* /home/{0}/test'.format(args.os_user))
local('/bin/bash -c "mv /tmp/dogscats/valid/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
local('/bin/bash -c "mv /tmp/dogscats/train/{0}/* /home/{1}/train"'.format("{cats,dogs}", args.os_user))
except Exception as err:
print('Failed to download/unpack image dataset!', str(err))
sys.exit(1)
local('mkdir -p /home/{0}/logs'.format(args.os_user))
local('mv /tmp/tensor /home/{0}/test_templates'.format(args.os_user))
def get_storage():
storages = {"aws": args.storage,
"azure": "{0}@{1}.blob.core.windows.net".format(args.storage, args.azure_storage_account),
"gcp": args.storage}
protocols = {"aws": "s3a", "azure": "wasbs", "gcp": "gs"}
if args.azure_datalake_account:
storages['azure'] = "{0}.azuredatalakestore.net/{1}".format(args.azure_datalake_account, args.storage)
protocols['azure'] = 'adl'
return (storages[args.cloud], protocols[args.cloud])
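# Example (illustrative): invoked with --cloud=aws --storage=my-bucket, this
# returns ("my-bucket", "s3a"), i.e. the bucket name and its URL scheme.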
def prepare_ipynb(kernel_name, template_path, ipynb_name):
with open(template_path, 'r') as f:
text = f.read()
text = text.replace('KERNEL_NAME', kernel_name)
with open('/home/{}/{}.ipynb'.format(args.os_user, ipynb_name), 'w') as f:
f.write(text)
def run_ipynb(ipynb_name):
local('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; ' \
'jupyter nbconvert --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.startup_timeout=300 --execute /home/{}/{}.ipynb'.format(args.os_user, ipynb_name))
def run_tensor():
interpreters = ['pyspark_local']
for i in interpreters:
prepare_ipynb(i, '/home/{}/test_templates/template_preparation_tensor.ipynb'.format(args.os_user), 'preparation_tensor')
run_ipynb('preparation_tensor')
prepare_ipynb(i, '/home/{}/test_templates/template_visualization_tensor.ipynb'.format(args.os_user), 'visualization_tensor')
run_ipynb('visualization_tensor')
if __name__ == "__main__":
try:
prepare_templates()
run_tensor()
except Exception as err:
print('Error!', str(err))
sys.exit(1)
sys.exit(0)
| epam/DLab | integration-tests/examples/scenario_tensor/tensor_tests.py | Python | apache-2.0 | 3,794 |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
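# Note: load_module, db, datetime, session and request are not imported here;
# they appear to be injected into the global namespace by the Eden/web2py
# test environment that executes this file.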
def test_GeoJSONLayer():
s3gis_tests.layer_test(
db,
db.gis_layer_geojson,
dict(
name = "Test GeoJSON",
description = "Test GeoJSON layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
url = "test://test_GeoJSON",
),
"S3.gis.layers_geojson",
[
{
"marker_height": 34,
"marker_image": u"gis_marker.image.marker_red.png",
"marker_width": 20,
"name": u"Test GeoJSON",
"url": u"test://test_GeoJSON"
}
],
session = session,
request = request,
)
| devinbalkind/eden | tests/unit_tests/modules/s3/s3gis/GeoJSONLayer.py | Python | mit | 812 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Kévin Gomez <contact@kevingomez.fr>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealing
# in the Software.
from gigadl.result.ResultBase import ResultBase
class MegaResult(ResultBase):
""" Result object returned by the MegaVideo provider """
def __str__(self):
return '[Megavideo] %s ' % str(super(MegaResult, self).__str__())
| K-Phoen/GigaDl | gigadl/result/MegaResult.py | Python | mit | 1,414 |
#!/usr/bin/env python3
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Sets up githooks.
'''
import os
import subprocess
import sys
SRC_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
FLUTTER_DIR = os.path.join(SRC_ROOT, 'flutter')
def IsWindows():
os_id = sys.platform
return os_id.startswith('win32') or os_id.startswith('cygwin')
def Main(argv):
githooks = os.path.join(FLUTTER_DIR, 'tools', 'githooks')
if IsWindows():
githooks = os.path.join(githooks, 'windows')
result = subprocess.run([
'git',
'config',
'core.hooksPath',
githooks,
], cwd=FLUTTER_DIR)
return result.returncode
if __name__ == '__main__':
sys.exit(Main(sys.argv))
| flutter/engine | tools/githooks/setup.py | Python | bsd-3-clause | 852 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute receptive field of a fully-convolutional network.
Please refer to the following g3doc for detailed explanation on how this
computation is performed, and why it is important:
g3doc/photos/vision/features/delf/g3doc/rf_computation.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.util import make_ndarray
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.framework import ops as framework_ops
import numpy as np
# White-listed layer operations, which do not affect the receptive field
# computation.
_UNCHANGED_RF_LAYER_OPS = [
"Add", "BiasAdd", "Cast", "Ceil", "ConcatV2", "Const", "Floor",
"FusedBatchNorm", "Identity", "Log", "Mul", "Pow", "RealDiv", "Relu",
"Relu6", "Round", "Rsqrt", "Softplus", "Sub", "VariableV2"
]
# Different ways in which padding modes may be spelled.
_VALID_PADDING = ["VALID", b"VALID"]
_SAME_PADDING = ["SAME", b"SAME"]
def _stride_size(node):
"""Computes stride size given a TF node.
Args:
node: Tensorflow node (NodeDef proto).
Returns:
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
"""
strides_attr = node.attr["strides"]
logging.vlog(4, "strides_attr = %s", strides_attr)
stride_y = strides_attr.list.i[1]
stride_x = strides_attr.list.i[2]
return stride_x, stride_y
def _conv_kernel_size(node, name_to_order_node):
"""Computes kernel size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If the weight layer node is invalid.
"""
weights_layer_read_name = node.input[1]
if not weights_layer_read_name.endswith("/read"):
raise ValueError(
"Weight layer's name input to conv layer does not end with '/read'")
weights_layer_param_name = weights_layer_read_name[:-5]
weights_node = name_to_order_node[weights_layer_param_name].node
if weights_node.op != "VariableV2":
raise ValueError("Weight layer is not of type VariableV2")
shape = weights_node.attr["shape"]
logging.vlog(4, "weight shape = %s", shape)
kernel_size_y = shape.shape.dim[0].size
kernel_size_x = shape.shape.dim[1].size
return kernel_size_x, kernel_size_y
def _padding_size_conv_pool(node, kernel_size, stride):
"""Computes padding size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
kernel_size: Kernel size of node (integer).
stride: Stride size of node (integer).
Returns:
padding: Padding size (integer).
Raises:
ValueError: If padding is invalid.
"""
# In this case, we need to carefully consider the different TF padding modes.
# The padding depends on kernel size, and may depend on input size. If it
# depends on input size, we raise an exception.
padding_attr = node.attr["padding"]
logging.vlog(4, "padding_attr = %s", padding_attr)
if padding_attr.s in _VALID_PADDING:
padding = 0
elif padding_attr.s in _SAME_PADDING:
if kernel_size == 1:
padding = 0
elif stride == 1:
padding = int(math.floor((float(kernel_size) - 1) / 2))
elif stride == 2 and kernel_size % 2 == 0:
padding = int(math.floor((float(kernel_size) - 1) / 2))
else:
padding = None
logging.warning(
"Padding depends on input size, which means that the effective "
"padding may be different depending on the input image "
"dimensionality. In this case, alignment check will be skipped.")
else:
raise ValueError("Invalid padding operation %s" % padding_attr.s)
return padding
def _pool_kernel_size(node):
"""Computes kernel size given a TF pooling node.
Args:
node: Tensorflow node (NodeDef proto).
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If pooling is invalid.
"""
ksize = node.attr["ksize"]
kernel_size_y = ksize.list.i[1]
kernel_size_x = ksize.list.i[2]
if ksize.list.i[0] != 1:
raise ValueError("pool ksize for first dim is not 1")
if ksize.list.i[3] != 1:
raise ValueError("pool ksize for last dim is not 1")
return kernel_size_x, kernel_size_y
def _padding_size_pad_layer(node, name_to_order_node):
"""Computes padding size given a TF padding node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
padding_x: Padding size for horizontal direction (integer).
padding_y: Padding size for vertical direction (integer).
Raises:
ValueError: If padding layer is invalid.
"""
paddings_layer_name = node.input[1]
if not paddings_layer_name.endswith("/paddings"):
raise ValueError("Padding layer name does not end with '/paddings'")
paddings_node = name_to_order_node[paddings_layer_name].node
if paddings_node.op != "Const":
raise ValueError("Padding op is not Const")
value = paddings_node.attr["value"]
t = make_ndarray(value.tensor)
padding_y = t[1][0]
padding_x = t[2][0]
if t[0][0] != 0:
raise ValueError("padding is not zero for first tensor dim")
if t[3][0] != 0:
raise ValueError("padding is not zero for last tensor dim")
return padding_x, padding_y
def _get_layer_params(node, name_to_order_node):
"""Gets layer parameters relevant for RF computation.
Currently, only these nodes are supported:
- Conv2D
- DepthwiseConv2dNative
- Pad
- MaxPool
- AvgPool
- all nodes listed in _UNCHANGED_RF_LAYER_OPS
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
padding_x: Padding size for horizontal direction (integer).
padding_y: Padding size for vertical direction (integer).
Raises:
ValueError: If layer op is unknown.
"""
logging.vlog(3, "node.op = %s", node.op)
logging.vlog(4, "node = %s", node)
if node.op == "Conv2D" or node.op == "DepthwiseConv2dNative":
stride_x, stride_y = _stride_size(node)
kernel_size_x, kernel_size_y = _conv_kernel_size(node, name_to_order_node)
# Compute the padding for this node separately for each direction.
padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
elif node.op == "Pad":
# Kernel and stride are simply 1 in this case.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
padding_x, padding_y = _padding_size_pad_layer(node, name_to_order_node)
elif node.op == "MaxPool" or node.op == "AvgPool":
stride_x, stride_y = _stride_size(node)
kernel_size_x, kernel_size_y = _pool_kernel_size(node)
# Compute the padding for this node separately for each direction.
padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
elif node.op in _UNCHANGED_RF_LAYER_OPS:
# These nodes do not modify the RF parameters.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
padding_x = 0
padding_y = 0
else:
raise ValueError("Unknown layer for operation '%s': %s" % (node.name,
node.op))
return kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y
def _reverse_sort_by_order(name_to_order_node):
"""Sorts map of name_to_order_node nodes in reverse order.
The output is such that the nodes in name_to_order_node are sorted in
descending order of the "order" field.
Args:
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
sorted_name_to_order_node: Sorted version of the input, in descending order.
"""
return sorted(name_to_order_node.items(), key=lambda x: -x[1].order)
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
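# Worked example (illustrative): a 3x3 kernel with stride 2 and an output RF
# of 1 gives an input RF of 2 * 1 + 3 - 2 = 3.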
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
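# Worked example (illustrative): stride 2, padding 1 and an effective output
# padding of 1 give an effective input padding of 2 * 1 + 1 = 3.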
class ReceptiveField:
"""Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates.
If `None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return - self.padding[axis] + y * self.stride[axis] + \
(self.size[axis] - 1) / 2
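# Worked example (illustrative): for size=3, stride=2, padding=1, the feature
# at y=0 maps to input center -1 + 0 * 2 + (3 - 1) / 2 = 0.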
def compute_feature_coordinates(self, x, axis=None):
"""Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates.
If `None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] + (1 - self.size[axis]) / 2) / \
self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
def compute_receptive_field_from_graph_def(graph_def,
input_node,
output_node,
stop_propagation=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operation or scope names for which to stop the
propagation of the receptive field.
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If network is not aligned or if either input or output nodes
cannot be found. For network criterion alignment, see
photos/vision/features/delf/g3doc/rf_computation.md
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, framework_ops.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, framework_ops.Tensor):
input_node = input_node.op.name
if isinstance(output_node, framework_ops.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
name_to_order_node = graph_compute_order.get_compute_order(
graph_def=graph_def)
# Sort in reverse topological order.
order = _reverse_sort_by_order(name_to_order_node)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used in conjunction with stride and kernel sizes which make it such that
# the padding to be applied would depend on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node) in order:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y = (
_get_layer_params(node, name_to_order_node))
logging.vlog(3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", node.name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
if inp_name.startswith("^"):
# The character "^" denotes a control dependency, so this input node
# can be safely ignored.
continue
inp_node = name_to_order_node[inp_name].node
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_node.name in rf_sizes_x:
assert inp_node.name in rf_sizes_y, (
"Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_node.name)
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_node.name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_node.name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_node.name] - 1
) / 2 - effective_paddings_x[inp_node.name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_node.name] - 1
) / 2 - effective_paddings_y[inp_node.name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_node.name] < rf_size_input_x:
rf_sizes_x[inp_node.name] = rf_size_input_x
effective_strides_x[inp_node.name] = effective_stride_input_x
effective_paddings_x[inp_node.name] = effective_padding_input_x
if rf_sizes_y[inp_node.name] < rf_size_input_y:
rf_sizes_y[inp_node.name] = rf_size_input_y
effective_strides_y[inp_node.name] = effective_stride_input_y
effective_paddings_y[inp_node.name] = effective_padding_input_y
else:
assert inp_node.name not in rf_sizes_y, (
"Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_node.name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_node.name] = rf_size_input_x
rf_sizes_y[inp_node.name] = rf_size_input_y
effective_strides_x[inp_node.name] = effective_stride_input_x
effective_strides_y[inp_node.name] = effective_stride_input_y
effective_paddings_x[inp_node.name] = effective_padding_input_x
effective_paddings_y[inp_node.name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
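# Example usage (illustrative; node names are hypothetical):
#   rf = compute_receptive_field_from_graph_def(
#       graph.as_graph_def(), "input_image", "my_net/logits")
#   print(rf.size, rf.stride, rf.padding)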
| jwlawson/tensorflow | tensorflow/contrib/receptive_field/python/util/receptive_field.py | Python | apache-2.0 | 23,712 |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.murano import environments
from tests.unit import test
CTX = "rally.task.context"
MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano."
"environments.MuranoEnvironments")
class MuranoEnvironmentsTestCase(test.TestCase):
def _get_context(self):
return {
"tenant": {
"packages": [mock.MagicMock(fully_qualified_name="fake")]
},
"user": {
"tenant_id": "fake_tenant_id"
},
"config": {
"murano_packages": {
"app_package": (
"rally-jobs/extra/murano/"
"applications/HelloReporter/"
"io.murano.apps.HelloReporter.zip")
}
}
}
@mock.patch(MURANO_SCENARIO + "._list_environments")
def test_list_environments(self, mock__list_environments):
scenario = environments.MuranoEnvironments()
scenario._list_environments()
mock__list_environments.assert_called_once_with()
@mock.patch(MURANO_SCENARIO + "._create_session")
@mock.patch(MURANO_SCENARIO + "._delete_environment")
@mock.patch(MURANO_SCENARIO + "._create_environment")
@mock.patch(MURANO_SCENARIO + "._generate_random_name")
def test_create_and_delete_environment(
self, mock__generate_random_name, mock__create_environment,
mock__delete_environment, mock__create_session):
scenario = environments.MuranoEnvironments()
fake_environment = mock.Mock(id="fake_id")
mock__create_environment.return_value = fake_environment
mock__generate_random_name.return_value = "foo"
scenario.create_and_delete_environment()
mock__create_environment.assert_called_once_with()
mock__create_session.assert_called_once_with(fake_environment.id)
mock__delete_environment.assert_called_once_with(fake_environment)
@mock.patch(MURANO_SCENARIO + "._create_environment")
@mock.patch(MURANO_SCENARIO + "._create_session")
@mock.patch(MURANO_SCENARIO + "._create_service")
@mock.patch(MURANO_SCENARIO + "._deploy_environment")
def test_create_and_deploy_environment(
self, mock__deploy_environment, mock__create_service,
mock__create_session, mock__create_environment):
fake_environment = mock.MagicMock(id="fake_env_id")
mock__create_environment.return_value = fake_environment
fake_session = mock.Mock(id="fake_session_id")
mock__create_session.return_value = fake_session
scenario = environments.MuranoEnvironments()
scenario.context = self._get_context()
scenario.context["tenants"] = {
"fake_tenant_id": {
"packages": [mock.MagicMock()]
}
}
scenario.create_and_deploy_environment(1)
mock__create_environment.assert_called_once_with()
mock__create_session.assert_called_once_with(fake_environment.id)
mock__create_service.assert_called_once_with(
fake_environment, fake_session, "fake", atomic_action=False)
mock__deploy_environment.assert_called_once_with(
fake_environment, fake_session)
| go-bears/rally | tests/unit/plugins/openstack/scenarios/murano/test_environments.py | Python | apache-2.0 | 3,933 |
import pathlib
from ...helpers import article
from .._helpers import _read, register
source = article(
authors=["Linbo Zhang", "Tao Cui", "Hui Liu"],
title="A set of symmetric quadrature rules on triangles and tetrahedra",
journal="Journal of Computational Mathematics",
volume="27",
number="1",
month="jan",
year="2009",
pages="89-96",
url="https://www.jstor.org/stable/43693493",
)
this_dir = pathlib.Path(__file__).resolve().parent
def zhang_cui_liu_1():
return _read(this_dir / "zhang_cui_liu_1.json", source)
def zhang_cui_liu_2():
return _read(this_dir / "zhang_cui_liu_2.json", source)
def zhang_cui_liu_3():
return _read(this_dir / "zhang_cui_liu_3.json", source)
register([zhang_cui_liu_1, zhang_cui_liu_2, zhang_cui_liu_3])
| nschloe/quadpy | src/quadpy/t2/_zhang_cui_liu/__init__.py | Python | mit | 793 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cellulist documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cellulist
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cellulist'
copyright = u'2015, Elliot Marsden'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cellulist.__version__
# The full version, including alpha/beta/rc tags.
release = cellulist.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cellulistdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cellulist.tex',
u'cellulist Documentation',
u'Elliot Marsden', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cellulist',
u'cellulist Documentation',
[u'Elliot Marsden'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cellulist',
u'cellulist Documentation',
u'Elliot Marsden',
'cellulist',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
eddiejessup/cellulist
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,421
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Context Tracking text data into TF Example protos for model training."""
import os
import re
from typing import List, Text
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from contrack import bert_client as bert_client_module
from contrack import encoding
from contrack import word2vec_client as word2vec_client_module
from contrack import signals as signals_module
# Flags
flags.DEFINE_string('input_file', '/tmp/input.txt', 'input file path')
flags.DEFINE_string('output_dir', '/tmp/output', 'output directory path')
flags.DEFINE_string('wordvec_path', '/tmp/GoogleNews-vectors-negative300.bin',
'Path to word2vec embedding file.')
flags.DEFINE_integer(
'max_seq_length', 128,
'The maximum total input sequence length after WordPiece tokenization. '
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded.')
flags.DEFINE_string(
'tokenizer_handle',
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'The TFHub handle of the BERT preprocessing model used '
'for tokenization.')
flags.DEFINE_string(
'bert_handle',
'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3',
'The TFHub handle of the BERT model used for preprocessing.')
FLAGS = flags.FLAGS
# String Constants
ENVIRONMENT = 'env' # Used for introducing enrefs without message or sender.
GENDER_FLAGS = {'f': 'female', 'm': 'male', 'n': 'neuter', 'u': 'unknown'}
class Message(object):
"""Represents one message in a conversation."""
def __init__(self, msg_id, sender, words,
signals, entities,
enrefs):
self.msg_id = msg_id
self.sender = sender
self.words = list(words)
self.signals = list(signals)
self.entities = entities
self.enrefs = enrefs
self.wordvecs = None
self.tokens = None
self.token_ids = None
self.token_wordvecs = None
self.token_signals = None
self.token_bertvecs = None
def tokenize(self, tokenizer,
empty_wordvec):
"""Tokenize message text and re-assign wordvecs and signals ."""
max_length = FLAGS.max_seq_length - 2 # Account for [CLS] and [SEP].
utterance = ' '.join(self.words[-max_length:])
tokens, token_ids = tokenizer.tokenize(utterance)
tokens = ['[CLS]'] + tokens + ['[SEP]']
token_ids = [101] + token_ids + [102]
    word_to_token = {0: 1}  # Skip the first token, which is [CLS].
word_index = 0
for i in range(1, len(tokens)): # Also skip [CLS]
token = tokens[i]
if token.startswith('['):
continue
if token.startswith('##'):
token = token[2:]
if (word_index < len(self.words) and
self.words[word_index].startswith(token)):
word_to_token[word_index] = i
word_index += 1
if word_index != len(self.words):
logging.info('word_index: %d len(words):%d', word_index, len(self.words))
logging.info(str(word_to_token))
logging.fatal('Cannot align words %s with tokens %s', str(self.words),
str(tokens))
word_to_token[len(self.words)] = len(tokens) - 1 # skip [SEP]
# Convert indices into words to indices into tokens
enrefs = self.enrefs
for enref in enrefs:
(word_start, word_end) = enref.word_span
enref.token_span = (word_to_token[word_start], word_to_token[word_end])
    # Compute per-token wordvecs and signals
token_wordvecs = []
token_signals = []
prev_wordvec = empty_wordvec
prev_signals = []
for word_index, token_index in word_to_token.items():
while len(token_wordvecs) < token_index:
token_wordvecs += [prev_wordvec]
token_signals += [prev_signals]
if word_index < len(self.wordvecs):
prev_wordvec = self.wordvecs[word_index]
prev_signals = self.signals[word_index]
token_wordvecs += [empty_wordvec]
token_signals += [[]]
self.tokens = tokens
self.token_ids = token_ids
self.token_wordvecs = token_wordvecs
self.token_signals = token_signals
class Conversation(object):
"""Represents a conversation to be preprocessed."""
def __init__(self, conversation_id, scenario_id):
self.conversation_id = str(conversation_id)
self.scenario_id = scenario_id
self.messages = []
def add_message(self, sender, words, signals,
entities,
enrefs):
msg_id = self.conversation_id + ':' + str(len(self.messages))
msg = Message(msg_id, sender, words, signals, entities, enrefs)
self.messages.append(msg)
def add_wordvecs(self,
word2vec_client):
for message in self.messages:
vecs = word2vec_client.lookup(message.words)
message.wordvecs = vecs
def tokenize(self, tokenizer,
empty_wordvec):
for message in self.messages:
message.tokenize(tokenizer, empty_wordvec)
def _build_seq_examples(
conversation,
encodings):
"""Builds SequenceExample protos from the conversations."""
seq_examples = []
enrefs = []
participants = conversation.messages[0].tokens[1:-1]
for example_index, message in enumerate(conversation.messages):
sender = message.sender
tokens = message.tokens
msg_enrefs = message.enrefs
wordvecs = message.token_wordvecs
bertvecs = message.token_bertvecs
seq_example = tf.train.SequenceExample()
seq_example.context.feature['state_seq_length'].int64_list.value.append(
len(enrefs))
seq_example.context.feature['token_seq_length'].int64_list.value.append(
len(tokens))
seq_example.context.feature['sender'].bytes_list.value.append(
message.sender.encode())
seq_example.context.feature['scenario_id'].bytes_list.value.append(
conversation.scenario_id.encode())
for p in participants:
seq_example.context.feature['participants'].bytes_list.value.append(
p.encode())
state_seq = seq_example.feature_lists.feature_list['state_seq']
token_seq = seq_example.feature_lists.feature_list['token_seq']
word_seq = seq_example.feature_lists.feature_list['word_seq']
annotation_seq = seq_example.feature_lists.feature_list['annotation_seq']
# Add enref sequence
for enref in enrefs:
entity_name = enref.entity_name
enref.enref_context.set_is_sender(entity_name == sender)
enref.enref_context.set_is_recipient(entity_name != sender and
entity_name in participants)
enref.enref_context.set_message_offset(
enref.enref_context.get_message_offset() + 1)
state_seq.feature.add().float_list.value.extend(np.array(enref.array))
# Store enref vectors in predictions
predictions = [encodings.new_prediction_array() for _ in tokens]
for enref in msg_enrefs:
start, end = enref.token_span
enref.wordvec.set(np.mean(wordvecs[start:end], 0))
enref.bert.set(np.mean(bertvecs[start:end], 0))
enrefs.append(enref)
for index in range(start, end):
prediction_enc = encodings.as_prediction_encoding(predictions[index])
prediction_enc.enref_meta.replace(enref.enref_meta.slice())
if enref.enref_meta.is_new() > 0.0 and index != start:
prediction_enc.enref_meta.set_is_new(False)
prediction_enc.enref_meta.set_is_new_continued(True)
prediction_enc.enref_id.replace(enref.enref_id.slice())
prediction_enc.enref_properties.replace(enref.enref_properties.slice())
prediction_enc.enref_membership.replace(enref.enref_membership.slice())
# Add tokens and predictions
for i, token in enumerate(tokens):
token_enc = encodings.new_token_encoding(token, message.token_signals[i],
message.token_wordvecs[i],
message.token_bertvecs[i])
token_seq.feature.add().float_list.value.extend(token_enc.array)
word_seq.feature.add().bytes_list.value.append(token.encode())
annotation_seq.feature.add().float_list.value.extend(predictions[i])
if example_index > 0:
seq_examples.append(seq_example)
return seq_examples
def _parse_enrefs(encodings, entities,
utterance, sender,
declarations):
"""Parses the enref declarations."""
enrefs = []
participants = entities[:2]
for decl in declarations:
if not decl:
continue
is_new = False
if decl[-1] != ']':
raise Exception('Missing bracket in enref declaration %s' % decl)
decl = decl[:-1]
elements = decl.split(' ')
if len(elements) != 3:
raise Exception('Invalid enref declaration %s' % decl)
entity_name = elements[0]
domain = 'people'
if entity_name.startswith('person:') or entity_name.startswith('p:'):
domain = 'people'
entity_name = re.sub(r'^.*?:', '', entity_name)
if entity_name.startswith('location:') or entity_name.startswith('l:'):
domain = 'locations'
entity_name = re.sub(r'^.*?:', '', entity_name)
if entity_name not in entities:
entities.append(entity_name)
is_new = True
span = [int(k.strip()) for k in elements[2].split('-')]
if len(span) != 2:
raise Exception('Invalid span in enref declaration %s' % decl)
span_words = utterance.split(' ')[span[0]:(span[1] + 1)]
span_text = ' '.join(span_words)
enref = encodings.new_enref_encoding()
enref.populate(entity_name, (span[0], span[1] + 1), span_text)
enref.enref_meta.set_is_enref(True)
enref.enref_meta.set_is_new(is_new)
enref.enref_meta.set_is_new_continued(False)
enref.enref_id.set(entities.index(entity_name))
enref.enref_properties.set_domain(domain)
if elements[1].startswith('g'):
members_decl = re.search(r'\((.*?)\)', elements[1])
if members_decl is None:
raise Exception('Cannot parse group declaration: %s' % elements[1])
members = members_decl.group(1).split(':')
if members == ['']:
members = []
member_ids = [entities.index(m) for m in members]
enref.enref_properties.set_is_group(True)
enref.enref_membership.set(member_ids, members)
else:
enref.enref_properties.set_is_group(False)
if domain == 'people':
gender = GENDER_FLAGS[elements[1][0]]
enref.enref_properties.set_gender(gender)
is_sender = entity_name == sender
is_recipient = not is_sender and entity_name in participants
enref.enref_context.set_is_sender(is_sender)
enref.enref_context.set_is_recipient(is_recipient)
enref.enref_context.set_message_offset(0)
enref.signals.set([])
logging.info('enref: %s', str(enref))
enrefs.append(enref)
return enrefs
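# Illustrative example, inferred from the parsing logic above; the names are
# hypothetical. A declaration is an optional domain prefix, an entity name,
# a gender/group flag, and a word span, terminated by ']':
#
#   _parse_enrefs(encodings, ['a', 'b'], 'hi alice', 'a', ['', 'p:alice f 1-1]'])
#
# would yield one enref for the new entity 'alice' with domain 'people' and
# gender 'female', spanning the word 'alice'.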
def _add_bert_vecs(conversations,
bert_client):
"""Adds BERT embeddings to conversations."""
msgs = {}
  # BERT embeddings are computed one batch at a time, so it's inefficient to
  # add them for each message individually. Instead we collect all messages
  # in one large dict, run BERT on that dict, and finally copy the
  # embeddings back to the messages.
for conversation in conversations:
msgs.update({m.msg_id: m.token_ids for m in conversation.messages})
embeddings = bert_client.lookup(msgs)
for conversation in conversations:
for message in conversation.messages:
message.token_bertvecs = embeddings[message.msg_id]
def convert(input_path, output_path):
"""Converts a file with conversations into a TF Records file."""
logging.info('Loading Word2Vec embeddings from %s', FLAGS.wordvec_path)
wordvec_client = word2vec_client_module.Word2VecClient(FLAGS.wordvec_path)
logging.info('Loading tokenizer from %s', FLAGS.tokenizer_handle)
tokenizer = bert_client_module.Tokenizer(FLAGS.tokenizer_handle)
logging.info('Loading BERT embeddings from %s', FLAGS.bert_handle)
bert_client = bert_client_module.BertClient(FLAGS.bert_handle)
encodings = encoding.Encodings()
logging.info('Converting data from %s', input_path)
input_file_name = os.path.basename(input_path)
input_file_name = os.path.splitext(input_file_name)[0]
entities = []
conversations = []
conversation = None
conversation_id = 0
scenario_id = None
with tf.io.gfile.GFile(input_path, 'r') as input_file:
for line in input_file:
if not line.strip() and conversation:
entities = []
conversations.append(conversation)
conversation = None
scenario_id = None
conversation_id += 1
continue
logging.info('read line %s', line)
# Extract line sections
sections = line.strip().split('|')
sender = sections[0].strip()
utterance = sections[1].strip()
enrefs_section = sections[2].strip()
if sender.startswith('conv:'):
scenario_id = sender[5:]
sender = ENVIRONMENT
conversation = Conversation(conversation_id, scenario_id)
      # Parse enrefs
enref_decls = enrefs_section.split('[')
enrefs = _parse_enrefs(encodings, entities, utterance, sender,
enref_decls)
# Parse words in utterance
words = utterance.lower().split(' ')
logging.info(words)
# Collect signals
signals = signals_module.collect_signals(words)
conversation.add_message(sender, words, signals, entities, enrefs)
conversation.add_wordvecs(wordvec_client)
conversation.tokenize(tokenizer, wordvec_client.empty_vec)
_add_bert_vecs(conversations, bert_client)
# Create output directory
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
output_file_name = (
os.path.splitext(os.path.basename(input_path))[0] + '.tfrecord')
data_file_path = os.path.join(output_path, output_file_name)
logging.info('Writing to %s', data_file_path)
# Write sequence examples
with tf.io.TFRecordWriter(data_file_path) as output_file:
for conversation in conversations:
seq_examples = _build_seq_examples(conversation, encodings)
for seq_example in seq_examples:
output_file.write(seq_example.SerializeToString())
def main(argv):
del argv
convert(FLAGS.input_file, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
contrack/preprocess.py
|
Python
|
apache-2.0
| 14,924
|
#!/usr/bin/env python
"""
Usage: genrandt.py [-p] [-i FILE] [-t N] [-g G] [-b] [H W]
will generate a random gridworld of height H and width W (default
is 5 by 10), with N trolls (default 1) at random positions, G goals
(default 2) at random positions, and dump the resulting specification.
Troll region radii are set using the variable TROLL_RADIUS (default 1).
If the flag "-p" is given, then generate a plot of the grid. If the
-i flag is given in addition to -p, then save the plot to a PNG image
FILE. Use the flag -b to use a representation where there is one
boolean variable in the specification per grid cell. Otherwise
(default), support for nonboolean domains by gr1c is used.
SCL; 7 May 2013.
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import tulip.gridworld as gw
TROLL_RADIUS=1
if __name__ == "__main__":
if len(sys.argv) > 11 or "-h" in sys.argv:
print "Usage: genrandt.py [-p] [-i FILE] [-t N] [-g G] [-b] [H W]"
exit(1)
if "-b" in sys.argv:
nonbool = False
sys.argv.remove("-b")
else:
nonbool = True
try:
targ_ind = sys.argv.index("-t")
if targ_ind > len(sys.argv)-2:
print "Invalid use of -t flag. Try \"-h\""
exit(1)
except ValueError:
targ_ind = -1
if targ_ind < 0:
N = 1
else:
N = int(sys.argv[targ_ind+1])
try:
garg_ind = sys.argv.index("-g")
if garg_ind > len(sys.argv)-2:
print "Invalid use of -g flag. Try \"-h\""
exit(1)
except ValueError:
garg_ind = -1
if garg_ind < 0:
num_goals = 2
else:
num_goals = int(sys.argv[garg_ind+1])
if "-p" in sys.argv:
print_pretty = True
sys.argv.remove("-p")
try:
iarg_ind = sys.argv.index("-i")+1
if iarg_ind > len(sys.argv)-1:
print "Invalid use of -i flag. Try \"-h\""
exit(1)
except ValueError:
iarg_ind = -1
else:
print_pretty = False
if len(sys.argv) >= 3 and sys.argv[-2][0] != "-":
(height, width) = (int(sys.argv[-2]), int(sys.argv[-1]))
else:
(height, width) = (5, 10)
Z = gw.random_world((height, width),
wall_density=0.2,
num_init=1,
num_goals=num_goals, num_trolls=N)
for i in range(len(Z.troll_list)):
Z.troll_list[i] = (Z.troll_list[i][0], TROLL_RADIUS)
print Z.pretty(show_grid=True, line_prefix="## ")
print Z.dumps(line_prefix="# ")
spec = Z.mspec()
print spec.dumpgr1c()
if print_pretty:
Z.plot(font_pt=0)
if iarg_ind == -1:
plt.show()
else:
plt.savefig(sys.argv[iarg_ind])
|
pombredanne/nTLP
|
examples/gridworlds/genrandt.py
|
Python
|
bsd-3-clause
| 2,809
|
from __future__ import division
from __future__ import unicode_literals
from builtins import range
from past.utils import old_div
import hashlib
import os
import random
import string
import tempfile
import re
import time
import urllib
try:
    import urllib.request  # Needed on Python 3; raises ImportError on Python 2.
except ImportError:
    pass
from datetime import datetime
from datetime import timedelta
from elodie.compatability import _rename
from elodie.external.pyexiftool import ExifTool
from elodie.dependencies import get_exiftool
from elodie import constants
def checksum(file_path, blocksize=65536):
hasher = hashlib.sha256()
with open(file_path, 'rb') as f:
buf = f.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(blocksize)
return hasher.hexdigest()
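# Illustrative usage: checksum() returns the hex SHA-256 digest of the file
# contents, so an empty file hashes to the well-known empty-input digest.
#   >>> open('/tmp/empty', 'w').close()
#   >>> checksum('/tmp/empty')
#   'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'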
def create_working_folder(format=None):
temporary_folder = tempfile.gettempdir()
folder = os.path.join(temporary_folder, random_string(10, format), random_string(10, format))
os.makedirs(folder)
return (temporary_folder, folder)
def download_file(name, destination):
try:
url_to_file = 'https://s3.amazonaws.com/jmathai/github/elodie/{}'.format(name)
# urlretrieve works differently for python 2 and 3
if constants.python_version < 3:
final_name = '{}/{}{}'.format(destination, random_string(10), os.path.splitext(name)[1])
urllib.urlretrieve(
url_to_file,
final_name
)
else:
final_name, headers = urllib.request.urlretrieve(url_to_file)
return final_name
except Exception as e:
return False
def get_file(name):
file_path = get_file_path(name)
if not os.path.isfile(file_path):
return False
return file_path
def get_file_path(name):
current_folder = os.path.dirname(os.path.realpath(__file__))
return os.path.join(current_folder, 'files', name)
def get_test_location():
return (61.013710, 99.196656, 'Siberia')
def populate_folder(number_of_files, include_invalid=False):
folder = '%s/%s' % (tempfile.gettempdir(), random_string(10))
os.makedirs(folder)
for x in range(0, number_of_files):
ext = 'jpg' if x % 2 == 0 else 'txt'
fname = '%s/%s.%s' % (folder, x, ext)
with open(fname, 'a'):
os.utime(fname, None)
if include_invalid:
fname = '%s/%s' % (folder, 'invalid.invalid')
with open(fname, 'a'):
os.utime(fname, None)
return folder
def random_string(length, format=None):
format_choice = string.ascii_uppercase + string.digits
if format == 'int':
format_choice = string.digits
elif format == 'str':
        format_choice = string.ascii_uppercase
return ''.join(random.SystemRandom().choice(format_choice) for _ in range(length))
def random_decimal():
return random.random()
def random_coordinate(coordinate, precision):
# Here we add to the decimal section of the coordinate by a given precision
return coordinate + ((old_div(10.0, (10.0**precision))) * random_decimal())
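# Illustrative: with precision=2 the jitter added above is at most
# 10.0/10.0**2 = 0.1, so random_coordinate(61.01, 2) returns a value in
# [61.01, 61.11).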
def temp_dir():
return tempfile.gettempdir()
def is_windows():
return os.name == 'nt'
# path_tz_fix(file_name)
# Change timestamp in file_name by the offset
# between UTC and local time, i.e.
# 2015-12-05_00-59-26-with-title-some-title.jpg ->
# 2015-12-04_20-59-26-with-title-some-title.jpg
# (Windows only)
def path_tz_fix(file_name):
if is_windows():
# Calculate the offset between UTC and local time
tz_shift = old_div((datetime.fromtimestamp(0) -
datetime.utcfromtimestamp(0)).seconds,3600)
# replace timestamp in file_name
        m = re.search(r'(\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2})', file_name)
        t_date = datetime.fromtimestamp(time.mktime(time.strptime(m.group(0), '%Y-%m-%d_%H-%M-%S')))
        s_date_fix = (t_date-timedelta(hours=tz_shift)).strftime('%Y-%m-%d_%H-%M-%S')
        return re.sub(r'\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}', s_date_fix, file_name)
else:
return file_name
# time_convert(s_time)
# Change s_time (struct_time) by the offset
# between UTC and local time
# (Windows only)
def time_convert(s_time):
if is_windows():
return time.gmtime((time.mktime(s_time)))
else:
return s_time
# isclose(a,b,rel_tol)
# To compare float coordinates a and b
# with relative tolerance c
def isclose(a, b, rel_tol = 1e-8):
if not isinstance(a, (int, float)) or not isinstance(b, (int, float)):
return False
diff = abs(a - b)
return (diff <= abs(rel_tol * a) and
diff <= abs(rel_tol * b))
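# Illustrative doctest-style examples for isclose():
#   >>> isclose(1.0, 1.0)
#   True
#   >>> isclose(1.0, 1.1)
#   False
#   >>> isclose(1.0, 'a')   # non-numeric input is rejected
#   False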
def reset_dbs():
""" Back up hash_db and location_db """
# This is no longer needed. See gh-322
# https://github.com/jmathai/elodie/issues/322
pass
def restore_dbs():
""" Restore back ups of hash_db and location_db """
# This is no longer needed. See gh-322
# https://github.com/jmathai/elodie/issues/322
pass
def setup_module():
exiftool_addedargs = [
u'-config',
u'"{}"'.format(constants.exiftool_config)
]
ExifTool(executable_=get_exiftool(), addedargs=exiftool_addedargs).start()
def teardown_module():
    ExifTool().terminate()
|
jmathai/elodie
|
elodie/tests/helper.py
|
Python
|
apache-2.0
| 5,170
|
import mock
from unittest import TestCase
from pantsmud.driver import hook, session
from pantsmud.util import error
class TestSessionClass(TestCase):
def setUp(self):
self.session = session.Session(mock.MagicMock())
self.session.environment = mock.MagicMock()
self.identity = mock.MagicMock()
self.session.environment.identities = {self.identity.uuid: self.identity}
self.mobile = mock.MagicMock()
self.session.environment.entities = {self.mobile.uuid: self.mobile}
self.ih = mock.MagicMock()
self.state = mock.MagicMock()
def test_is_client(self):
self.assertTrue(self.session.is_client)
def test_identity(self):
self.session.identity_uuid = self.identity.uuid
self.assertEqual(self.session.identity, self.identity)
def test_identity_when_uuid_is_none(self):
self.session.identity_uuid = None
self.assertIsNone(self.session.identity)
def test_set_identity(self):
self.session.identity = self.identity
self.assertEqual(self.session.identity_uuid, self.identity.uuid)
def test_set_identity_to_none(self):
self.session.identity = None
self.assertIsNone(self.session.identity_uuid)
def test_mobile(self):
self.session.mobile_uuid = self.mobile.uuid
self.assertEqual(self.session.mobile, self.mobile)
def test_mobile_when_uuid_is_none(self):
self.session.mobile_uuid = None
self.assertIsNone(self.session.mobile)
def test_set_mobile(self):
self.session.mobile = self.mobile
self.assertEqual(self.session.mobile_uuid, self.mobile.uuid)
def test_set_mobile_to_none(self):
self.session.mobile = None
self.assertIsNone(self.session.mobile_uuid)
def test_input_handler(self):
self.session.input_handlers.append((self.ih, None))
self.assertEqual(self.ih, self.session.input_handler)
def test_input_handler_fails_when_none_added(self):
self.assertRaises(error.BrainMissingInputHandlers, getattr, self.session, "input_handler")
def test_state(self):
self.session.input_handlers.append((None, self.state))
self.assertEqual(self.state, self.session.state)
def test_state_fails_when_none_added(self):
self.assertRaises(error.BrainMissingInputHandlers, getattr, self.session, "state")
def test_push_input_handler(self):
self.session.push_input_handler(self.ih, self.state)
self.assertEqual(self.ih, self.session.input_handler)
self.assertEqual(self.state, self.session.state)
def test_pop_input_handler(self):
self.session.push_input_handler(self.ih, self.state)
ih, state = self.session.pop_input_handler()
self.assertEqual(ih, self.ih)
self.assertEqual(state, self.state)
def test_pop_input_handler_fails_when_none_added(self):
self.assertRaises(error.BrainMissingInputHandlers, self.session.pop_input_handler)
class TestSessionFunctions(TestCase):
def setUp(self):
session.init()
self.open_brain_hook = mock.MagicMock()
self.open_brain_hook.__name__ = "open_brain_hook"
self.close_brain_hook = mock.MagicMock()
self.close_brain_hook.__name__ = "close_brain_hook"
hook.add(hook.HOOK_OPEN_BRAIN, self.open_brain_hook)
hook.add(hook.HOOK_CLOSE_BRAIN, self.close_brain_hook)
def tearDown(self):
session.init()
def test_open_session(self):
stream = mock.MagicMock()
sess = session.open_session(stream)
self.assertEqual(sess.stream, stream)
self.open_brain_hook.assert_called_once_with(hook.HOOK_OPEN_BRAIN, sess)
def test_close_session(self):
stream = mock.MagicMock()
sess1 = session.open_session(stream)
sess2 = session.close_session(stream)
self.assertEqual(sess1, sess2)
self.close_brain_hook.assert_called_once_with(hook.HOOK_CLOSE_BRAIN, sess1)
def test_get_session_after_open(self):
stream = mock.MagicMock()
self.assertRaises(KeyError, session.get_session, stream)
sess1 = session.open_session(stream)
sess2 = session.get_session(stream)
self.assertEqual(sess1, sess2)
def test_get_session_fails_after_close(self):
stream = mock.MagicMock()
session.open_session(stream)
session.close_session(stream)
self.assertRaises(KeyError, session.get_session, stream)
|
ecdavis/pantsmud
|
test/pantsmud/driver/test_session.py
|
Python
|
apache-2.0
| 4,484
|
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for filtered_values_cache.py."""
import collections
from absl.testing import absltest
from tf_coder.value_search import filtered_values_cache
from tf_coder.value_search import value
def _value(wrapped_value):
"""A simple utility to create Value objects."""
return value.ConstantValue(wrapped_value)
def _self_mapping(items):
"""Turns a list into an identity mapping dict."""
return collections.OrderedDict(zip(items, items))
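# Illustrative: _self_mapping turns a list into an identity mapping, e.g.
#   _self_mapping(['a', 'b']) == collections.OrderedDict([('a', 'a'), ('b', 'b')])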
class FilteredValuesCacheTest(absltest.TestCase):
def test_filter_values(self):
filter_cache = filtered_values_cache.FilteredValuesCache()
values_iterable = _self_mapping([_value(2), _value(3), _value(4)])
even_filter = lambda arg_value: arg_value.value % 2 == 0
self.assertEqual(
filter_cache.filter_values(even_filter, 1, values_iterable),
[_value(2), _value(4)])
self.assertEqual(
filter_cache.filter_values(None, 1, values_iterable),
[_value(2), _value(3), _value(4)])
# Check that the cache is used: even if the collection of values is
# different, the cached results remain unchanged.
self.assertEqual(
filter_cache.filter_values(even_filter, 1, None),
[_value(2), _value(4)])
self.assertEqual(
filter_cache.filter_values(None, 1, None),
[_value(2), _value(3), _value(4)])
if __name__ == '__main__':
absltest.main()
|
google-research/tensorflow-coder
|
tf_coder/value_search/filtered_values_cache_test.py
|
Python
|
apache-2.0
| 1,986
|
#
# Create the WP1 Graphics ROM.
#
# The original spec allowed for 64 ASCII characters (6 bit ASCII) only. I have extended
# this with something allowing 32x24 pixel resolution, and some graphics characters which
# come from a mixture of the Superboard II/UK101 Character ROM, and the Sharp MZ80K ROM.
#
# At present, characters 232-239 of the ROM do not have any graphic allocated to them.
#
from PIL import Image,ImageDraw
def copy(src,fr,to,tgt,pos):
for i in range(fr * 8,(to + 1) * 8):
tgt[pos * 8 - fr * 8 + i] = src[i]
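# Illustrative: copy(src, 32, 33, tgt, 0) copies the 8-byte glyphs of source
# characters 32 and 33 into target characters 0 and 1.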
def reverse(n):
if n == 0 or n == 255:
return n
r = 0
for i in range(0,8):
if (n & (0x80 >> i)) != 0:
r = r | (0x01 << i)
return r
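# Illustrative: reverse() mirrors the bit order of a byte, e.g.
# reverse(0x01) == 0x80 and reverse(0xC0) == 0x03; 0x00 and 0xFF are returned
# unchanged by the early exit above.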
def setdef(ch,pattern):
pattern.append(0)
for i in range(0,8):
wp1[ch*8+i] = pattern[i]
sbr = open("chargen.rom","rb").read(-1) # read in SB2 ROM
sbr = [reverse(ord(x)) for x in sbr] # convert to numbers
mz = open("mz80k.rom","rb").read(-1) # read in MZ80K ROM
mz = [ord(x) for x in mz] # convert to numbers
wp1 = [ 0 ] * 256 * 8 # empty wp1
for i in range(128,240): # default is RS for top half
wp1[i*8+0] = 0x3C
wp1[i*8+1] = 0x66
wp1[i*8+2] = 0x42
wp1[i*8+3] = 0x42
wp1[i*8+4] = 0x66
wp1[i*8+5] = 0x7E
wp1[i*8+6] = 0x66
wp1[i*8+7] = 0x3C
for i in range(32,96): # 6 bit ASCII up front (0-64)
copy(sbr,i,i,wp1,i & 0x3F)
for i in range(1,26): # Use MZ80K Alphanumerics
copy(mz,i,i,wp1,i)
for i in range(0,10):
copy(mz,i+32,i+32,wp1,i+48)
for i in range(64,128): # 64..127 is 2 x 3 graphics
if ((i & 1) != 0):
wp1[i*8+0] |= 0xF0
wp1[i*8+1] |= 0xF0
wp1[i*8+2] |= 0xF0
if ((i & 2) != 0):
wp1[i*8+0] |= 0x0F
wp1[i*8+1] |= 0x0F
wp1[i*8+2] |= 0x0F
if ((i & 4) != 0):
wp1[i*8+3] |= 0xF0
wp1[i*8+4] |= 0xF0
if ((i & 8) != 0):
wp1[i*8+3] |= 0x0F
wp1[i*8+4] |= 0x0F
if ((i & 16) != 0):
wp1[i*8+5] |= 0xF0
wp1[i*8+6] |= 0xF0
wp1[i*8+7] |= 0xF0
if ((i & 32) != 0):
wp1[i*8+5] |= 0x0F
wp1[i*8+6] |= 0x0F
wp1[i*8+7] |= 0x0F
copy(sbr,128,143,wp1,128) # 128..143 are single h/v lines
copy(sbr,175,178,wp1,144) # 144..147 diagonal blocks
copy(sbr,188,190,wp1,148) # 148..150 diagonal lines/cross
copy(sbr,183,187,wp1,151) # 151..155 half-colours
copy(sbr,207,210,wp1,156) # 156..159 square edges
copy(sbr,229,232,wp1,160) # 160..163 card suits
copy(sbr,236,239,wp1,164) # 164..167 plane
copy(sbr,248,255,wp1,168) # 168..175 tanks
copy(sbr,16,23,wp1,176) # 176..183 missiles
copy(sbr,242,247,wp1,184) # 184..189 guns
copy(sbr,4,4,wp1,190) # 190 bush
copy(sbr,13,15,wp1,191) # 191..193 tree, houses
copy(mz,200,207,wp1,194) # 194..201 car, people, face
copy(mz,199,199,wp1,202) # 202 invader
copy(mz,71,72,wp1,203) # 203,204 filled, unfilled circle
copy(sbr,226,226,wp1,205) # 205 larger circle
copy(sbr,5,12,wp1,206) # 206..213 sub, enterprise
copy(sbr,179,182,wp1,214) # 214..217 ship
copy(sbr,154,155,wp1,218) # 218..219 half blocks
copy(sbr,165,168,wp1,220) # 220..223 corner blocks
for i in range(240,256): # 240..255 grid
if (i & 1) != 0:
wp1[i*8+0] = wp1[i*8+1] = wp1[i*8+2] = wp1[i*8+3] = 0x08
if (i & 2) != 0:
wp1[i*8+4] = wp1[i*8+5] = wp1[i*8+6] = wp1[i*8+7] = 0x08
if (i & 4) != 0:
        wp1[i*8+3] |= 0xF8
    if (i & 8) != 0:
        wp1[i*8+3] |= 0x0F
setdef(224,[0x00,0x31,0x31,0x7B,0x7B,0x31,0x31]) # klingon +++
setdef(225,[0x00,0x8C,0x8C,0xDE,0xDE,0x8C,0x8C])
setdef(226,[0x09,0x05,0x03,0x0F,0x03,0x05,0x09]) # star *
setdef(227,[0x20,0x40,0x80,0xE0,0x80,0x40,0x20])
setdef(228,[0x41,0x23,0x13,0x0B,0x11,0x20,0x41]) # starbase >!<
setdef(229,[0x04,0x88,0x90,0xA0,0x10,0x08,0x04])
setdef(230,[0x11,0x25,0x43,0x8F,0x43,0x25,0x11]) # enterprise <*>
setdef(231,[0x10,0x48,0x84,0xE2,0x84,0x48,0x10])
size = 4 # pixel size
spacing = 0 # character spacing
iSize = 16 * (size * 8 + spacing) # render height + width
render = Image.new("RGBA",(iSize,iSize),0xFF0000)
iDraw = ImageDraw.Draw(render)
for c in range(0,256):
x = (c % 16) * (size * 8 + spacing) + spacing / 2
y = (c / 16) * (size * 8 + spacing) + spacing / 2
iDraw.rectangle([x,y,x+size*8,y+size*8],0x000000,None)
for y1 in range(0,8):
b = wp1[c*8+y1]
for x1 in range(0,8):
if (b & (0x80 >> x1)) != 0:
iDraw.rectangle([x+x1*size,y+y1*size,x+x1*size+size-1,y+y1*size+size-1],0xFFFFFF,None)
open("__font8x8.h","w").write(",".join([str(x) for x in wp1])) # write it out.
render.show()
|
paulscottrobson/wallpaper-one
|
miscellany/font/process.py
|
Python
|
mit
| 4,698
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
Override this file to handle your authenticating / login.
Copy and alter this file and put it in your PYTHONPATH as airflow_login.py;
the new module will override this one.
'''
import flask_login
from flask_login import login_required, current_user, logout_user
from flask import url_for, redirect
from airflow import settings
from airflow import models
from airflow.utils.db import provide_session
DEFAULT_USERNAME = 'airflow'
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login' # Calls login() below
login_manager.login_message = None
class DefaultUser(object):
def __init__(self, user):
self.user = user
def is_active(self):
'''Required by flask_login'''
return True
def is_authenticated(self):
'''Required by flask_login'''
return True
def is_anonymous(self):
'''Required by flask_login'''
return False
def data_profiling(self):
'''Provides access to data profiling tools'''
return True
def is_superuser(self):
'''Access all the things'''
return True
#models.User = User # hack!
#del User
@login_manager.user_loader
@provide_session
def load_user(userid, session=None):
user = session.query(models.User).filter(models.User.id == userid).first()
return DefaultUser(user)
@provide_session
def login(self, request, session=None):
user = session.query(models.User).filter(
models.User.username == DEFAULT_USERNAME).first()
if not user:
user = models.User(
username=DEFAULT_USERNAME,
is_superuser=True)
session.merge(user)
session.commit()
flask_login.login_user(DefaultUser(user))
session.commit()
return redirect(request.args.get("next") or url_for("index"))
|
RealImpactAnalytics/airflow
|
airflow/default_login.py
|
Python
|
apache-2.0
| 2,618
|
#!/usr/bin/env python
################################################################################
# Copyright 2015 Brecht Baeten
# This file is part of mpcpy.
#
# mpcpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpcpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpcpy. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import unittest
import mpcpy
import numpy as np
import sys
import os
# current path
modulepath = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
class TestEmulator(unittest.TestCase):
def setUp(self):
self.ini = {'C_em.T': 22+273.15, 'C_in.T': 21+273.15}
self.par = {'C_em.C': 10e6,
'C_in.C': 5e6,
'UA_in_amb.G': 200,
'UA_em_in.G': 1600}
self.inp = {
'time': np.array([0. , 3600. , 7200.]),
'T_amb': np.array([273.15, 274.15, 275.15]),
'Q_flow_sol': np.array([500. , 400. , 300.]),
'Q_flow_hp': np.array([4000. , 4000. , 4000.])
}
def test_create(self):
emulator = mpcpy.Emulator([])
def test_call(self):
emulator = mpcpy.Emulator([])
emulator(self.inp['time'],self.inp)
self.assertEqual(emulator.res['time'][1],self.inp['time'][1])
self.assertEqual(emulator.res['Q_flow_sol'][2],self.inp['Q_flow_sol'][2])
if __name__ == '__main__':
unittest.main()
|
BrechtBa/mpcpy
|
tests/emulator.py
|
Python
|
gpl-3.0
| 1,982
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
from botocore import __version__
import botocore.configloader
import botocore.credentials
import botocore.client
from botocore.exceptions import ConfigNotFound, ProfileNotFound
from botocore.exceptions import UnknownServiceError
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore.regions import EndpointResolver
from botocore.model import ServiceModel
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
"""
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: These form the keys of the dictionary. The values in the dictionary
#: are tuples of (<config_name>, <environment variable>, <default value>,
#: <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
# This is the shared credentials file amongst sdks.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': ('metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': ('metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
}
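    # Illustrative only: a hypothetical extra session variable following the
    # (config_name, env_var, default_value, conversion_func) convention above.
    # Passing session_vars={'api_timeout': ('api_timeout', 'AWS_API_TIMEOUT',
    # 10, int)} to Session() would make get_config_variable('api_timeout')
    # check the instance overrides, then the AWS_API_TIMEOUT environment
    # variable, then the 'api_timeout' config entry, and fall back to 10.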
#: The default format string to use when configuring the botocore logger.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SESSION_VARIABLES``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
:type profile: str
:param profile: The name of the profile to use for this
session. Note that the profile can only be set when
the session is created.
"""
self.session_var_map = copy.copy(self.SESSION_VARIABLES)
if session_vars:
self.session_var_map.update(session_vars)
if event_hooks is None:
self._events = HierarchicalEmitter()
else:
self._events = event_hooks
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
# The _profile attribute is just used to cache the value
# of the current profile to avoid going through the normal
# config lookup process each access time.
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
# This is a dict that stores per session specific config variable
# overrides via set_config_variable().
self._session_instance_vars = {}
if profile is not None:
self._session_instance_vars['profile'] = profile
self._client_config = None
self._components = ComponentLocator()
self._register_components()
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
self._register_response_parser_factory()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider',
lambda: botocore.credentials.create_credential_resolver(self))
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
def create_default_resolver():
loader = self.get_component('data_loader')
endpoints = loader.load_data('endpoints')
return EndpointResolver(endpoints)
self._components.lazy_register_component(
'endpoint_resolver', create_default_resolver)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
ResponseParserFactory())
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
# is a list of profile names, to the config values for the profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
if self._profile is None:
profile = self.get_config_variable('profile')
self._profile = profile
return self._profile
def get_config_variable(self, logical_name,
methods=('instance', 'env', 'config')):
"""
Retrieve the value associated with the specified logical_name
from the environment or the config file. Values found in the
        environment variable take precedence over values found in the
        config file. If no value can be found, None will be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
        :type methods: tuple
        :param methods: Defines which methods will be used to find
the variable value. By default, all available methods
are tried but you can limit which methods are used
by supplying a different value to this parameter.
Valid choices are: instance|env|config
:returns: value of variable or None if not defined.
"""
# Handle all the short circuit special cases first.
if logical_name not in self.session_var_map:
return
# Do the actual lookups. We need to handle
# 'instance', 'env', and 'config' locations, in that order.
value = None
var_config = self.session_var_map[logical_name]
if self._found_in_instance_vars(methods, logical_name):
return self._session_instance_vars[logical_name]
elif self._found_in_env(methods, var_config):
value = self._retrieve_from_env(var_config[1], os.environ)
elif self._found_in_config_file(methods, var_config):
value = self.get_scoped_config()[var_config[0]]
if value is None:
value = var_config[2]
if var_config[3] is not None:
value = var_config[3](value)
return value
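    # Illustrative usage of the lookup precedence implemented above (the
    # values are hypothetical):
    #   >>> os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
    #   >>> Session().get_config_variable('region')
    #   'us-west-2'
    #   >>> Session().get_config_variable('region', methods=('config',))
    #   # -> only the profile's config-file entry is consulted (may be None)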
def _found_in_instance_vars(self, methods, logical_name):
if 'instance' in methods:
return logical_name in self._session_instance_vars
return False
def _found_in_env(self, methods, var_config):
return (
'env' in methods and
var_config[1] is not None and
self._retrieve_from_env(var_config[1], os.environ) is not None)
def _found_in_config_file(self, methods, var_config):
if 'config' in methods and var_config[0] is not None:
return var_config[0] in self.get_scoped_config()
return False
def _retrieve_from_env(self, names, environ):
# We need to handle the case where names is either
# a single value or a list of variables.
if not isinstance(names, list):
names = [names]
for name in names:
if name in environ:
return environ[name]
return None
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._session_instance_vars[logical_name] = value
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = botocore.configloader.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = botocore.configloader.raw_config_parse(cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def get_default_client_config(self):
"""Retrieves the default config for creating clients
:rtype: botocore.client.Config
:returns: The default client config object when creating clients. If
the value is ``None`` then there is no default config object
attached to the session.
"""
return self._client_config
def set_default_client_config(self, client_config):
"""Sets the default config for creating clients
:type client_config: botocore.client.Config
:param client_config: The default client config object when creating
clients. If the value is ``None`` then there is no default config
object attached to the session.
"""
self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
:param token: An optional session token used by STS session
credentials.
"""
self._credentials = botocore.credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credentials.Credentials` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>
Where:
- agent_name is the value of the `user_agent_name` attribute
of the session object (`Boto` by default).
- agent_version is the value of the `user_agent_version`
attribute of the session object (the botocore version by default).
- py_ver is the version of the Python interpreter being used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
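Example (all values shown are illustrative)::
>>> s.user_agent()
'Boto/1.4.1 Python/2.7.12 Darwin/16.1.0'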
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
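Example (the service name is illustrative)::
>>> model = s.get_service_model('s3')
>>> model.service_name
's3'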
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
waiter_config = loader.load_service_model(
service_name, 'waiters-2', api_version)
return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
paginator_config = loader.load_service_model(
service_name, 'paginators-1', api_version)
return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
self._events.emit('service-data-loaded.%s' % service_name,
service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
return self.get_component('data_loader')\
.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``logging.Logger`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.LOG_FORMAT``.
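Example (a minimal sketch; the logger name and level are
illustrative)::
>>> s.set_stream_logger('botocore', logging.INFO)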
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.LOG_FORMAT
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
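Example (the path is illustrative)::
>>> s.set_file_logger(logging.DEBUG, '/tmp/botocore.log')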
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions are
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
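Example (the event name and handler are illustrative)::
>>> def on_event(**kwargs):
...     print('event fired')
>>> s.register('creating-client-class.s3', on_event)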
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
return self._components.get_component(name)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
be created. You can use the ``Session.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
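Example (a minimal sketch; the service name and region are
illustrative; credentials are resolved as described above)::
>>> client = s.create_client('s3', region_name='us-east-1')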
"""
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
# Figure out the user-provided region based on the various
# configuration options.
if region_name is None:
if config and config.region_name is not None:
region_name = config.region_name
else:
region_name = self.get_config_variable('region')
# Figure out the verify value based on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if aws_secret_access_key is not None:
credentials = botocore.credentials.Credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
else:
credentials = self.get_credentials()
endpoint_resolver = self.get_component('endpoint_resolver')
client_creator = botocore.client.ClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory)
client = client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
return client
def get_available_partitions(self):
"""Lists the available partitions found on disk
:rtype: list
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
"""
resolver = self.get_component('endpoint_resolver')
return resolver.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the region and endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoints for (e.g., s3).
This parameter accepts a service name (e.g., "elb") or endpoint
prefix (e.g., "elasticloadbalancing").
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
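Example (the returned names are illustrative)::
>>> s.get_available_regions('s3')[:2]
['ap-northeast-1', 'ap-southeast-1']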
"""
resolver = self.get_component('endpoint_resolver')
results = []
try:
service_data = self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred[name]
self._components[name] = factory()
# Only delete the component from the deferred dict after
# successfully creating the object from the factory as well as
# injecting the instantiated value into the _components dict.
del self._deferred[name]
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
|
rest-of/the-deck
|
lambda/lib/python2.7/site-packages/botocore/session.py
|
Python
|
mit
| 37,568
|
from core.entities import DefaultRaceEntity
class DwarfRaceEntity(DefaultRaceEntity):
def __init__(self):
super(DwarfRaceEntity, self).__init__()
self.set_ability_score(constitution=2, wisdom=2)
|
jklemm/py-dnd
|
core/entities/dwarf_race_entity.py
|
Python
|
mit
| 217
|
"""
.. moduleauthor:: Gilbert Maitre <gilbert.maitre@hevs.ch>
"""
from enum import Enum
from gridsim.decorators import accepts
from gridsim.core import AbstractSimulationElement
from gridsim.unit import units
from gridsim.util import Position
class AbstractElectricalElement(AbstractSimulationElement):
@accepts((1, str))
def __init__(self, friendly_name):
"""
__init__(self, friendly_name)
This class is the base for all elements that can take place in the
electrical simulator. It is based on the general
:class:`gridsim.core.AbstractSimulationElement`. At
initialization the user has to give the element ``friendly_name``.
:param friendly_name: Friendly name for the element.
Should be unique within the simulation module.
:type friendly_name: str
"""
super(AbstractElectricalElement, self).__init__(friendly_name)
class ElectricalBus(AbstractElectricalElement):
class Type(Enum):
SLACK_BUS = 0
"""
Type for slack (or swing) bus, i.e. the bus ensuring that produced
power is balanced to consumed power. Since it is unique, there is no
input parameter for the slack bus.
"""
PV_BUS = 1
"""
Type for bus of type PV, i.e. bus where active power (P) and voltage
amplitude (V) are given by the element(s) (generator) attached to the
bus. Reactive power (Q) and voltage angle (Th) are then fixed by the
network.
"""
PQ_BUS = 2
"""
Type for bus of type PQ, i.e. bus where active power (P) and
reactive power (Q) are given by the element(s) (load) attached to the
bus. Voltage amplitude (V) and voltage angle (Th) are then fixed by the
network.
"""
@accepts((1, str), (2, Type), (3, Position))
def __init__(self, friendly_name, bus_type, position=Position()):
"""
__init__(self, friendly_name, bus_type, position=Position())
This class is the base for all types of buses (i.e. nodes) in the
considered electrical network. It is based on the general
:class:`AbstractElectricalElement` class. At initialization the user
has to give the bus ``friendly_name``.
The geographical ``position`` of the element may optionally be given,
defined by the :class:`gridsim.util.Position` class. Apart from
the methods provided by the superclass
:class:`.AbstractElectricalElement`, this class provides the method
``position`` for getting the position property of the object.
The chosen representation for bus electrical values is :
active power (P), reactive power (Q), voltage amplitude
(V), and voltage phase (Th). Their default values are None.
:param friendly_name: Friendly name for the element.
Should be unique within the simulation module.
:type friendly_name: str
:param bus_type: The type of the bus. Note that Slack Bus is
automatically added to the simulation
:type bus_type: :class:`ElectricalBus.Type`
:param position: Bus geographical position.
Defaults to Position default value.
:type position: :class:`.Position`
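Example (a minimal sketch; the friendly name is illustrative)::
bus = ElectricalBus('bus1', ElectricalBus.Type.PQ_BUS)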
"""
super(ElectricalBus, self).__init__(friendly_name)
self.type = bus_type
"""
The type of the electrical bus (Slack, PV, or PQ)
"""
self.position = position
"""
The bus geographical position.
"""
self.P = None
"""
The bus active power.
"""
self.Q = None
"""
The bus reactive power.
"""
self.V = None
"""
The bus voltage amplitude.
"""
self.Th = None
"""
The bus voltage angle.
"""
def reset(self):
"""
reset(self)
Reset bus electrical values to their default values: None
"""
self.P = None
self.Q = None
self.V = None
self.Th = None
class AbstractElectricalTwoPort(AbstractElectricalElement):
@accepts((1, str))
@units.wraps(None, (None, None, units.ohm, units.ohm))
def __init__(self, friendly_name, X, R=0*units.ohm):
"""
__init__(self, friendly_name, X, R=0*units.ohm)
This class is the base for all electrical elements that can be placed
on a network branch, e.g. transmission lines, transformers,
phase shifters,... It is based on the general
:class:`.AbstractElectricalElement` class. At initialization the user
has to give the two-port ``friendly_name``.
:param friendly_name: Friendly name for the element.
Should be unique within the simulation module.
:type friendly_name: str
:param X: reactance of the element
:type X: ohm, see :mod:`gridsim.unit`
:param R: resistance of the element
:type R: ohm, see :mod:`gridsim.unit`
"""
# HACK: when object is constructed with *args or **kwargs
if not isinstance(X, (int, float)):
X = units.value(units.to_si(X))
if not isinstance(R, (int, float)):
R = units.value(units.to_si(R))
super(AbstractElectricalTwoPort, self).__init__(friendly_name)
if X <= 0:
raise RuntimeError('Line reactance X cannot be negative or zero')
if R < 0:
raise RuntimeError('Line resistance R cannot be a negative number')
self.X = X
"""
The reactance.
"""
self.R = R
"""
The resistance.
"""
class ElectricalNetworkBranch(AbstractElectricalElement):
@accepts((1, str),
((2, 3), ElectricalBus),
(4, AbstractElectricalTwoPort))
def __init__(self, friendly_name, from_bus, to_bus, two_port):
"""
__init__(self, friendly_name, from_bus, to_bus, two_port)
Class for a branch of an electrical network, i.e. a connection between two
buses (or nodes). It is oriented from one bus to the other. It is
based on the general :class:`AbstractElectricalElement` class. At
initialization, in addition to the 'friendly_name', the bus it is
starting from, and the bus it is going to, have to be given, together
with the two-port, e.g. transmission line, transformer,... it is made
of.
The chosen representation for branch electrical values is: active power
``Pij`` and reactive power ``Qij`` flowing into the branch at the from-bus
terminal, and active power ``Pji`` and reactive power ``Qji`` flowing into
the branch at the to-bus terminal. Their default values are None.
:param friendly_name: Friendly name for the branch.
Should be unique within the simulation module,
i.e. different for example from the friendly name of a bus
:type friendly_name: str
:param from_bus: Electrical bus from which branch is starting.
:type from_bus: :class:`.ElectricalBus`
:param to_bus: Electrical bus to which branch is going.
:type to_bus: :class:`.ElectricalBus`
:param two_port: Electrical two-port on the branch,
e.g. transmission line, transformer, ...
:type two_port: :class:`.AbstractElectricalTwoPort`
"""
if from_bus.id is None:
raise RuntimeError('From_bus bus has not been added to simulator.')
if to_bus.id is None:
raise RuntimeError('To_bus bus has not been added to simulator.')
super(ElectricalNetworkBranch, self).__init__(friendly_name)
self._from_bus_id = from_bus.id
self._to_bus_id = to_bus.id
self._two_port = two_port
self.Pij = None
"""
Active power flowing into the branch from the from-bus terminal.
"""
self.Qij = None
"""
Reactive power flowing into the branch from the from-bus terminal.
"""
self.Pji = None
"""
Active power flowing into the branch from the to-bus terminal.
"""
self.Qji = None
"""
Reactive power flowing into the branch from the to-bus terminal.
"""
@property
def from_bus_id(self):
"""
from_bus_id(self)
Gets the id of the bus the branch is starting from.
:returns: id of the bus the branch is starting from.
:rtype: int
"""
return self._from_bus_id
@property
def to_bus_id(self):
"""
to_bus_id(self)
Gets the id of the bus the branch is going to.
:returns: id of the bus the branch is going to.
:rtype: int
"""
return self._to_bus_id
def reset(self):
"""
reset(self)
Reset branch electrical values to their default : None
"""
self.Pij = None
self.Qij = None
self.Pji = None
self.Qji = None
class AbstractElectricalCPSElement(AbstractElectricalElement):
@accepts((1, str))
def __init__(self, friendly_name):
"""
__init__(self, friendly_name)
CPS stands for "Consuming-Producing-Storing".
This class is based on the :class:`AbstractElectricalElement` class.
It has the same initialization parameter: ``friendly_name``.
It differs from the superclass :class:`AbstractElectricalElement` in
giving access to the property ``delta_energy``, which is the amount of
energy consumed or stored (if positive), produced or un-stored (if
negative) during a simulation step. The class also implements the
methods :func:`gridsim.core.AbstractSimulationElement.reset`
to set the ``delta_energy`` property to 0 and
:func:`gridsim.core.AbstractSimulationElement.update` to update the
``delta_energy`` property to its current value.
:param friendly_name: Friendly name for the element. Should be unique
within the simulation module.
:type friendly_name: str
"""
super(AbstractElectricalCPSElement, self).__init__(friendly_name)
self._delta_energy = 0
self._internal_delta_energy = 0
@property
def delta_energy(self):
"""
delta_energy(self)
Gets the energy consumed by the element during the last simulation step.
A negative value means that the element has produced energy.
:returns: energy consumed by element during last simulation step.
:rtype: energy, see :mod:`gridsim.unit`
"""
return self._delta_energy
@delta_energy.setter
def delta_energy(self, value):
self._delta_energy = value
def reset(self):
"""
reset(self)
Resets the element to its initial state.
"""
self._delta_energy = 0
self._internal_delta_energy = 0
def update(self, time, delta_time):
"""
update(self, time, delta_time)
Updates the ``delta_energy`` property to its current value.
:param time: The actual time of the simulator.
:type time: time, see :mod:`gridsim.unit`
:param delta_time: The delta time for which the update has to be done.
:type delta_time: time, see :mod:`gridsim.unit`
"""
self._delta_energy = self._internal_delta_energy
|
gridsim/gridsim
|
gridsim/electrical/core.py
|
Python
|
gpl-3.0
| 11,459
|
from spym.generic import Spm_image, load_from_gsf
import matplotlib.pyplot as plt
import numpy as np
from spym.indentation import Indenter_orientation
file_name = 'wg_berk-0deg_n2_6um_000_TR.gsf'
#file_name = 'BMG8-zr60cu30al10_375c_10mN_6um_000_TR.gsf'
image = load_from_gsf(file_name)
image.change_z_unit('nm')
image.change_xy_unit('um')
image = image.crop(cx0 = 5, cx1= 5, cy0 = 5, cy1= 5)
image, trash = image.plane_fit()
image.center_min()
X0, Y0, Z0 = image.get_xyz()
R2 = X0**2 + Y0**2
r = min(X0.max(), -X0.min(), Y0.max(), -Y0.min())
k = .8
mask = np.where(R2 > r**2 * k**2, 1., np.nan)
image, trash = image.line_fit(mask = mask)
rotation, spectrum, edges, rtz = Indenter_orientation(image, 3)
angles, peaks = edges # Detected edges
X, Y, Z = image.get_xyz()
zmax, zmin = Z.max(), -.5 * Z.max()
levels = np.linspace(zmin, zmax, 20)
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax1.set_aspect("equal")
plt.grid()
grad = plt.contourf(X, Y, Z, levels)
plt.contour(X, Y, Z, levels, colors = 'black', linewidths = 0.2)
cbar = plt.colorbar(grad)
cbar.set_label("Altitude, $z$ [nm]")
#plt.clim(-20., 20.)
ax2 = fig.add_subplot(1,2,2)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
plt.grid()
for angle in angles:
x, y, s, z = image.section(angle = angle)
ax1.plot(x, y)
ax2.plot(s, z)
plt.show()
|
lcharleux/spym
|
doc/example_code/generic/Spm_image-section.py
|
Python
|
gpl-2.0
| 1,326
|
from unittest import TestCase
from mock import Mock
from mock import MagicMock
from mock import patch
from employee import Employee
from data import list_employees
def my_employee():
return [{
"id":"1",
"employee_name":"testing",
"employee_salary":"1",
"employee_age":"1",
"profile_image":""
}]
class TestEmployee(TestCase):
def test_mock1(self):
emp = Employee()
emp.method = MagicMock(return_value=100)
emp.method(1, 2, 3, 4, 5, key='value')
emp.method.assert_called_with(1, 2, 3, 4, 5, key='value')
def test_count_empty_employees(self):
emp = Employee()
emp.count_employees = MagicMock(return_value=0)
emp.count_employees([])
emp.count_employees.assert_called_with([])
assert emp.count_employees([]) == 0
def test_employee_instance(self):
emp = MagicMock()
emp.count_employees = MagicMock(return_value=0)
emp.count_employees([])
emp.count_employees.assert_called_with([])
assert emp.count_employees([]) == 0
#def test_exception(self):
# emp = Mock(side_effect=KeyError('My Key Error'))
# #self.assertRaises(KeyError, emp('My Key Error'))
# self.assertRaisesWithMessage(KeyError, 'My Key Error', )
|
rolandovillca/python_basis
|
unittest_mocks/test_employee.py
|
Python
|
mit
| 1,306
|
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.openstack.common import log as logging
from neutron.services import advanced_service
LOG = logging.getLogger(__name__)
class MetadataDriver(advanced_service.AdvancedService):
OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
cfg.StrOpt('metadata_proxy_user',
default='',
help=_("User (uid or name) running metadata proxy after "
"its initialization (if empty: L3 agent effective "
"user)")),
cfg.StrOpt('metadata_proxy_group',
default='',
help=_("Group (gid or name) running metadata proxy after "
"its initialization (if empty: L3 agent effective "
"group)"))
]
def __init__(self, l3_agent):
super(MetadataDriver, self).__init__(l3_agent)
self.metadata_port = l3_agent.conf.metadata_port
def after_router_added(self, router):
for c, r in self.metadata_filter_rules(self.metadata_port):
router.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in self.metadata_nat_rules(self.metadata_port):
router.iptables_manager.ipv4['nat'].add_rule(c, r)
router.iptables_manager.apply()
if not router.is_ha:
self._spawn_metadata_proxy(router.router_id,
router.ns_name,
self.l3_agent.conf)
def before_router_removed(self, router):
for c, r in self.metadata_filter_rules(self.metadata_port):
router.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules(self.metadata_port):
router.iptables_manager.ipv4['nat'].remove_rule(c, r)
router.iptables_manager.apply()
self._destroy_metadata_proxy(router.router['id'],
router.ns_name,
self.l3_agent.conf)
@classmethod
def metadata_filter_rules(cls, port):
return [('INPUT', '-s 0.0.0.0/0 -p tcp -m tcp --dport %s '
'-j ACCEPT' % port)]
@classmethod
def metadata_nat_rules(cls, port):
return [('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % port)]
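# Example (illustrative): for port 9697 the rule added to PREROUTING is
# '-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-port 9697',
# i.e. traffic to the metadata IP on port 80 is redirected to the proxy port.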
@classmethod
def _get_metadata_proxy_user_group(cls, conf):
user = conf.metadata_proxy_user or os.geteuid()
group = conf.metadata_proxy_group or os.getegid()
return user, group
@classmethod
def _get_metadata_proxy_callback(cls, router_id, conf):
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group = cls._get_metadata_proxy_user_group(conf)
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
'--router_id=%s' % router_id,
'--state_path=%s' % conf.state_path,
'--metadata_port=%s' % conf.metadata_port,
'--metadata_proxy_user=%s' % user,
'--metadata_proxy_group=%s' % group]
proxy_cmd.extend(config.get_log_args(
conf, 'neutron-ns-metadata-proxy-%s.log' %
router_id))
return proxy_cmd
return callback
@classmethod
def _get_metadata_proxy_process_manager(cls, router_id, ns_name, conf):
return external_process.ProcessManager(
conf,
router_id,
config.get_root_helper(conf),
ns_name)
@classmethod
def _spawn_metadata_proxy(cls, router_id, ns_name, conf):
callback = cls._get_metadata_proxy_callback(router_id, conf)
pm = cls._get_metadata_proxy_process_manager(router_id, ns_name, conf)
pm.enable(callback)
@classmethod
def _destroy_metadata_proxy(cls, router_id, ns_name, conf):
pm = cls._get_metadata_proxy_process_manager(router_id, ns_name, conf)
pm.disable()
|
blueboxgroup/neutron
|
neutron/agent/metadata/driver.py
|
Python
|
apache-2.0
| 5,140
|
import os
import tempfile
configdir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.dirname(configdir)
tempdir = tempfile.gettempdir()
FIGSHARE_CALLBACK_URI = 'http://localhost:5000/callback'
# These articles appear in the dashboard if the user did not authenticate with Figshare
FIGSHARE_PREVIEW_IDS = ['828798', '90206', '860460', '91672', '92089', '785731', '104629', '94593']
SESSION_COOKIE_NAME = "linkitup_session"
CSRF_ENABLED = True
SECRET_KEY = '\x14%<`2\xecT*\xa7M\xd0\x90%\x8d\x9a\xdd\xdbCF\xec\x96\x0e\x0e\x96'
OPENID_FS_STORE_PATH = os.path.join(basedir, 'tmp')
OPENID_PROVIDERS = [
{ 'name': 'Google', 'url': 'https://www.google.com/accounts/o8/id' },
{ 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
{ 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
{ 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
{ 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
# Setup Upload locations
UPLOADS_DEFAULT_DEST = os.path.join(tempdir, 'linkitup')
# Setup Plugins
PLUGINS_FILE = os.path.join(configdir, "plugins.yaml")
# Session store location
SESSION_STORE = os.path.join(basedir, 'tmp')
# Nanopublications store location
NANOPUBLICATION_STORE = os.path.join(basedir, 'nanopublications')
## Graph Store configuration
# GRAPH_STORE_ENDPOINT = 'http://d2s.ops.few.vu.nl/rdf-graph-store'
# GRAPH_STORE_AUTH = 'linkitup:password'
# Logging folder
LOG_FOLDER = os.path.join(basedir, 'log')
# Logging at DEBUG level?
DEBUG = True
|
Data2Semantics/linkitup
|
linkitup/config.py
|
Python
|
mit
| 1,597
|
# coding=utf-8
"""models.py
Defines the classes and relationships used by the ORM, the initial data
for certain tables, and utility functions.
When creating a new database, call in order::
Role.insert_roles()
School.insert_school_structure()
Attributes:
"""
from __future__ import absolute_import, unicode_literals
from datetime import datetime
import tsxypy
from flask import current_app, url_for, abort
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash, check_password_hash
from app.exceptions import ValidationError
from . import db, login_manager
class Operation:
ADD = 0x01
REMOVE = 0x02
class Permission:
"""权限类 用于规定权限的二进制数值"""
VIEW_SCHEDULE = 0x01 #: View own timetable
VIEW_SCORE = 0x02 #: View own grades
VIEW_ALL_SCHEDULE = 0x04 #: View everyone's timetables
VIEW_ALL_SCORE = 0x08 #: View everyone's grades
MODIFY = 0x10 #: Edit permission
ADMINISTER = 0x80 #: Administrator permission
student = VIEW_SCHEDULE | VIEW_SCORE
teacher = VIEW_ALL_SCHEDULE | VIEW_ALL_SCORE
teacher_v = VIEW_ALL_SCHEDULE | VIEW_ALL_SCORE | MODIFY
administrator = 0xff
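# Example (illustrative): permission checks are bitwise tests, e.g.
# (Permission.student & Permission.VIEW_SCORE) == Permission.VIEW_SCORE  -> True
# (Permission.student & Permission.VIEW_ALL_SCORE) == 0                  -> True (no access)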
@staticmethod
def to_json():
permissions = []
for member in dir(Permission):
value = getattr(Permission, member)
if isinstance(value, int) and member[0].isupper():
permissions.append({'permission': member, 'value': value})
return {'permissions': permissions}
class Role(db.Model):
"""角色类
每个用户的身份信息、 权限
"""
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
"""向角色表插入角色数据
:return: N/A
"""
roles = {
'Student': (Permission.student, True),
'Teacher': (Permission.teacher, False),
'Teacher_V': (Permission.teacher_v, False),
'Administrator': (Permission.administrator, False),
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
@staticmethod
def to_json():
"""角色json"""
return {
'roles': [{
'name': role.name,
'permissions': role.permissions
} for role in Role.query.all()]
}
class School(db.Model):
__tablename__ = 'schools'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
school_code = db.Column(db.String(16))
departments = db.relationship('Department', backref='school', lazy='dynamic')
def __repr__(self):
return '<School %r>' % self.name
@staticmethod
def insert_school_structure():
"""使用抓取到的学校院系部结构生成表结构
:return: N/A
"""
import os
base_dir = os.getcwd()
tmp_dir = os.path.join(base_dir, 'tmp')
school_file_dir = os.path.join(tmp_dir, 'school_dict')
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
if os.path.exists(school_file_dir):
with open(school_file_dir) as f:
import pickle
school_dict = pickle.load(f)
else:
sc = tsxypy.ScheduleCatcher()
school_dict = sc.get_school_json()
with open(school_file_dir, 'w') as f:
import pickle
pickle.dump(school_dict, f)
for school_year in school_dict['school_years']:
print(school_year['year'])
for department in school_year['departments']:
print("Dict: code:%s, name:%s in" % (department['code'], department['name']))
d = Department.query.filter_by(department_code=department['code']).first()
if not d:
d = Department(name=department['name'], department_code=department['code'])
db.session.add(d)
db.session.commit()
else:
print("DB: already in db code:%s, name:%s" % (d.department_code, d.name))
for specialty in department['specialties']:
print("Dict: code:%s, name:%s in" % (specialty['code'], specialty['name']))
s = Specialty.query.filter_by(specialty_code=specialty['code']).first()
if not s:
s = Specialty(name=specialty['name'], specialty_code=specialty['code'])
s.department = d
db.session.add(s)
db.session.commit()
else:
print("DB: already in db code:%s, name:%s" % (s.specialty_code, s.name))
for _class in specialty['classes']:
print("Dict: code:%s, name:%s in" % (_class['code'], _class['name']))
c = _Class.query.filter_by(class_code=_class['code']).first()
if not c:
c = _Class(name=_class['name'], class_code=_class['code'])
c.specialty = s
db.session.add(c)
db.session.commit()
else:
print("DB: already in db code:%s, name:%s" % (c.class_code, c.name))
class Department(db.Model):
"""学院/系别/部门"""
__tablename__ = 'departments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
department_code = db.Column(db.String(16))
school_id = db.Column(db.Integer, db.ForeignKey('schools.id'))
specialties = db.relationship('Specialty', backref='department', lazy='dynamic')
def __repr__(self):
return '<Department %r>' % self.name
class Specialty(db.Model):
"""专业"""
__tablename__ = 'specialties'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
specialty_code = db.Column(db.String(16))
department_id = db.Column(db.Integer, db.ForeignKey('departments.id'))
classes = db.relationship('_Class', backref='specialty', lazy='dynamic')
def __repr__(self):
return '<Specialty %r>' % self.name
enrollments = db.Table('enrollments',
db.Column('course_id', db.Integer, db.ForeignKey('courses.id')),
db.Column('class_id', db.Integer, db.ForeignKey('classes.id'))
)
class _Class(db.Model):
"""班级"""
__tablename__ = 'classes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
class_code = db.Column(db.String(16))
specialty_id = db.Column(db.Integer, db.ForeignKey('specialties.id'))
students = db.relationship('User', backref='_class', lazy='dynamic')
courses = db.relationship('Course',
secondary=enrollments,
backref=db.backref('classes', lazy='dynamic'),
lazy='dynamic')
"""班级与课程的多对多关系定义"""
def __repr__(self):
return '<_Class %r>' % self.name
class Temp(db.Model):
"""缓存表 缓存字符串"""
__tablename__ = 'temp'
id = db.Column(db.Integer, primary_key=True)
mark = db.Column(db.String)
identify = db.Column(db.String)
content = db.Column(db.Text)
date = db.Column(db.DateTime(), default=datetime.utcnow)
life = db.Column(db.Integer)
@staticmethod
def set_temp(mark, identify, content, life=2):
# type: (str, str, object, int) -> None
"""放置缓存
:param life:
:param string mark: 标记 声明缓存的用途
:param string identify: 用户标识 用于区分不同用户的缓存记录
:param content: 内容 多为dict
:return: None
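Example (the mark, identify and content values are illustrative)::
Temp.set_temp('schedule_stu_id', '2014001', {'week': 1})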
"""
temps = Temp.query.filter_by(mark=mark, identify=identify).all()
for t in temps:
db.session.delete(t)
t = Temp(mark=mark, identify=identify, content=str(content), life=life)
db.session.add(t)
db.session.commit()
@staticmethod
def get_temp(mark, identify):
"""取出缓存
:param mark: 标记 声明缓存的用途
:param identify: 指定用户标示
:return: 之前缓存的对象 (default:None)
"""
t = Temp.query.filter_by(mark=mark, identify=identify).first()
if not t:
return None
delta = datetime.utcnow() - t.date
if delta.days < t.life:
content = eval(t.content)
if isinstance(content, dict):
content['cache'] = True
content['cache-date'] = localtime(t.date)
return content
else:
return None
@staticmethod
def set_schedule_cache_for_stu_id(stu_id, schedule):
"""缓存学号对应的课程表
:param string stu_id: 学生学号
:param dict schedule: 课程表词典
:return: None
"""
Temp.set_temp(mark='schedule_stu_id', identify=stu_id, content=schedule)
@staticmethod
def get_schedule_cache_for_stu_id(stu_id):
"""通过学号获取缓存的课程表
:param string stu_id: 学生学号
:return: 课程表dict 无缓存或缓存过期则返回None
"""
return Temp.get_temp(mark='schedule_stu_id', identify=stu_id)
@staticmethod
def set_school_structure(school_structure):
Temp.set_temp(mark='school_structure', identify=None, content=school_structure, life=360)
@staticmethod
def get_school_structure():
return Temp.get_temp(mark='school_structure', identify=None)
class RawCourse(db.Model):
"""原课程
抽象的课程概念。如不同年级、不同专业、不同教师所教授的“毛概”都是毛概课
"""
__tablename__ = 'raw_courses'
id = db.Column(db.Integer, primary_key=True) #: Raw course id
name = db.Column(db.String(128)) #: Course name
nickname = db.Column(db.String(128)) #: Course nickname, e.g. "毛泽东思想与中国特色社会主义理论体系概论" is nicknamed "毛概"
course_code = db.Column(db.String(16)) #: Course code
worth = db.Column(db.String(2)) #: Credits
courses = db.relationship('Course', backref='raw_course', lazy='dynamic')
def to_json(self):
raw_course_json = {
'id': self.id,
'name': self.name,
'nickname': self.nickname,
'course_code': self.course_code,
'worth': self.worth,
'url': url_for('api.get_raw_courses_by_id', id=self.id, _external=True),
}
return raw_course_json
@staticmethod
def from_json(json_post):
name = json_post.get('name')
nickname = json_post.get('nickname')
course_code = json_post.get('course_code')
worth = json_post.get('worth')
if name is None or name == '':
raise ValidationError('Must have name')
return RawCourse(name=name, nickname=nickname, course_code=course_code, worth=worth)
substitutes = db.Table('substitutes',
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('course_id', db.Integer, db.ForeignKey('courses.id')),
)
class Course(db.Model):
"""课程
具体的课程,包含上课时间地点周次、哪位老师负责、上课涉及班级等具体信息
"""
__tablename__ = 'courses'
id = db.Column(db.Integer, primary_key=True) #: Course id
when_code = db.Column(db.String(32)) #: Code for the lesson time slot
week = db.Column(db.String(64)) #: Teaching weeks
week_raw = db.Column(db.String(32)) #: Unparsed teaching weeks
parity = db.Column(db.String(32)) #: Odd/even-week attribute
which_room = db.Column(db.String(32)) #: Classroom
where = db.Column(db.String(32)) #: Campus
raw_course_id = db.Column(db.Integer, db.ForeignKey('raw_courses.id'))
teacher_id = db.Column(db.Integer, db.ForeignKey('users.id'))
substitute_teachers = db.relationship('User',
secondary=substitutes,
backref=db.backref('guest_courses', lazy='dynamic'),
lazy='dynamic')
def __init__(self, teacher, raw_course, **kwargs):
super(Course, self).__init__(**kwargs)
self.teacher = teacher
self.raw_course = raw_course
@staticmethod
def is_safety(string):
# type: (str) -> bool
allow_chr = [' ', ',', '[', ']', ',', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
try:
for each in string:
if each not in allow_chr:
raise ValueError("含有非安全字符")
return True
except ValueError:
return False
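# Example (illustrative): is_safety whitelists only digits, spaces,
# commas and brackets, so a serialized week list passes while code does not:
#   Course.is_safety('[1, 2, 3]')   -> True
#   Course.is_safety('__import__')  -> False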
def to_json(self):
if self.week is not None and Course.is_safety(self.week):
week = eval(self.week)
else:
week = None
course_json = {
'id': self.id,
'when_code': self.when_code,
'week': week,
'week_raw': self.week_raw,
'parity': self.parity,
'which_room': self.which_room,
'where': self.where,
'raw_course_id': self.raw_course.id,
'name': self.raw_course.name,
'nickname': self.raw_course.nickname,
'teacher_id': self.teacher_id,
'teacher': self.teacher.name,
'url': url_for('api.get_courses_by_id', id=self.id, _external=True),
'classes': [c.id for c in self.classes],
}
return course_json
@staticmethod
def from_json(post_json):
"""通过用户传入Json新建课程对象"""
when_code = post_json.get('when_code')
week = post_json.get('week')
if when_code is None or week is None:
raise ValidationError("必须含有上课时间(when_code)、上课周次(week)")
week_str = str(week)
if week_str is not None and Course.is_safety(week_str):
week = week_str
else:
abort(400, u"week字段中有错误")
# 检测用户传入的课程字段有无错误
classes_id_list = post_json.get('classes')
if classes_id_list is not None:
for c_id in classes_id_list:
c = _Class.query.filter_by(id=c_id).first()
if c is None:
abort(400, u'the course class information contains errors')
else:
abort(400, u'missing course class information')
week_raw = post_json.get('week_raw')
parity = post_json.get('parity')
which_room = post_json.get('which_room')
where = post_json.get('where')
raw_course_id = post_json.get('raw_course_id')
teacher_id = post_json.get('teacher_id')
raw_course = RawCourse.query.get_or_404(raw_course_id)
teacher = User.query.get_or_404(teacher_id)
course = Course(teacher, raw_course, when_code=when_code, week=week, week_raw=week_raw,
parity=parity, which_room=which_room, where=where)
db.session.add(course)
db.session.commit()
# After inserting into the database, attach the classes attending the course
for c_id in classes_id_list:
c = _Class.query.filter_by(id=c_id).first()
course.classes.append(c)
return course
def operate_classes(self, operation, _classes):
# type: (int, list) -> None
"""为课程添加/删除上课班级
:param operation: 执行的操作 应为Operation类中的类变量
:param list _classes: _Class 班级对象 列表
"""
for each in _classes:
if operation is Operation.ADD: # Add the class to the course
self.classes.append(each)
elif operation is Operation.REMOVE: # Remove the class from the course
self.classes.remove(each)
db.session.add(self)
db.session.commit()
def appoint_substitute_teacher(self, operation, users):
# type: (int, list) -> None
"""指定代课教师
课程负责教师、代本课的教师、教务处管理教师 能指定代课教师
"""
for each in users:
if operation is Operation.ADD:
self.substitute_teachers.append(each)
elif operation is Operation.REMOVE:
self.substitute_teachers.remove(each)
db.session.add(self)
db.session.commit()
class User(UserMixin, db.Model):
"""用户model"""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
school_code = db.Column(db.String(16), unique=True, index=True) #: Student number
user_code = db.Column(db.String(16), unique=True, index=True) #: User code in the academic-affairs system
username = db.Column(db.String(64), unique=True, index=True) #: Username (self-chosen)
name = db.Column(db.String(64)) #: Real name
about_me = db.Column(db.Text()) #: Personal profile
password_hash = db.Column(db.String(128)) #: (Hashed) password
confirmed = db.Column(db.Boolean, default=False) #: Confirmed
member_since = db.Column(db.DateTime(), default=datetime.utcnow) #: Account registration time
last_seen = db.Column(db.DateTime(), default=datetime.utcnow) #: Last login time
role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) #: Role
class_id = db.Column(db.Integer, db.ForeignKey('classes.id')) #: Class
courses = db.relationship('Course', backref='teacher', lazy='dynamic') #: Courses this teacher is responsible for
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def to_json(self):
"""将本对象转换为json
:return: 用户信息json
"""
role = self.role
json_user = {
'id': self.id,
'url': url_for('api.get_user', id=self.id, _external=True),
'school_code': self.school_code,
'username': self.username,
'member_since': localtime(self.member_since),
'last_seen': localtime(self.last_seen),
'role': role.name,
'permissions': role.permissions,
}
return json_user
def generate_auth_token(self, expiration):
"""生成认证token
:param expiration: 生存期 单位为秒
:return:
"""
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
"""验证认证token
:param token: 之前获取的token
:return: 如通过验证,返回登录用户的对象
"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
"""陌生人
没有任何权限
"""
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def localtime(utc_time):
"""获取utc时间转换为的亚洲时间
:param utc_time: utc时间
:return: 亚洲时间
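Example (Asia/Chongqing is UTC+8; the output shown is illustrative)::
>>> localtime(datetime(2017, 1, 1))
'2017-01-01 08:00:00 CST'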
"""
if not isinstance(utc_time, datetime):
return None
from pytz import timezone
local_time = utc_time.replace(tzinfo=timezone('UTC')).astimezone(timezone('Asia/Chongqing'))
return local_time.strftime('%Y-%m-%d %H:%M:%S %Z')
class Version(db.Model):
id = db.Column(db.Integer, primary_key=True)
new = db.Column(db.Boolean)
version = db.Column(db.String(32))
download_link_android = db.Column(db.Text)
whatsnew = db.Column(db.Text)
def to_json(self):
return {
'version': self.version,
'download_android': self.download_link_android,
'download_qr_url': 'http://qr.topscan.com/api.php?'
'text=%s&logo=http://otl5stjju.bkt.clouddn.com/logo.png' % self.download_link_android,
'whatsnew': self.whatsnew,
}
@staticmethod
def new_version(version_str, download_link, whatsnew):
# type: (str, str, str) ->None
for each in Version.query.filter_by(new=True).all():
each.new = False
db.session.add(each)
version = Version(new=True, version=version_str, download_link_android=download_link,
whatsnew=whatsnew)
db.session.add(version)
db.session.commit()
|
bllli/tsxyAssistant
|
app/models.py
|
Python
|
gpl-3.0
| 24,028
|
from django.db import models
from django.core.exceptions import ValidationError
from cyder.cydns.domain.models import Domain, _name_to_domain
from cyder.cydns.ip.models import Ip
from cyder.cydns.validation import validate_name
from cyder.cydns.mixins import ObjectUrlMixin
class PTR(Ip, ObjectUrlMixin):
"""A PTR is used to map an IP to a domain name.
>>> PTR(ip_str=ip_str, name=fqdn, ip_type=ip_type)
"""
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, validators=[validate_name])
data_domain = models.ForeignKey(Domain, null=True, blank=True)
def details(self):
return (
('Ip', str(self.ip_str)),
('Record Type', 'PTR'),
('Name', self.name),
)
class Meta:
db_table = 'ptr'
unique_together = ('ip_str', 'ip_type', 'name')
def save(self, *args, **kwargs):
if self.pk: # We need to exist in the db first.
db_self = PTR.objects.get(pk=self.pk)
if db_self.name == self.name and db_self.ip_str == self.ip_str:
# Nothing important changed. Don't rebuild the zone file.
rebuild = False
else:
rebuild = True
else:
rebuild = True
if rebuild:
self.reverse_domain.dirty = True
self.reverse_domain.save() # The reverse_domain field is in the Ip
# class.
super(PTR, self).save(*args, **kwargs)
def validate_no_cname(self):
"""Considering existing CNAMES must be done when editing and
creating new :class:`PTR` objects.
"PTR records must point back to a valid A record, not a
alias defined by a CNAME."
-- `RFC 1912 <http://tools.ietf.org/html/rfc1912>`__
An example of something that is not allowed::
FOO.BAR.COM CNAME BEE.BAR.COM
BEE.BAR.COM A 128.193.1.1
1.1.193.128 PTR FOO.BAR.COM
^-- PTR's shouldn't point to CNAMES
"""
pass
# TODO: implement this function and call it in clean()
def clean(self, *args, **kwargs):
if 'update_reverse_domain' in kwargs: # TODO: clean this up
urd = kwargs.pop('update_reverse_domain')
self.clean_ip(update_reverse_domain=urd)
else:
self.clean_ip()
self.data_domain = _name_to_domain(self.name)
def __str__(self):
return "{0} {1} {2}".format(str(self.ip_str), 'PTR', self.name)
def __repr__(self):
return "<{0}>".format(str(self))
|
ngokevin/cyder
|
cyder/cydns/ptr/models.py
|
Python
|
bsd-3-clause
| 2,688
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetCdnKey
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-video-stitcher
# [START videostitcher_v1_generated_VideoStitcherService_GetCdnKey_sync]
from google.cloud.video import stitcher_v1
def sample_get_cdn_key():
# Create a client
client = stitcher_v1.VideoStitcherServiceClient()
# Initialize request argument(s)
request = stitcher_v1.GetCdnKeyRequest(
name="name_value",
)
# Make the request
response = client.get_cdn_key(request=request)
# Handle the response
print(response)
# [END videostitcher_v1_generated_VideoStitcherService_GetCdnKey_sync]
|
googleapis/python-video-stitcher
|
samples/generated_samples/videostitcher_v1_generated_video_stitcher_service_get_cdn_key_sync.py
|
Python
|
apache-2.0
| 1,467
|
#!/usr/bin/env python
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
import sys
def main(argv):
if len(argv) != 3:
print '%s: invalid arguments' % argv[0]
return 2
filename1 = argv[1]
filename2 = argv[2]
try:
with open(filename1, "r") as f1:
str1 = f1.readlines()
with open(filename2, "r") as f2:
str2 = f2.readlines()
diffs = difflib.unified_diff(
str1, str2, fromfile=filename1, tofile=filename2)
except Exception as e:
print "something went astray: %s" % e
return 1
status_code = 0
for diff in diffs:
sys.stdout.write(diff)
status_code = 1
return status_code
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
guorendong/iridium-browser-ubuntu
|
third_party/pdfium/testing/tools/text_diff.py
|
Python
|
bsd-3-clause
| 823
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers import detection
from paddle.fluid.framework import Program, program_guard
import unittest
class TestDetection(unittest.TestCase):
def test_detection_output(self):
program = Program()
with program_guard(program):
pb = layers.data(
name='prior_box',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
pbv = layers.data(
name='prior_box_var',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
loc = layers.data(
name='target_box',
shape=[2, 10, 4],
append_batch_size=False,
dtype='float32')
scores = layers.data(
name='scores',
shape=[2, 10, 20],
append_batch_size=False,
dtype='float32')
out = layers.detection_output(
scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
out2, index = layers.detection_output(
scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
self.assertIsNotNone(out)
self.assertIsNotNone(out2)
self.assertIsNotNone(index)
self.assertEqual(out.shape[-1], 6)
print(str(program))
def test_box_coder_api(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[4], dtype='float32')
y = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
bcoder = layers.box_coder(
prior_box=x,
prior_box_var=[0.1, 0.2, 0.1, 0.2],
target_box=y,
code_type='encode_center_size')
self.assertIsNotNone(bcoder)
print(str(program))
def test_detection_api(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[4], dtype='float32')
y = layers.data(name='y', shape=[4], dtype='float32')
z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
iou = layers.iou_similarity(x=x, y=y)
bcoder = layers.box_coder(
prior_box=x,
prior_box_var=y,
target_box=z,
code_type='encode_center_size')
self.assertIsNotNone(iou)
self.assertIsNotNone(bcoder)
matched_indices, matched_dist = layers.bipartite_match(iou)
self.assertIsNotNone(matched_indices)
self.assertIsNotNone(matched_dist)
gt = layers.data(
name='gt', shape=[1, 1], dtype='int32', lod_level=1)
trg, trg_weight = layers.target_assign(
gt, matched_indices, mismatch_value=0)
self.assertIsNotNone(trg)
self.assertIsNotNone(trg_weight)
gt2 = layers.data(
name='gt2', shape=[10, 4], dtype='float32', lod_level=1)
trg, trg_weight = layers.target_assign(
gt2, matched_indices, mismatch_value=0)
self.assertIsNotNone(trg)
self.assertIsNotNone(trg_weight)
print(str(program))
def test_ssd_loss(self):
program = Program()
with program_guard(program):
pb = layers.data(
name='prior_box',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
pbv = layers.data(
name='prior_box_var',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = layers.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = layers.data(
name='gt_label', shape=[1], lod_level=1, dtype='int32')
loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
self.assertIsNotNone(loss)
self.assertEqual(loss.shape[-1], 1)
print(str(program))
class TestPriorBox(unittest.TestCase):
def test_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.prior_box(
input=conv1,
image=images,
min_sizes=[100.0],
aspect_ratios=[1.],
flip=True,
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[3] == 4
class TestPriorBox2(unittest.TestCase):
def test_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [None, 3, None, None]
images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.prior_box(
input=conv1,
image=images,
min_sizes=[100.0],
aspect_ratios=[1.],
flip=True,
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[3] == 4
class TestDensityPriorBox(unittest.TestCase):
def test_density_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.density_prior_box(
input=conv1,
image=images,
densities=[3, 4],
fixed_sizes=[50., 60.],
fixed_ratios=[1.0],
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[-1] == 4
class TestAnchorGenerator(unittest.TestCase):
def test_anchor_generator(self):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
assert len(anchor.shape) == 4
assert anchor.shape == var.shape
assert anchor.shape[3] == 4
class TestGenerateProposalLabels(unittest.TestCase):
def test_generate_proposal_labels(self):
program = Program()
with program_guard(program):
rpn_rois = layers.data(
name='rpn_rois',
shape=[4, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
gt_classes = layers.data(
name='gt_classes',
shape=[6],
dtype='int32',
lod_level=1,
append_batch_size=False)
is_crowd = layers.data(
name='is_crowd',
shape=[6],
dtype='int32',
lod_level=1,
append_batch_size=False)
gt_boxes = layers.data(
name='gt_boxes',
shape=[6, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
class_nums = 5
outs = fluid.layers.generate_proposal_labels(
rpn_rois=rpn_rois,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_boxes=gt_boxes,
im_info=im_info,
batch_size_per_im=2,
fg_fraction=0.5,
fg_thresh=0.5,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=class_nums)
rois = outs[0]
labels_int32 = outs[1]
bbox_targets = outs[2]
bbox_inside_weights = outs[3]
bbox_outside_weights = outs[4]
assert rois.shape[1] == 4
assert rois.shape[0] == labels_int32.shape[0]
assert rois.shape[0] == bbox_targets.shape[0]
assert rois.shape[0] == bbox_inside_weights.shape[0]
assert rois.shape[0] == bbox_outside_weights.shape[0]
assert bbox_targets.shape[1] == 4 * class_nums
assert bbox_inside_weights.shape[1] == 4 * class_nums
assert bbox_outside_weights.shape[1] == 4 * class_nums
class TestGenerateMaskLabels(unittest.TestCase):
def test_generate_mask_labels(self):
program = Program()
with program_guard(program):
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
gt_classes = layers.data(
name='gt_classes',
shape=[2, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
is_crowd = layers.data(
name='is_crowd',
shape=[2, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
gt_segms = layers.data(
name='gt_segms',
shape=[20, 2],
dtype='float32',
lod_level=3,
append_batch_size=False)
rois = layers.data(
name='rois',
shape=[4, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
labels_int32 = layers.data(
name='labels_int32',
shape=[4, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
num_classes = 5
resolution = 14
outs = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_segms,
rois=rois,
labels_int32=labels_int32,
num_classes=num_classes,
resolution=resolution)
mask_rois, roi_has_mask_int32, mask_int32 = outs
assert mask_rois.shape[1] == 4
assert mask_int32.shape[1] == num_classes * resolution * resolution
class TestMultiBoxHead(unittest.TestCase):
def test_multi_box_head(self):
data_shape = [3, 224, 224]
mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape)
assert len(box.shape) == 2
assert box.shape == var.shape
assert box.shape[1] == 4
assert mbox_locs.shape[1] == mbox_confs.shape[1]
def multi_box_head_output(self, data_shape):
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
conv2 = fluid.layers.conv2d(conv1, 3, 3, 2)
conv3 = fluid.layers.conv2d(conv2, 3, 3, 2)
conv4 = fluid.layers.conv2d(conv3, 3, 3, 2)
conv5 = fluid.layers.conv2d(conv4, 3, 3, 2)
mbox_locs, mbox_confs, box, var = layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
return mbox_locs, mbox_confs, box, var
class TestDetectionMAP(unittest.TestCase):
def test_detection_map(self):
program = Program()
with program_guard(program):
detect_res = layers.data(
name='detect_res',
shape=[10, 6],
append_batch_size=False,
dtype='float32')
label = layers.data(
name='label',
shape=[10, 6],
append_batch_size=False,
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
self.assertIsNotNone(map_out)
self.assertEqual(map_out.shape, (1, ))
print(str(program))
class TestRpnTargetAssign(unittest.TestCase):
def test_rpn_target_assign(self):
program = Program()
with program_guard(program):
bbox_pred_shape = [10, 50, 4]
cls_logits_shape = [10, 50, 2]
anchor_shape = [50, 4]
bbox_pred = layers.data(
name='bbox_pred',
shape=bbox_pred_shape,
append_batch_size=False,
dtype='float32')
cls_logits = layers.data(
name='cls_logits',
shape=cls_logits_shape,
append_batch_size=False,
dtype='float32')
anchor_box = layers.data(
name='anchor_box',
shape=anchor_shape,
append_batch_size=False,
dtype='float32')
anchor_var = layers.data(
name='anchor_var',
shape=anchor_shape,
append_batch_size=False,
dtype='float32')
gt_boxes = layers.data(
name='gt_boxes', shape=[4], lod_level=1, dtype='float32')
is_crowd = layers.data(
name='is_crowd',
shape=[1, 10],
dtype='int32',
lod_level=1,
append_batch_size=False)
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
outs = layers.rpn_target_assign(
bbox_pred=bbox_pred,
cls_logits=cls_logits,
anchor_box=anchor_box,
anchor_var=anchor_var,
gt_boxes=gt_boxes,
is_crowd=is_crowd,
im_info=im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=False)
pred_scores = outs[0]
pred_loc = outs[1]
tgt_lbl = outs[2]
tgt_bbox = outs[3]
bbox_inside_weight = outs[4]
self.assertIsNotNone(pred_scores)
self.assertIsNotNone(pred_loc)
self.assertIsNotNone(tgt_lbl)
self.assertIsNotNone(tgt_bbox)
self.assertIsNotNone(bbox_inside_weight)
assert pred_scores.shape[1] == 1
assert pred_loc.shape[1] == 4
assert pred_loc.shape[1] == tgt_bbox.shape[1]
print(str(program))
class TestGenerateProposals(unittest.TestCase):
def test_generate_proposals(self):
program = Program()
with program_guard(program):
data_shape = [20, 64, 64]
images = fluid.layers.data(
name='images', shape=data_shape, dtype='float32')
im_info = fluid.layers.data(
name='im_info', shape=[3], dtype='float32')
anchors, variances = fluid.layers.anchor_generator(
name='anchor_generator',
input=images,
anchor_sizes=[32, 64],
aspect_ratios=[1.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
num_anchors = anchors.shape[2]
scores = fluid.layers.data(
name='scores', shape=[num_anchors, 8, 8], dtype='float32')
bbox_deltas = fluid.layers.data(
name='bbox_deltas',
shape=[num_anchors * 4, 8, 8],
dtype='float32')
rpn_rois, rpn_roi_probs = fluid.layers.generate_proposals(
name='generate_proposals',
scores=scores,
bbox_deltas=bbox_deltas,
im_info=im_info,
anchors=anchors,
variances=variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0)
self.assertIsNotNone(rpn_rois)
self.assertIsNotNone(rpn_roi_probs)
print(rpn_rois.shape)
class TestYoloDetection(unittest.TestCase):
def test_yolov3_loss(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32')
gt_label = layers.data(name='gt_label', shape=[10], dtype='int32')
gt_score = layers.data(name='gt_score', shape=[10], dtype='float32')
loss = layers.yolov3_loss(
x,
gt_box,
gt_label, [10, 13, 30, 13], [0, 1],
10,
0.7,
32,
gt_score=gt_score,
use_label_smooth=False)
self.assertIsNotNone(loss)
def test_yolo_box(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
img_size = layers.data(name='img_size', shape=[2], dtype='int32')
boxes, scores = layers.yolo_box(x, img_size, [10, 13, 30, 13], 10,
0.01, 32)
self.assertIsNotNone(boxes)
self.assertIsNotNone(scores)
class TestBoxClip(unittest.TestCase):
def test_box_clip(self):
program = Program()
with program_guard(program):
input_box = layers.data(
name='input_box', shape=[7, 4], dtype='float32', lod_level=1)
im_info = layers.data(name='im_info', shape=[3], dtype='float32')
out = layers.box_clip(input_box, im_info)
self.assertIsNotNone(out)
class TestMulticlassNMS(unittest.TestCase):
def test_multiclass_nms(self):
program = Program()
with program_guard(program):
bboxes = layers.data(
name='bboxes', shape=[-1, 10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7)
self.assertIsNotNone(output)
class TestMulticlassNMS2(unittest.TestCase):
def test_multiclass_nms2(self):
program = Program()
with program_guard(program):
bboxes = layers.data(
name='bboxes', shape=[-1, 10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
output = fluid.contrib.multiclass_nms2(bboxes, scores, 0.3, 400,
200, 0.7)
output2, index = fluid.contrib.multiclass_nms2(
bboxes, scores, 0.3, 400, 200, 0.7, return_index=True)
self.assertIsNotNone(output)
self.assertIsNotNone(output2)
self.assertIsNotNone(index)
class TestCollectFpnProposals(unittest.TestCase):
def test_collect_fpn_proposals(self):
program = Program()
with program_guard(program):
multi_bboxes = []
multi_scores = []
for i in range(4):
bboxes = layers.data(
name='rois' + str(i),
shape=[10, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
scores = layers.data(
name='scores' + str(i),
shape=[10, 1],
dtype='float32',
lod_level=1,
append_batch_size=False)
multi_bboxes.append(bboxes)
multi_scores.append(scores)
fpn_rois = layers.collect_fpn_proposals(multi_bboxes, multi_scores,
2, 5, 10)
self.assertIsNotNone(fpn_rois)
class TestDistributeFpnProposals(unittest.TestCase):
def test_distribute_fpn_proposals(self):
program = Program()
with program_guard(program):
fpn_rois = fluid.layers.data(
name='data', shape=[4], dtype='float32', lod_level=1)
multi_rois, restore_ind = layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
self.assertIsNotNone(multi_rois)
self.assertIsNotNone(restore_ind)
if __name__ == '__main__':
unittest.main()
|
chengduoZH/Paddle
|
python/paddle/fluid/tests/test_detection.py
|
Python
|
apache-2.0
| 22,717
|
# Copyright 2022 The Deluca Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shift Scale Transform."""
import jax.numpy as jnp
class ShiftScaleTransform:
"""Data normalizer."""
# vectors is an array of vectors
def __init__(self, vectors):
vectors_concat = jnp.concatenate(vectors)
self.mean = jnp.mean(vectors_concat)
self.std = jnp.std(vectors_concat)
print(self.mean, self.std)
def _transform(self, x, mean, std):
return (x - mean) / std
def _inverse_transform(self, x, mean, std):
return (x * std) + mean
def __call__(self, vector):
return self._transform(vector, self.mean, self.std)
def inverse(self, vector):
return self._inverse_transform(vector, self.mean, self.std)
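# A minimal usage sketch with hypothetical data: fit the normalizer on a
# list of vectors, transform one, and recover it with the inverse transform.
if __name__ == "__main__":
    data = [jnp.array([1.0, 2.0, 3.0]), jnp.array([4.0, 5.0, 6.0])]
    scaler = ShiftScaleTransform(data)
    z = scaler(data[0])
    # The round trip should reproduce the input up to float precision.
    assert jnp.allclose(scaler.inverse(z), data[0], atol=1e-5)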
|
google/deluca
|
deluca/lung/utils/data/transform.py
|
Python
|
apache-2.0
| 1,238
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
project_id = os.getenv('GCLOUD_PROJECT')
# TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable
# END TODO
# TODO: Import the storage module
# END TODO
# TODO: Create a client for Cloud Storage
# END TODO
# TODO: Use the client to get the Cloud Storage bucket
# END TODO
"""
Uploads a file to a given Cloud Storage bucket and returns the public url
to the new object.
"""
def upload_file(image_file, public):
pass
# TODO: Use the bucket to get a blob object
# END TODO
# TODO: Use the blob to upload the file
# END TODO
# TODO: Make the object public
# END TODO
# TODO: Modify to return the blob's Public URL
# END TODO
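# One possible completion, sketched for reference only (the TODOs above are
# the exercise, so the original stubs are left untouched). It assumes the
# google-cloud-storage client library, that GCLOUD_BUCKET names an existing
# bucket, and that image_file is a Flask/werkzeug upload object:
#
#     from google.cloud import storage
#
#     bucket_name = os.getenv('GCLOUD_BUCKET')
#     storage_client = storage.Client(project_id)
#     bucket = storage_client.get_bucket(bucket_name)
#
#     def upload_file(image_file, public):
#         blob = bucket.blob(image_file.filename)
#         blob.upload_from_string(image_file.read(),
#                                 content_type=image_file.content_type)
#         if public:
#             blob.make_public()
#         return blob.public_url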
|
GoogleCloudPlatform/training-data-analyst
|
courses/developingapps/v1.3/python/cloudstorage/start/quiz/gcp/storage.py
|
Python
|
apache-2.0
| 1,312
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf', gamma=.5).fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, edgecolors='black')
plt.title(titles[i])
plt.suptitle("Unlabeled points are colored white", y=0.1)
plt.show()
|
chrsrds/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
Python
|
bsd-3-clause
| 2,391
|
from TestBase import TestBase
testfile = 'BIOV/cust_BIOV.20131112173319.ok'
class TestWorker(TestBase):
def setUp(self):
TestBase.setUp(self)
self.mc.src_cur = self.mc.src_main_dir+testfile
self.mc.keep_in_memory = True
self.mc.work()
first = self.mc.data_array.items()[0]
#print str(first)
self.mc.data_in = first[1]
        ### from here on: the work_ds procedure, i.e. self.mc.work_ds(first)
self.mc.initDataStore()
self.mc.set_operation( first[1] )
|
groovehunter/xmlflat2db
|
test/TestWorker.py
|
Python
|
gpl-2.0
| 523
|
__author__ = 'baohua'
__doc__ = 'glance specific checks'
|
yeasy/tripled
|
tripled/case/glance/__init__.py
|
Python
|
apache-2.0
| 57
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_Octave_from_pitch_name_01():
assert pitchtools.Octave.from_pitch_name("cs'") == 4
assert pitchtools.Octave.from_pitch_name('cs') == 3
assert pitchtools.Octave.from_pitch_name('cs,') == 2
|
mscuthbert/abjad
|
abjad/tools/pitchtools/test/test_pitchtools_Octave_from_pitch_name.py
|
Python
|
gpl-3.0
| 267
|
import os
import re
import sys
import socket
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import configuration
import lib
SessionsMixin = lib.make_sessions_mixin([('otherrods', 'apass')], [('alice', 'password'), ('anonymous', None)])
class Test_Iticket(SessionsMixin, unittest.TestCase):
def setUp(self):
super(Test_Iticket, self).setUp()
self.admin = self.admin_sessions[0]
self.user = self.user_sessions[0]
self.anon = self.user_sessions[1]
def tearDown(self):
super(Test_Iticket, self).tearDown()
def test_iticket_bad_subcommand(self):
self.admin.assert_icommand('iticket badsubcommand', 'STDOUT_SINGLELINE', 'unrecognized command')
def test_iticket_get(self):
filename = 'TicketTestFile'
filepath = os.path.join(self.admin.local_session_dir, filename)
lib.make_file(filepath, 1)
collection = self.admin.session_collection + '/dir'
data_obj = collection + '/' + filename
self.admin.assert_icommand('imkdir ' + collection)
self.admin.assert_icommand('iput ' + filepath + ' ' + data_obj)
self.admin.assert_icommand('ils -l ' + collection, 'STDOUT')
self.user.assert_icommand('ils -l ' + collection, 'STDERR')
self.anon.assert_icommand('ils -l ' + collection, 'STDERR')
self.ticket_get_on(data_obj, data_obj)
self.ticket_get_on(collection, data_obj)
def test_iticket_put(self):
filename = 'TicketTestFile'
filepath = os.path.join(self.admin.local_session_dir, filename)
lib.make_file(filepath, 1)
collection = self.admin.session_collection + '/dir'
data_obj = collection + '/' + filename
self.admin.assert_icommand('imkdir ' + collection)
self.admin.assert_icommand('iput ' + filepath + ' ' + data_obj)
self.admin.assert_icommand('ils -l ' + collection, 'STDOUT')
self.user.assert_icommand('ils -l ' + collection, 'STDERR')
self.anon.assert_icommand('ils -l ' + collection, 'STDERR')
self.ticket_put_on(data_obj, data_obj, filepath)
self.ticket_put_on(collection, data_obj, filepath)
def ticket_get_on(self, ticket_target, data_obj):
ticket = 'ticket'
self.ticket_get_fail(ticket, data_obj)
self.admin.assert_icommand('iticket create read ' + ticket_target + ' ' + ticket)
self.admin.assert_icommand('iticket ls', 'STDOUT')
self.ticket_get(ticket, data_obj)
self.ticket_group_get(ticket, data_obj)
self.ticket_user_get(ticket, data_obj)
self.ticket_host_get(ticket, data_obj)
self.ticket_expire_get(ticket, data_obj)
self.admin.assert_icommand('iticket ls ' + ticket, 'STDOUT')
#self.admin.assert_icommand('iticket delete ' + ticket)
#self.admin.assert_icommand('iticket create read ' + ticket_target + ' ' + ticket)
#self.ticket_uses_get(ticket, data_obj)
#self.admin.assert_icommand('iticket delete ' + ticket)
#self.admin.assert_icommand('iticket create read ' + ticket_target + ' ' + ticket)
#self.ticket_uses_get(ticket, data_obj)
self.admin.assert_icommand('iticket delete ' + ticket)
def ticket_put_on(self, ticket_target, data_obj, filepath):
ticket = 'faketicket'
self.ticket_put_fail(ticket, data_obj, filepath)
_, out, _ = self.admin.assert_icommand(['iticket', 'create', 'write', ticket_target], 'STDOUT_SINGLELINE', 'ticket:')
ticket = out.rpartition(':')[2].rstrip('\n')
assert len(ticket) == 15, ticket
ticket_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
for c in ticket:
assert c in ticket_chars, c
self.admin.assert_icommand('iticket mod ' + ticket + ' write-file 0')
self.admin.assert_icommand('iticket ls', 'STDOUT')
self.ticket_put(ticket, data_obj, filepath)
self.ticket_group_put(ticket, data_obj, filepath)
self.ticket_user_put(ticket, data_obj, filepath)
self.ticket_host_put(ticket, data_obj, filepath)
self.ticket_expire_put(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket ls ' + ticket, 'STDOUT')
#self.admin.assert_icommand('iticket delete ' + ticket)
#self.admin.assert_icommand('iticket create write ' + ticket_target + ' ' + ticket)
#self.ticket_uses_put(ticket, data_obj, filepath)
#self.admin.assert_icommand('iticket delete ' + ticket)
#self.admin.assert_icommand('iticket create write ' + ticket_target + ' ' + ticket)
#self.admin.assert_icommand('iticket delete ' + ticket)
#self.admin.assert_icommand('iticket create write ' + ticket_target + ' ' + ticket)
#self.ticket_uses_put(ticket, data_obj, filepath)
#self.ticket_bytes_put(ticket, data_obj)
self.admin.assert_icommand('iticket delete ' + ticket)
def ticket_get(self, ticket, data_obj):
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
def ticket_put(self, ticket, data_obj, filepath):
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
def ticket_get_fail(self, ticket, data_obj):
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
def ticket_put_fail(self, ticket, data_obj, filepath):
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
def ticket_uses_get(self, ticket, data_obj):
self.admin.assert_icommand('iticket mod ' + ticket + ' uses 2')
self.anon.assert_icommand('iget ' + data_obj + ' -', 'STDERR')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.admin.assert_icommand('iticket ls', 'STDOUT')
self.admin.assert_icommand('iticket mod ' + ticket + ' uses 0')
def ticket_uses_put(self, ticket, data_obj, filepath):
self.admin.assert_icommand('iticket mod ' + ticket + ' write-file 3')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' write-file 6')
self.admin.assert_icommand('iticket ls', 'STDOUT')
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' write-file 0')
self.admin.assert_icommand('iticket mod ' + ticket + ' uses 3')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' uses 6')
self.admin.assert_icommand('iticket ls', 'STDOUT')
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' uses 0')
def ticket_group_get(self, ticket, data_obj):
group = 'group'
self.admin.assert_icommand('iadmin mkgroup ' + group)
self.admin.assert_icommand('iadmin atg ' + group + ' ' + self.user.username)
self.admin.assert_icommand('iticket mod ' + ticket + ' add group ' + group)
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' remove group ' + group)
self.admin.assert_icommand('iadmin rfg ' + group + ' ' + self.user.username)
self.admin.assert_icommand(['iadmin', 'rmgroup', group])
def ticket_group_put(self, ticket, data_obj, filepath):
group = 'group'
self.admin.assert_icommand('iadmin mkgroup ' + group)
self.admin.assert_icommand('iadmin atg ' + group + ' ' + self.user.username)
self.admin.assert_icommand('iticket mod ' + ticket + ' add group ' + group)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' remove group ' + group)
self.admin.assert_icommand('iadmin rfg ' + group + ' ' + self.user.username)
self.admin.assert_icommand('iadmin rmgroup ' + group)
def ticket_user_get(self, ticket, data_obj):
self.admin.assert_icommand('iticket mod ' + ticket + ' add user ' + self.user.username)
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.user.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDOUT')
self.anon.assert_icommand('iget -t ' + ticket + ' ' + data_obj + ' -', 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' remove user ' + self.user.username)
def ticket_user_put(self, ticket, data_obj, filepath):
self.admin.assert_icommand('iticket mod ' + ticket + ' add user ' + self.user.username)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.anon.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' remove user ' + self.user.username)
def ticket_host_get(self, ticket, data_obj):
host = lib.get_hostname()
not_host = '0.0.0.0'
self.admin.assert_icommand('iticket mod ' + ticket + ' add host ' + not_host)
self.ticket_get_fail(ticket, data_obj)
self.admin.assert_icommand('iticket mod ' + ticket + ' add host ' + host)
self.ticket_get(ticket, data_obj)
self.admin.assert_icommand('iticket mod ' + ticket + ' remove host ' + not_host)
self.ticket_get(ticket, data_obj)
self.admin.assert_icommand('iticket mod ' + ticket + ' remove host ' + host)
def ticket_host_put(self, ticket, data_obj, filepath):
host = lib.get_hostname()
not_host = '0.0.0.0'
self.admin.assert_icommand('iticket mod ' + ticket + ' add host ' + not_host)
self.ticket_put_fail(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket mod ' + ticket + ' add host ' + host)
self.ticket_put(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket mod ' + ticket + ' remove host ' + not_host)
self.ticket_put(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket mod ' + ticket + ' remove host ' + host)
def ticket_expire_get(self, ticket, data_obj):
past_date = "1970-01-01"
future_date = "2040-12-12"
self.admin.assert_icommand('iticket mod ' + ticket + ' expire ' + past_date)
self.ticket_get_fail(ticket, data_obj)
self.admin.assert_icommand('iticket mod ' + ticket + ' expire ' + future_date)
self.ticket_get(ticket, data_obj)
self.admin.assert_icommand('iticket mod ' + ticket + ' expire 0')
self.ticket_get(ticket, data_obj)
def ticket_expire_put(self, ticket, data_obj, filepath):
past_date = "1970-01-01"
future_date = "2040-12-12"
self.admin.assert_icommand('iticket mod ' + ticket + ' expire ' + past_date)
self.ticket_put_fail(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket mod ' + ticket + ' expire ' + future_date)
self.ticket_put(ticket, data_obj, filepath)
self.admin.assert_icommand('iticket mod ' + ticket + ' expire 0')
self.ticket_put(ticket, data_obj, filepath)
def ticket_bytes_put(self, ticket, data_obj, filepath):
lib.make_file(filepath, 2)
self.admin.assert_icommand('iticket mod ' + ticket + ' write-byte 6')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' write-byte 8')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj, 'STDERR')
self.admin.assert_icommand('iticket mod ' + ticket + ' write-byte 0')
self.user.assert_icommand('iput -ft ' + ticket + ' ' + filepath + ' ' + data_obj)
|
janiheikkinen/irods
|
tests/pydevtest/test_iticket.py
|
Python
|
bsd-3-clause
| 15,559
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import lstm_ops
block_lstm = lstm_ops._block_lstm # pylint: disable=protected-access
class LSTMBlockCellTest(tf.test.TestCase):
_use_gpu = False
def testNoneDimsWithDynamicRNN(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = tf.contrib.rnn.LSTMBlockCell(cell_size)
x = tf.placeholder(tf.float32, shape=(None, None, input_dim))
output, _ = tf.nn.dynamic_rnn(cell, x, time_major=True, dtype=tf.float32)
sess.run(tf.initialize_all_variables())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
[tf.contrib.rnn.LSTMBlockCell(2)] * 2,
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3],
{x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testLSTMBasicToBlockCell(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
x = tf.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)] * 2,
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.initialize_all_variables()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3],
{x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val})
with tf.variable_scope("block", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
[tf.contrib.rnn.LSTMBlockCell(2)] * 2,
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.initialize_all_variables()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3],
{x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
x = tf.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.LSTMCell(2,
use_peepholes=True,
state_is_tuple=True)] * 2,
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.initialize_all_variables()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3],
{x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val})
with tf.variable_scope("block", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
[tf.contrib.rnn.LSTMBlockCell(2, use_peephole=True)] * 2,
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.initialize_all_variables()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3],
{x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlock(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 5
inputs = []
for _ in range(sequence_length):
inp = tf.convert_to_tensor(
np.random.randn(batch_size, input_size),
dtype=tf.float32)
inputs.append(inp)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
sess.run([tf.initialize_all_variables()])
basic_outputs = sess.run(outputs)
basic_grads = sess.run(tf.gradients(outputs, inputs))
basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))
with tf.variable_scope("block", initializer=initializer):
w = tf.get_variable("w",
shape=[input_size + cell_size, cell_size * 4],
dtype=tf.float32)
b = tf.get_variable("b",
shape=[cell_size * 4],
dtype=tf.float32,
initializer=tf.zeros_initializer)
_, _, _, _, _, _, outputs = block_lstm(
tf.convert_to_tensor(sequence_length,
dtype=tf.int64),
inputs,
w,
b,
cell_clip=0)
sess.run([tf.initialize_all_variables()])
block_outputs = sess.run(outputs)
block_grads = sess.run(tf.gradients(outputs, inputs))
block_wgrads = sess.run(tf.gradients(outputs, [w, b]))
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
def testLSTMBasicToBlockPeeping(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 5
inputs = []
for _ in range(sequence_length):
inp = tf.convert_to_tensor(
np.random.randn(batch_size, input_size),
dtype=tf.float32)
inputs.append(inp)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.LSTMCell(cell_size,
use_peepholes=True,
state_is_tuple=True)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
sess.run([tf.initialize_all_variables()])
basic_outputs = sess.run(outputs)
basic_grads = sess.run(tf.gradients(outputs, inputs))
basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))
with tf.variable_scope("block", initializer=initializer):
w = tf.get_variable("w",
shape=[input_size + cell_size, cell_size * 4],
dtype=tf.float32)
b = tf.get_variable("b",
shape=[cell_size * 4],
dtype=tf.float32,
initializer=tf.zeros_initializer)
wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)
_, _, _, _, _, _, outputs = block_lstm(
tf.convert_to_tensor(sequence_length,
dtype=tf.int64),
inputs,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
cell_clip=0,
use_peephole=True)
sess.run([tf.initialize_all_variables()])
block_outputs = sess.run(outputs)
block_grads = sess.run(tf.gradients(outputs, inputs))
block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
class LSTMBlockCellGpuTest(LSTMBlockCellTest):
_use_gpu = True
if __name__ == "__main__":
tf.test.main()
|
naturali/tensorflow
|
tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
|
Python
|
apache-2.0
| 11,637
|
#!/usr/bin/env python
import os
print('2.3')
os.system('mvn clean test')
|
izrik/maven-auto-versioning-poc
|
build_script.py
|
Python
|
gpl-2.0
| 77
|
import subprocess
import os
class VM(object):
def __init__(self, vm_dict, iso_dir, work_dir, br_names):
self.name = vm_dict['name']
self.version = vm_dict['version']
self.start_port = vm_dict['port_list']['start_port']
self.end_port = vm_dict['port_list']['end_port']
self.connections = []
self.disk = ''
self.cdrom = ''
self.extra_commands = {}
for i in xrange((self.end_port+1-self.start_port)):
self.connections.append(br_names['dummy'])
self.connections.insert(0,br_names['mgmt'])
        if 'boot_device' in vm_dict:
            self.extra_commands['boot_device'] = vm_dict['boot_device']
        if 'machine_type' in vm_dict:
            self.extra_commands['machine_type'] = vm_dict['machine_type']
        if 'cpu_type' in vm_dict:
            self.extra_commands['cpu_type'] = ''
self.console = vm_dict['console']
if os.path.exists(os.path.join(iso_dir,self.version+'.iso')):
self.cdrom = os.path.join(iso_dir, self.version+'.iso')
else:
raise ValueError("ISO not present for %s" % self.name)
if os.path.exists(os.path.join(iso_dir,self.version+'.vmdk')):
subprocess.call(['cp', \
os.path.join(iso_dir, self.version+'.vmdk'), work_dir])
self.disk = os.path.join(work_dir, self.version+'.vmdk')
def fill_connection(self, endpoint, conn_name, br_names):
if not endpoint['port'] in \
xrange(self.start_port, self.end_port+1):
raise ValueError("Invalid port number for %s in connections"\
% endpoint['name'])
if self.connections[endpoint['port']-self.start_port+1] == \
br_names['dummy']:
self.connections[endpoint['port']-self.start_port+1]=conn_name
else:
raise ValueError("Port %d already used in %s", \
endpoint['port'], self.name)
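# A hypothetical construction sketch (all paths, names and bridge names are
# placeholders; the constructor raises ValueError unless
# <iso_dir>/<version>.iso exists on disk):
#
#     vm_dict = {'name': 'r1', 'version': 'router-15.1',
#                'port_list': {'start_port': 1, 'end_port': 4},
#                'console': 9001}
#     br_names = {'mgmt': 'br-mgmt', 'dummy': 'br-dummy'}
#     vm = VM(vm_dict, '/var/isos', '/tmp/work', br_names)
#     vm.fill_connection({'name': 'r2', 'port': 2}, 'link_r1_r2', br_names)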
|
intfrr/Network_Topology
|
vm.py
|
Python
|
apache-2.0
| 1,991
|
from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
import datetime
import textwrap
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format: string
            Float format applied to float data columns
align : string
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format: string
            Float format applied to the array when its dtype is float
align : string
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : string
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
        the note will be wrapped to table width. Notes are not indented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, string_types):
self.title = title
else:
try:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
except:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
        float_format : string
Float formatting for summary of parameters (optional)
title : string
Title of the summary table (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
#TODO: this isn't used anywhere?
rule_dash = widest * '-'
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
if title is not None:
title = title
if len(title) < widest:
title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
#TODO: this isn't used anywhere
title = self.title
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '} \\\\'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
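# A minimal usage sketch of the Summary container (standalone, with
# hypothetical values):
#
#     smry = Summary()
#     smry.add_dict({'Model:': 'OLS', 'No. Observations:': '100'})
#     smry.add_array(np.arange(6).reshape(2, 3))
#     smry.add_title('Results: OLS')
#     print(smry.as_text())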
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = tables[i].shape[1] - 1
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
info['R-squared:'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared:'] = lambda x: "%#8.3f" % x.rsquared_adj
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
        # NOTE: some models don't have loglike defined (e.g. RLM), which raises NotImplementedError
except (AttributeError, KeyError, NotImplementedError):
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
        results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
'''Stack coefficients and standard errors in single column
'''
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.ix[:, 1] = '(' + res.ix[:, 1] + ')'
# Significance stars
if stars:
idx = res.ix[:, 3] < .1
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .05
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .01
res.ix[idx, 0] = res.ix[idx, 0] + '*'
# Stack Coefs and Std.Errors
res = res.ix[:, :2]
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
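# Illustration (added): when any names collide, every occurrence gets a
# suffix of repeated "I"s, e.g.
#   _make_unique(['y', 'y', 'z']) -> ['y I', 'y II', 'z I']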
def summary_col(results, float_format='%.4f', model_names=[], stars=False,
info_dict=None, regressor_order=[]):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : string
float format for coefficients and standard errors
Default : '%.4f'
model_names : list of strings of length len(results) if the names are not
unique, a roman number will be appended to all model names
stars : bool
print significance stars
info_dict : dict
dict of lambda functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list of strings
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(np.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
    # add info about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
    for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry.add_df(summ, header=True, align='l')
smry.add_text('Standard errors in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, ***p<.01')
return smry
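# Hedged usage sketch (added for illustration; not part of statsmodels'
# shipped API). The two toy OLS fits are assumptions chosen only to show a
# side-by-side table with stars and an extra info row.
def _demo_summary_col():  # pragma: no cover
    import numpy as np
    import statsmodels.api as sm
    exog = sm.add_constant(np.random.rand(50, 1))
    endog = np.dot(exog, [1.0, 2.0]) + np.random.randn(50)
    res_full = sm.OLS(endog, exog).fit()
    res_const = sm.OLS(endog, exog[:, :1]).fit()  # constant-only model
    return summary_col([res_full, res_const], stars=True,
                       info_dict={'N': lambda r: "%d" % int(r.nobs)})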
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
    except (TypeError, ValueError):
        out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.ix[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.ix[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
|
hlin117/statsmodels
|
statsmodels/iolib/summary2.py
|
Python
|
bsd-3-clause
| 19,583
|
# -*- coding: utf-8 -*-
try:
from django.conf.urls import patterns, include, url
except ImportError:
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^retorno/pagseguro/', include('pagseguro.urls')),
)
|
mateuspadua/django-pagseguro2
|
testapp/testapp/urls.py
|
Python
|
mit
| 419
|
import teneto
import pytest
import numpy as np
import pandas as pd
def test_errors():
    # Make sure that only one of the different input methods is specified at a time
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_dict={}, from_array=np.zeros([2, 2]))
# Make sure error raised from_array if not a numpy array
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_array=[1, 2, 3])
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_array=np.array([2]))
# Make sure error raised from_dict if not a dictionary
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_dict=[1, 2, 3])
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_dict={})
# Make sure error raised edge_list if not a list of lists
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_edgelist='1,2,3')
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_edgelist=[[0, 1], [0, 1, 2, 3]])
    # Make sure error raised if df is not a pandas DataFrame with the expected columns
with pytest.raises(ValueError):
teneto.TemporalNetwork(from_df={})
with pytest.raises(ValueError):
df = pd.DataFrame({'i': [1, 2], 'j': [0, 1]})
teneto.TemporalNetwork(from_df=df)
# Make sure error raised when nettype is wrong
with pytest.raises(ValueError):
teneto.TemporalNetwork(nettype='s')
with pytest.raises(ValueError):
teneto.TemporalNetwork(timetype='s')
with pytest.raises(ValueError):
teneto.TemporalNetwork(N='s')
with pytest.raises(ValueError):
teneto.TemporalNetwork(T='s')
edgelist = [[0, 1, 2, 0.5], [0, 2, 1, 0.5]]
tnet = teneto.TemporalNetwork(from_edgelist=edgelist)
with pytest.raises(ValueError):
tnet.calc_networkmeasure('skmdla')
with pytest.raises(ValueError):
tnet.generatenetwork('skmdla')
def test_define_tnet_unweighted():
tnet = teneto.TemporalNetwork(nettype='wu', timetype='discrete')
if not tnet.network.shape[1] == 4:
raise AssertionError()
tnet = teneto.TemporalNetwork(nettype='bu')
if not tnet.network.shape[1] == 3:
raise AssertionError()
edgelist = [[0, 1, 2], [0, 2, 1]]
tnet_edgelist = teneto.TemporalNetwork(from_edgelist=edgelist)
if not tnet_edgelist.network.shape == (2, 3):
raise AssertionError()
array = np.zeros([3, 3, 3])
array[[0, 0], [1, 2], [2, 1]] = 1
tnet_array = teneto.TemporalNetwork(from_array=array)
if not all(tnet_array.network == tnet_edgelist.network):
raise AssertionError()
tnet_df = teneto.TemporalNetwork(from_df=tnet_array.network)
if not all(tnet_array.network == tnet_df.network):
raise AssertionError()
contact = teneto.utils.graphlet2contact(array)
tnet_dict = teneto.TemporalNetwork(from_dict=contact)
if not all(tnet_dict.network == tnet_edgelist.network):
raise AssertionError()
tnet_edgelist.add_edge([[0, 3, 1]])
if not all(tnet_edgelist.network.iloc[-1].values == [0, 3, 1]):
raise AssertionError()
if not tnet_edgelist.network.shape == (3, 3):
raise AssertionError()
tnet_edgelist.add_edge([0, 3, 1])
if not all(tnet_edgelist.network.iloc[-1].values == [0, 3, 1]):
raise AssertionError()
tnet_edgelist.drop_edge([0, 3, 1])
if not tnet_edgelist.network.shape == (2, 3):
raise AssertionError()
def test_define_tnet_weighted():
tnet = teneto.TemporalNetwork(nettype='wu', timetype='discrete')
if not tnet.network.shape[1] == 4:
raise AssertionError()
tnet = teneto.TemporalNetwork(nettype='bu')
if not tnet.network.shape[1] == 3:
raise AssertionError()
edgelist = [[0, 1, 2, 0.5], [0, 2, 1, 0.5]]
tnet_edgelist = teneto.TemporalNetwork(from_edgelist=edgelist)
if not tnet_edgelist.network.shape == (2, 4):
raise AssertionError()
array = np.zeros([3, 3, 3])
array[[0, 0], [1, 2], [2, 1]] = 0.5
tnet_array = teneto.TemporalNetwork(from_array=array)
if not all(tnet_array.network == tnet_edgelist.network):
raise AssertionError()
contact = teneto.utils.graphlet2contact(array)
tnet_dict = teneto.TemporalNetwork(from_dict=contact)
if not all(tnet_dict.network == tnet_edgelist.network):
raise AssertionError()
tnet_edgelist.add_edge([[0, 3, 1, 0.8]])
if not all(tnet_edgelist.network.iloc[-1].values == [0, 3, 1, 0.8]):
raise AssertionError()
if not tnet_edgelist.network.shape == (3, 4):
raise AssertionError()
tnet_edgelist.drop_edge([[0, 3, 1]])
if not tnet_edgelist.network.shape == (2, 4):
raise AssertionError()
def test_tnet_functions():
array = np.zeros([3, 3, 3])
array[[0, 0], [1, 2], [2, 1]] = 1
array = array + array.transpose([1, 0, 2])
tnet = teneto.TemporalNetwork(from_array=array)
array = teneto.utils.set_diagonal(array, 0)
degree = tnet.calc_networkmeasure('temporal_degree_centrality')
if not all(array.sum(axis=-1).sum(axis=-1) == degree):
raise AssertionError()
array = np.zeros([3, 3, 3])
array[[0, 0], [1, 2], [2, 1]] = 0.5
array = array + array.transpose([1, 0, 2])
array = teneto.utils.set_diagonal(array, 0)
tnet = teneto.TemporalNetwork(from_array=array)
degree = tnet.calc_networkmeasure('temporal_degree_centrality')
if not all(array.sum(axis=-1).sum(axis=-1) == degree):
raise AssertionError()
def test_generatenetwork():
tnet = teneto.TemporalNetwork()
tnet.generatenetwork('rand_binomial', size=(5, 10), prob=1)
if not tnet.netshape == (5, 10):
raise AssertionError()
def test_plot():
tnet = teneto.TemporalNetwork()
tnet.generatenetwork('rand_binomial', size=(5, 10), prob=1)
tnet.plot('graphlet_stack_plot')
def test_metadata():
tnet = teneto.TemporalNetwork(nodelabels=['A', 'B', 'contact'], timelabels=[
0, 1, 2], desc='test meta data', starttime=0, timeunit='au')
if not tnet.nodelabels == ['A', 'B', 'contact']:
raise AssertionError()
if not tnet.timelabels == [0, 1, 2]:
raise AssertionError()
if not tnet.starttime == 0:
raise AssertionError()
if not tnet.desc == 'test meta data':
raise AssertionError()
if not tnet.timeunit == 'au':
raise AssertionError()
def test_hdf5():
df = pd.DataFrame({'i': [0, 0], 'j': [1, 2], 't': [0, 1]})
tnet = teneto.TemporalNetwork(from_df=df, hdf5=True)
if not tnet.network == './teneto_temporalnetwork.h5':
raise AssertionError()
df2 = pd.read_hdf('./teneto_temporalnetwork.h5')
if not (df == df2).all().all():
raise AssertionError()
tnet.add_edge([0, 2, 2])
df3 = pd.read_hdf('./teneto_temporalnetwork.h5')
if not (df3.iloc[2].values == [0, 2, 2]).all():
raise AssertionError()
tnet.drop_edge([0, 2, 2])
df4 = pd.read_hdf('./teneto_temporalnetwork.h5')
if not (df == df4).all().all():
raise AssertionError()
def test_hdf5_getnetwokwhen():
df = pd.DataFrame({'i': [0, 1], 'j': [1, 2], 't': [0, 1]})
tnet = teneto.TemporalNetwork(from_df=df, hdf5=True)
dfcheck = tnet.get_network_when(i=0)
if not (dfcheck.values == [0,1,0]).all():
raise AssertionError()
dfcheck = tnet.get_network_when(i=0,j=1,t=0,logic='and')
if not (dfcheck.values == [0,1,0]).all():
raise AssertionError()
dfcheck = tnet.get_network_when(i=0,j=1,t=1,logic='or')
if not (dfcheck.values == [[0, 1, 0],[1, 2, 1]]).all():
raise AssertionError()
dfcheck = tnet.get_network_when(t=0)
if not (dfcheck.values == [0,1,0]).all():
raise AssertionError()
dfcheck = tnet.get_network_when(ij=1)
if not (dfcheck.values == [[0, 1, 0],[1, 2, 1]]).all():
raise AssertionError()
|
wiheto/teneto
|
test/classes/test_temporalnetwork.py
|
Python
|
gpl-3.0
| 7,833
|
import requests
import os
import re
import time
from selenium import webdriver
import multiprocessing
import sys
from socket import error as SocketError
import errno
import argparse
import imghdr
import uuid
import csv
import codecs
import platform
import downloader
# define default chrome download path
global default_download_path
default_download_path = os.path.join(os.getcwd(), 'download_urls')
if not os.path.exists(default_download_path):
os.mkdir(default_download_path)
global isWindows
if re.search('windows', platform.platform(), re.IGNORECASE):
isWindows = True
else:
isWindows = False
# use selenium to get the list of URLs
def openBrowserRecursively(total, idName, browser):
try:
for i in range(total):
iterator = i * 100
url = r"https://www.google.com/search?q={word}&newwindow=1&biw=300&bih=629&tbm=isch&ijn={times}&start={start}"
            try:
                browser.get(url.format(word=idName, start=iterator, times=i))
            except SocketError as e:
                if e.errno != errno.ECONNRESET:
                    raise  # re-raise anything that is not a connection reset
                pass
            time.sleep(1.5)  # 1.5 seconds is tuned so the HKU network service does not flag and close the session
    except Exception:
        if isWindows:
            os.system("taskkill /im chrome.exe /F")
        else:
            os.system("kill " + str(os.getpid()))
        openBrowserRecursively(total, idName, browser)
# basic session setup
def setupSession():
session = requests.Session()
session.header = { 'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0","Accept-Encoding": "gzip, deflate, sdch"}
return session
class GoogleDownloader():
def __init__(self, nameList, root, size, process, browser):
        assert browser != None, "driver cannot be None!"
self.process = process
self.browser = browser
self.nameList = nameList
self.size = size
self.root = root
# main crawling start
    def run(self):
        for name in self.nameList:  # iterate the instance's list rather than a module-level global
            self.oneID(name)
def oneID(self, name):
wordSearch = ''
subcategory = name.split(' ')
name = name.replace(' ', '_')
wordSearch = subcategory[0]
if len(subcategory[1:]) >= 1:
for pt in subcategory[1:]:
wordSearch += "+" + pt
print (wordSearch.encode('utf-8'))
total = int(self.size / 100)
openBrowserRecursively(total, wordSearch, self.browser)
        # after triggering the search, Chrome saves each result page's URL list
        # to the download folder under the default name f.txt
global default_download_path
filepath = default_download_path
try:
for i in range(total):
iterator = i * 100
filename = os.path.join("results", name +".txt")
newName = name + '_' + str(i) +'.txt'
                # the naming below follows Chrome's duplicate-download scheme (f.txt, f (1).txt, ...);
                # adjust it if your default download folder behaves differently
if i == 0:
if "f.txt" in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,'f.txt'), os.path.join(filepath,newName))
else:
fileSpecial = "f (%d).txt" % i
if fileSpecial in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,fileSpecial), os.path.join(filepath,newName))
else:
print ("fail to find the file")
        except Exception:
            print("something bad happened, possibly caused by repeated names")
os.remove(os.path.join(filepath, 'f.txt'))
return
        # after renaming and locating the url lists, run the final crawling step
indexList = [i for i in range(1, 101)]
try:
folderName = self.makeFolder(name)
for i in range(total):
newName = name + '_' + str(i) +'.txt'
with codecs.open(os.path.join(filepath,newName),'r', encoding="utf-8") as myfile:
file1 = myfile.read()
results = re.findall(r'"ou":"(.+?)"',file1)
self.process.map(_download,
zip(results, [folderName] * len(results), indexList[:len(results)]))
fileList = os.listdir(folderName)
self.dump_imInfo(folderName, sorted(fileList, key=lambda x: int(x.split('.')[0])), results)
except IOError:
print ("can not find the file called:" , str(newName).encode('utf-8') , "and it may be caused by the bad connection or bad file got from server")
def makeFolder(self, fileName):
try:
if not os.path.exists(os.path.join(self.root, fileName)):
os.mkdir(os.path.join(self.root, fileName))
else:
print('duplicated root name')
except OSError as e:
if e.errno != 17:
raise
else:
pass
return os.path.join(self.root, fileName)
def dump_imInfo(self, folderName, fileList, results):
try:
with open(os.path.join(folderName, 'imInfo.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['img_name', 'uuid', 'url'])
for file in fileList:
index = int(file.split('.')[0])
writer.writerow([index,str(uuid.uuid4().hex),str(results[index-1])])
        except Exception:
            print('error happened while writing imInfo.csv, possibly caused by a duplicated name')
# function to get one image specified with one url
def _download(args):
url, folderName, index = args
session = setupSession()
try:
# time out is another parameter tuned
# fit for the network about 10Mb
image = session.get(url, timeout = 5)
imageName = str(index)
with open(os.path.join(folderName, imageName),'wb') as fout:
fout.write(image.content)
fileExtension = imghdr.what(os.path.join(folderName, imageName))
if fileExtension is None:
os.remove(os.path.join(folderName, imageName))
else:
newName = imageName + '.' + str(fileExtension)
os.rename(os.path.join(folderName, imageName), os.path.join(folderName, newName))
    except Exception as e:
        print ("failed to download one page with url " + str(url) + ": " + str(e))
# basic function to read the id list
def readFile(filename):
_list=[]
with codecs.open (filename, 'r', encoding='utf-8') as fin:
line = fin.readline()
while line:
_list.append(str(line).rstrip())
line = fin.readline()
return _list
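# Illustration (added): the id file is expected to hold one search term per
# line, e.g. a file containing
#   golden retriever
#   siamese cat
# makes readFile return ['golden retriever', 'siamese cat'].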
def arg_parse():
parser = argparse.ArgumentParser(description='Argument Parser for google image downloader')
parser.add_argument('--root', help='output file root',
default='results', type=str)
parser.add_argument('--filename', help='the name of the file which constain the id',
default='testlist.txt', type=str)
parser.add_argument('--size', help='number of image per id',
default=100, type=int)
parser.add_argument('--process', help='number of process in parallel',
default=100, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = arg_parse()
start = time.time()
assert args.filename != None, "Name list cannot be None!"
# get all id as type of list of str
nameList = list(set(readFile(args.filename)))
# init processPool and browser driver
processPool = multiprocessing.Pool(args.process)
# init chrome driver with customized default download path
chromeOptions = webdriver.ChromeOptions()
preference = {'download.default_directory' : default_download_path,
'download.prompt_for_download': False}
chromeOptions.add_experimental_option("prefs",preference)
if isWindows:
chromedriver = os.path.join(os.getcwd(),'chromedriver.exe')
else:
chromedriver = os.path.join(os.getcwd(),'chromedriver')
browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=chromeOptions)
# check if the output folder exists or not
if not os.path.exists(args.root):
os.mkdir(args.root)
# construct the downloader instance
gdownloader = GoogleDownloader(nameList = nameList, root = args.root, size = args.size,
process = processPool, browser = browser)
gdownloader.run()
# finish running
end = time.time()
browser.close()
print ('task end, time consumed:', end - start, 'seconds')
|
whcacademy/imageDownloader
|
googleImageDownload.py
|
Python
|
mit
| 7,660
|
import click
from chakin.cli import pass_context, json_loads
from chakin.decorators import custom_exception, None_output
@click.command('export_fasta')
@click.argument("organism_id", type=int)
@click.option(
"--file",
help="If true, write to files in CWD",
is_flag=True
)
@pass_context
@custom_exception
@None_output
def cli(ctx, organism_id, file=False):
"""Export reference sequences as fasta.
Output:
None
"""
return ctx.gi.export.export_fasta(organism_id, file=file)
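# Hedged usage sketch (added): judging by this file's location under
# chakin/commands/export/, the command would be invoked roughly as
#   chakin export export_fasta 1 --file
# to export organism 1's reference sequences to files in the CWD; the exact
# command path is an assumption, so check the project's docs.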
|
abretaud/python-chado
|
chakin/commands/export/export_fasta.py
|
Python
|
mit
| 503
|
import numpy as np
from os import environ, getenv
import sys
import matplotlib as mpl
mpl.use('cairo')
import matplotlib.pylab as pl
from matplotlib.colors import LogNorm
from matplotlib import pyplot as plt
import seaborn
DOPDF = True
## turn on/off gpu
def set_processor(name):
name = name.lower()
if name == 'cpu':
environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
environ["CUDA_VISIBLE_DEVICES"] = ""
return
if name == 'gpu':
del environ['CUDA_DEVICE_ORDER']
del environ['CUDA_VISIBLE_DEVICES']
return
sys.stderr.write('Unknown processor "%s"!\n'%name)
def get_processor():
if (getenv('CUDA_DEVICE_ORDER') == 'PCI_BUS_ID'
and getenv('CUDA_VISIBLE_DEVICES') == ''):
return 'cpu'
else:
return 'gpu'
## freeze layers of an NN
def freeze(model, on=False, to_skip = []):
def _act(l,o):
print ('un-frozen' if o else 'frozen')
l.trainable = o
for l in model.layers:
print l.name ,
_act(l, on == (l not in to_skip))
## general layout
seaborn.set(style="ticks")
seaborn.set_context("poster")
mpl.rcParams['axes.linewidth'] = 1.25
fig_size = plt.rcParams['figure.figsize']
fig_size[0] = 10
fig_size[1] = 9
plt.rcParams['figure.figsize'] = fig_size
## plotting
_epsilon = np.finfo(float).eps
def _clip(x):
return np.sign(x + _epsilon) * np.clip(np.abs(x), _epsilon, np.inf)
default_colors = np.concatenate([pl.cm.tab10(np.linspace(0,1,10)),
pl.cm.Dark2(np.linspace(0,1,9))])
def sanitize_mask(x):
return x==x
lognorm = LogNorm()
class NH1(object):
__slots__ = ['bins','_content','_sumw2']
def __init__(self, bins=[0,1]):
assert(len(bins) > 1)
self.bins = np.array(bins )
self._content = np.zeros(len(self.bins) - 1, dtype=np.float64)
self._sumw2 = np.zeros(len(self.bins) - 1, dtype=np.float64)
def iter(self):
for x in xrange(self.bins.shape[0]-1):
yield x
def find_bin(self, x):
for ix,edge in enumerate(self.bins):
if x <= edge:
return max(0, ix - 1)
return len(self.bins) - 1
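    # Illustration (added): with bins [0, 1, 2], find_bin(0.5) returns 0 and
    # find_bin(1.5) returns 1; anything above the last edge falls through to
    # the final index, len(self.bins) - 1.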
def get_content(self, ix):
return self._content[ix]
def get_error(self, ix):
return np.sqrt(self._sumw2[ix])
def set_content(self, ix, val):
self._content[ix] = val
def set_error(self, ix, val):
self._sumw2[ix] = val * val;
def clear(self):
self._content *= 0
self._sumw2 *= 0
    def fill(self, x, y=1):
        ix = self.find_bin(x)
        self._content[ix] += y
        self._sumw2[ix] += pow(y, 2)  # accumulate, consistent with fill_array
def fill_array(self, x, weights=None):
mask = sanitize_mask(x)
mask &= sanitize_mask(weights)
x_masked = x[mask]
weights_masked = None if (weights is None) else weights[mask]
w2 = None if (weights_masked is None) else np.square(weights_masked)
hist = np.histogram(x_masked, bins=self.bins, weights=weights_masked, density=False)[0]
herr = np.histogram(x_masked, bins=self.bins, weights=w2, density=False)[0]
self._content += hist
self._sumw2 += herr
def add_array(self, arr):
self._content += arr.astype(np.float64)
def save(self, fpath):
save_arr = np.array([
self.bins,
np.concatenate([self._content, [0]])
])
np.save(fpath, save_arr)
def _load(self, fpath):
load_arr = np.load(fpath)
self.bins = load_arr[0]
self._content = load_arr[1][:-1]
@classmethod
def load(x, fpath):
if isinstance(x, NH1):
x._load(fpath)
else:
h = NH1()
h._load(fpath)
return h
def add_from_file(self, fpath):
load_arr = np.load(fpath)
try:
assert(np.array_equal(load_arr[0], self.bins))
except AssertionError as e:
print fpath
print load_arr[0]
print self.bins
raise e
add_content = load_arr[1][:-1].astype(np.float64)
self._content += add_content
def clone(self):
new = NH1(self.bins)
new._content = np.array(self._content, copy=True)
new._sumw2 = np.array(self._sumw2, copy=True)
return new
def add(self, rhs, scale=1):
assert(self._content.shape == rhs._content.shape)
self._content += scale * rhs._content
self._sumw2 += scale * rhs._sumw2
def multiply(self, rhs):
assert(self._content.shape == rhs._content.shape)
self_rel = self._sumw2 / _clip(self._content)
rhs_rel = rhs._sumw2 / _clip(rhs._content)
self._content *= rhs._content
self._sumw2 = (np.power(self_rel, 2) + np.power(rhs_rel, 2)) * self._content
def divide(self, den, clip=False):
inv = den.clone()
inv.invert()
self.multiply(inv)
if clip:
self._content[den._content <= _epsilon] = 1
def integral(self, lo=None, hi=None):
if lo is None:
lo = 0
if hi is None:
hi = self._content.shape[0]
return np.sum(self._content[lo:hi])
def scale(self, scale=None):
norm = float(scale if (scale is not None) else 1./self.integral())
self._content *= norm
self._sumw2 *= (norm ** 2)
def invert(self):
for ix in range(self._content.shape[0]):
val = self._content[ix]
if val != 0:
relerr = np.sqrt(self._sumw2[ix])/val
self._content[ix] = 1./val
self._sumw2[ix] = relerr * self._content[ix]
else:
self._content[ix] = _epsilon
self._sumw2[ix] = 0
    def quantile(self, eff, interp=False):
        threshold = eff * self.integral()
for ib,b1 in enumerate(self.bins):
frac1 = self.integral(hi=ib)
if frac1 >= threshold:
if not interp or ib == 0:
return b1
frac2 = self.integral(hi=(ib-1))
b2 = self.bins[ib-1]
b0 = (b1 +
((threshold - frac1) *
(b2 - b1) / (frac2 - frac1)))
return b0
def eval_array(self, arr):
def f(x):
return self.get_content(self.find_bin(x))
f = np.vectorize(f)
return f(arr)
def plot(self, color, label, errors=False):
bin_centers = 0.5*(self.bins[1:] + self.bins[:-1])
if errors and np.max(np.abs(self._sumw2)) > 0:
errs = np.sqrt(self._sumw2)
else:
errs = None
plt.errorbar(bin_centers,
self._content,
yerr = errs,
drawstyle = 'steps-mid',
color=color,
label=label,
linewidth=2)
def mean(self):
sumw = 0
bin_centers = 0.5 * (self.bins[:-1] + self.bins[1:])
for ix in xrange(bin_centers.shape[0]):
            sumw += bin_centers[ix] * self._content[ix]  # content is aligned with bin_centers; ix+1 would run off the end
return sumw / self.integral()
# def quantile(self, threshold):
# acc = 0
# threshold *= self.integral()
# for ix in xrange(self._content.shape[0]):
# acc += self._content[ix]
# if acc >= threshold:
# return 0.5 * (self.bins[ix] + self.bins[ix+1])
def median(self):
return self.quantile(eff = 0.5)
def stdev(self, sheppard = False):
# sheppard = True applies Sheppard's correction, assuming constant bin-width
mean = self.mean()
bin_centers = 0.5 * (self.bins[:-1] + self.bins[1:])
integral = self.integral()
variance = np.sum(bin_centers * bin_centers * self._content)
variance -= integral * mean * mean
variance /= (integral - 1)
if sheppard:
variance -= pow(self.bins[1] - self.bins[0], 2) / 12
return np.sqrt(max(0, variance))
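# Hedged sketch (added, not part of the original module): a minimal NH1
# round trip under assumed toy bins, showing fill/summary calls together.
def _demo_nh1():  # pragma: no cover
    h = NH1(bins=[0., 1., 2., 3.])
    h.fill_array(np.array([0.5, 1.5, 1.7, 2.2]))
    # integral counts entries, mean/quantile summarize the binned contents
    return h.integral(), h.mean(), h.quantile(eff=0.5)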
class NH2(object):
__slots__ = ['binsx','binsy','_content','_sumw2']
def __init__(self, binsx, binsy):
self.binsx = binsx
self.binsy = binsy
self._content = np.zeros([len(binsx)-1, len(binsy)-1], dtype=np.float64)
self._sumw2 = np.zeros([len(binsx)-1, len(binsy)-1], dtype=np.float64)
def _find_bin(self, val, axis):
bins = self.binsx if (axis == 0) else self.binsy
for ix,x in enumerate(bins):
if val <= x:
return ix
return len(bins) - 1
def find_bin_x(self, val):
return self._find_bin(val, 0)
def find_bin_y(self, val):
return self._find_bin(val, 1)
def _project(self, onto_axis, min_bin=None, max_bin=None):
bins = self.binsx if (onto_axis == 0) else self.binsy
integrate_axis = 1 - onto_axis
h1 = NH1(bins)
if integrate_axis == 0:
s = self._content[min_bin:max_bin,:]
e = self._sumw2[min_bin:max_bin,:]
else:
s = self._content[:,min_bin:max_bin]
e = self._sumw2[:,min_bin:max_bin]
proj = np.sum(s, axis=integrate_axis)
proj_e = np.sum(e, axis=integrate_axis)
h1._content = proj
h1._sumw2 = proj_e
return h1
def _project_by_val(self, onto_axis, min_bin=None, min_cut=None, max_bin=None, max_cut=None):
integrate_axis = 1 - onto_axis
if min_cut:
min_bin = self._find_bin(min_cut, integrate_axis)
if max_cut:
max_bin = self._find_bin(max_cut, integrate_axis)
return self._project(onto_axis, min_bin, max_bin)
def project_onto_x(self, *args, **kwargs):
return self._project_by_val(0, *args, **kwargs)
def project_onto_y(self, *args, **kwargs):
return self._project_by_val(1, *args, **kwargs)
def fill(self, x, y, z=1):
self._content[self.find_bin_x(x), self.find_bin_y(y)] += z
self._sumw2[self.find_bin_x(x), self.find_bin_y(y)] += pow(z, 2)
def fill_array(self, x, y, weights=None):
mask = sanitize_mask(x)
mask &= sanitize_mask(y)
mask &= sanitize_mask(weights)
x_masked = x[mask]
y_masked = y[mask]
weights_masked = None if (weights is None) else weights[mask]
hist = np.histogram2d(x_masked, y_masked,
bins=(self.binsx, self.binsy),
weights=weights_masked,
normed=False)[0]
w2 = None if (weights_masked is None) else np.square(weights_masked)
herr = np.histogram2d(x_masked, y_masked,
bins=(self.binsx, self.binsy),
weights=w2,
normed=False)[0]
self._content += hist
self._sumw2 += herr
def integral(self):
return np.sum(self._content)
def scale(self, val=None):
if val is None:
val = self.integral()
self._content /= val
def plot(self, xlabel=None, ylabel=None, output=None, cmap=pl.cm.hot, norm=None):
plt.clf()
ax = plt.gca()
ax.grid(True,ls='-.',lw=0.4,zorder=-99,color='gray',alpha=0.7,which='both')
plt.imshow(self._content.T,
extent=(self.binsx[0], self.binsx[-1], self.binsy[0], self.binsy[-1]),
aspect=(self.binsx[-1]-self.binsx[0])/(self.binsy[-1]-self.binsy[0]),
cmap=cmap,
norm=norm,
)
plt.colorbar()
if xlabel:
plt.xlabel(xlabel, fontsize=24)
if ylabel:
plt.ylabel(ylabel, fontsize=24)
ax.set_ylim(bottom=0)
plt.draw()
if output:
print 'Creating',output
plt.savefig(output+'.png',bbox_inches='tight',dpi=100)
if DOPDF:
plt.savefig(output+'.pdf',bbox_inches='tight')
else:
plt.show()
class Plotter(object):
def __init__(self):
self.hists = []
self.ymin = None
self.ymax = None
self.auto_yrange = False
def add_hist(self, hist, label='', plotstyle='b'):
if type(plotstyle) == int:
plotstyle = default_colors[plotstyle]
self.hists.append((hist, label, plotstyle))
def clear(self):
plt.clf()
self.hists = []
self.ymin = None
self.ymax = None
def plot(self, xlabel=None, ylabel=None, output=None, errors=True, logy=False):
plt.clf()
ax = plt.gca()
for hist, label, plotstyle in self.hists:
hist.plot(color=plotstyle, label=label, errors=errors)
if xlabel:
plt.xlabel(xlabel, fontsize=24)
if ylabel:
plt.ylabel(ylabel, fontsize=24)
if logy:
plt.yscale('log', nonposy='clip')
plt.legend(loc=0, fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=20)
if not self.auto_yrange:
if self.ymax is not None:
ax.set_ylim(top=self.ymax)
if self.ymin is not None:
ax.set_ylim(bottom=self.ymin)
elif not logy:
ax.set_ylim(bottom=0)
plt.draw()
        if output:
print 'Creating',output
plt.savefig(output+'.png',bbox_inches='tight',dpi=100)
if DOPDF:
plt.savefig(output+'.pdf',bbox_inches='tight')
else:
plt.show()
p = Plotter()
class Roccer(object):
def __init__(self, y_range=range(-5,1), axis=[0.2,1,0.0005,1]):
self.cfgs = []
self.axis = axis
self.yticks = [10**x for x in y_range]
self.yticklabels = [('1' if x==0 else r'$10^{%i}$'%x) for x in y_range]
self.xticks = [0.2, 0.4, 0.6, 0.8, 1]
self.xticklabels = map(str, self.xticks)
def add_vars(self, sig_hists, bkg_hists, labels, order=None):
if order is None:
order = sorted(sig_hists)
try:
for h in order:
try:
label = labels[h]
if type(label) == str:
self.cfgs.append((sig_hists[h], bkg_hists[h], label, None, '-'))
elif len(label) == 1:
self.cfgs.append((sig_hists[h], bkg_hists[h], label[0], None, '-'))
elif len(label) == 2:
self.cfgs.append((sig_hists[h], bkg_hists[h], label[0], label[1], '-'))
else:
self.cfgs.append((sig_hists[h], bkg_hists[h], label[0], label[1], label[2]))
except KeyError:
pass # something wasn't provided - skip!
        except TypeError:  # a single sig_hist was handed over - not iterable
            if type(labels) == str:
                self.cfgs.append((sig_hists, bkg_hists, labels, None, '-'))
            elif len(labels) == 1:
                self.cfgs.append((sig_hists, bkg_hists, labels[0], None, '-'))
            elif len(labels) == 2:
                self.cfgs.append((sig_hists, bkg_hists, labels[0], labels[1], '-'))
            else:
                self.cfgs.append((sig_hists, bkg_hists, labels[0], labels[1], labels[2]))
def clear(self):
self.cfgs = []
def plot(self, output):
fig, ax = plt.subplots(1)
ax.get_xaxis().set_tick_params(which='both',direction='in')
ax.get_yaxis().set_tick_params(which='both',direction='in')
ax.grid(True,ls='-.',lw=0.4,zorder=-99,color='gray',alpha=0.7,which='major')
min_value = 1
colors = pl.cm.tab10(np.linspace(0,1,len(self.cfgs)))
for i, (sig_hist, bkg_hist, label, customcolor, linestyle) in enumerate(self.cfgs):
h_sig = sig_hist
h_bkg = bkg_hist
rmin = h_sig.bins[0]
rmax = h_sig.bins[len(h_sig.bins)-1]
epsilons_sig = []
epsilons_bkg = []
inverted = h_sig.median() < h_bkg.median()
total_sig = h_sig.integral()
total_bkg = h_bkg.integral()
nbins = h_sig.bins.shape[0]
for ib in xrange(nbins+1):
if inverted:
esig = h_sig.integral(hi=ib) / total_sig
ebkg = h_bkg.integral(hi=ib) / total_bkg
else:
esig = h_sig.integral(lo=ib) / total_sig
ebkg = h_bkg.integral(lo=ib) / total_bkg
epsilons_sig.append(esig)
epsilons_bkg.append(ebkg)
if ebkg < min_value and ebkg > 0:
min_value = ebkg
if customcolor is None:
color = colors[i]
elif type(customcolor) == int:
color = default_colors[customcolor]
else:
color = customcolor
plt.plot(epsilons_sig, epsilons_bkg, color=color, label=label, linewidth=2, ls=linestyle)
plt.axis(self.axis)
ax = plt.gca()
#plt.set_xlim(self.axis[:2])
#plt.set_ylim(self.axis[-2:])
ax.tick_params(axis='both', which='major', labelsize=20)
ax.tick_params(axis='both', which='minor', labelsize=0)
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.legend(loc=2, fontsize=22)
plt.ylabel('Background fake rate', fontsize=24)
plt.xlabel('Signal efficiency', fontsize=24)
ax.set_yticks(self.yticks)
ax.set_yticklabels(self.yticklabels)
ax.set_xticks(self.xticks)
ax.set_xticklabels(self.xticklabels)
print 'Creating',output
plt.savefig(output+'.png',bbox_inches='tight',dpi=300)
if DOPDF:
plt.savefig(output+'.pdf',bbox_inches='tight')
|
sidnarayanan/BAdNet
|
python/subtlenet/utils.py
|
Python
|
mit
| 18,014
|
#!/usr/bin/python
import subprocess
import praw
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = '48gi42'
flaskport = 8886
thisMonthName = "March"
nextMonthName = "April"
readAllCommentsWhichCanBeSlower = False
lateCheckinGracePeriodIsInEffect = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.objects.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
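# Illustration (added): moderation state is keyed by sha1(permalink + body);
# once a comment is handled its hex digest is appended to
# retiredcommenthashes.txt, so the next page render skips it.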
@app.route('/moderatechallenge.html')
def moderatechallenge():
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if the author was deleted; TODO: check for that and skip instead of str()-ing it
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
foobarbazblarg/stayclean
|
stayclean-2016-march/serve-challenge-with-flask.py
|
Python
|
mit
| 10,622
|
def rigmarole(instr):
print("rigmarole %s" % instr)
i = 0
out = ""
while i < len(instr):
c1 = instr[i:i+2]
c2 = instr[i+2:i+4]
cc = int(c1,16) - int(c2, 16)
out += chr(cc)
# print("%s %s" % (c1, c2))
i += 4
return out
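# Illustration (added): rigmarole decodes each 4-hex-digit group as the
# difference of two bytes, e.g. "9655" -> chr(0x96 - 0x55) = 'A'.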
def canoodle(haystack, start_offset, num_bytes, key):
i = start_offset
counter = 0
out = b""
while i < len(haystack):
if counter >= num_bytes:
break
tmp = int(haystack[i:i+2], 16) ^ key[counter % len(key)]
out += bytes([tmp])
counter += 1
i += 4
return out
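# Illustration (added): canoodle takes every other hex byte-pair starting at
# start_offset and XORs it with a repeating key; e.g. haystack "41004200"
# with start_offset=0 and key b"\x01" yields b"\x40\x43", skipping the
# interleaved "00" pairs.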
def main():
data = "9655B040B64667238524D15D6201.B95D4E01C55CC562C7557405A532D768C55FA12DD074DC697A06E172992CAF3F8A5C7306B7476B38.C555AC40A7469C234424.853FA85C470699477D3851249A4B9C4E.A855AF40B84695239D24895D2101D05CCA62BE5578055232D568C05F902DDC74D2697406D7724C2CA83FCF5C2606B547A73898246B4BC14E941F9121D464D263B947EB77D36E7F1B8254.853FA85C470699477D3851249A4B9C4E.9A55B240B84692239624.CC55A940B44690238B24CA5D7501CF5C9C62B15561056032C468D15F9C2DE374DD696206B572752C8C3FB25C3806.A8558540924668236724B15D2101AA5CC362C2556A055232AE68B15F7C2DC17489695D06DB729A2C723F8E5C65069747AA389324AE4BB34E921F9421.CB55A240B5469B23.AC559340A94695238D24CD5D75018A5CB062BA557905A932D768D15F982D.D074B6696F06D5729E2CAE3FCF5C7506AD47AC388024C14B7C4E8F1F8F21CB64".split(".")
print(data)
for i in range(len(data)):
print("%d: %s" % (i, rigmarole(data[i])))
with open("hugestream.bin", "rb") as f:
huge = f.read()
mp3filename = "stomp.mp3"
xorkey = b"\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee"
mp3 = canoodle(huge, 0, 168667, xorkey)
with open(mp3filename, "wb") as f:
f.write(mp3)
print("MP3 written to %s" % mp3filename)
key2 = b"FLARE-ON"[::-1]
try2 = canoodle(huge, 2, 285729, key2)
file2 = "v.png"
with open(file2, "wb") as f:
f.write(try2)
print("PNG written to %s" % file2)
if __name__ == "__main__":
main()
|
gray-panda/grayrepo
|
2020_flareon/04_report/soln.py
|
Python
|
gpl-2.0
| 2,149
|
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
import numpy as np
class Model():
def __init__(self, args, infer=False):
self.args = args
if infer:
args.batch_size = 1
args.seq_length = 1
if args.model == 'rnn':
cell_fn = rnn_cell.BasicRNNCell
elif args.model == 'gru':
cell_fn = rnn_cell.GRUCell
elif args.model == 'lstm':
cell_fn = rnn_cell.BasicLSTMCell
else:
raise Exception("model type not supported: {}".format(args.model))
cell = cell_fn(args.rnn_size)
self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)
self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.initial_state = cell.zero_state(args.batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
def loop(prev, _):
prev = tf.matmul(prev, softmax_w) + softmax_b
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, cell, loop_function=loop if infer else None, scope='rnnlm')
output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
self.logits = tf.matmul(output, softmax_w) + softmax_b
self.probs = tf.nn.softmax(self.logits)
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([args.batch_size * args.seq_length])],
args.vocab_size)
self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
self.final_state = last_state
self.lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
args.grad_clip)
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
def sample(self, sess, chars, vocab, num=200, prime='The '):
state = sess.run(self.initial_state) #sess.run(self.cell.zero_state(1, tf.float32))
for char in prime[:-1]:
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[state] = sess.run([self.final_state], feed)
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
ret = prime
char = prime[-1]
for n in range(num):
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[probs, state] = sess.run([self.probs, self.final_state], feed)
p = probs[0]
# sample = int(np.random.choice(len(p), p=p))
sample = weighted_pick(p)
pred = chars[sample]
ret += pred
char = pred
return ret
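# Hedged usage sketch (added): typical inference flow for this char-rnn,
# assuming `saved_args`, `chars`, `vocab` and a checkpoint were produced by a
# separate training script (all names here are illustrative):
#   model = Model(saved_args, infer=True)
#   with tf.Session() as sess:
#       sess.run(tf.initialize_all_variables())
#       tf.train.Saver(tf.all_variables()).restore(sess, checkpoint_path)
#       print(model.sample(sess, chars, vocab, num=200, prime='The '))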
|
DeepLearningProjects/poem-bot
|
model.py
|
Python
|
mit
| 3,724
|
from endpoints_proto_datastore.ndb import EndpointsModel
from google.appengine.api import search
from google.appengine.ext import ndb
class Leadstatus(EndpointsModel):
_message_fields_schema = ('id','entityKey','created_at','updated_at','status','owner','organization')
owner = ndb.StringProperty()
organization = ndb.KeyProperty()
created_at = ndb.DateTimeProperty(auto_now_add=True)
updated_at = ndb.DateTimeProperty(auto_now=True)
status = ndb.StringProperty()
created_by = ndb.KeyProperty()
last_modified_by = ndb.KeyProperty()
def put(self, **kwargs):
ndb.Model.put(self, **kwargs)
self.put_index()
def put_index(self):
""" index the element at each"""
empty_string = lambda x: x if x else ""
organization = str(self.organization.id())
my_document = search.Document(
doc_id = str(self.key.id()),
fields=[
search.TextField(name=u'type', value=u'Leadstatus'),
search.TextField(name='organization', value = empty_string(organization) ),
search.TextField(name='owner', value = empty_string(self.owner) ),
search.TextField(name='title', value = empty_string(self.status) ),
search.DateField(name='created_at', value = self.created_at),
])
my_index = search.Index(name="GlobalIndex")
my_index.put(my_document)
|
ioGrow/iogrowCRM
|
crm/iomodels/leadstatuses.py
|
Python
|
agpl-3.0
| 1,390
|
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Win32 API type description."""
from stdapi import *
SHORT = Alias("SHORT", Short)
USHORT = Alias("USHORT", UShort)
INT = Alias("INT", Int)
UINT = Alias("UINT", UInt)
LONG = Alias("LONG", Long)
ULONG = Alias("ULONG", ULong)
LONGLONG = Alias("LONGLONG", LongLong)
ULONGLONG = Alias("ULONGLONG", ULongLong)
FLOAT = Alias("FLOAT", Float)
INT8 = Alias("INT8", Int8)
UINT8 = Alias("UINT8", UInt8)
INT16 = Alias("INT16", Int16)
UINT16 = Alias("UINT16", UInt16)
INT32 = Alias("INT32", Int32)
UINT32 = Alias("UINT32", UInt32)
INT64 = Alias("INT64", Int64)
UINT64 = Alias("UINT64", UInt64)
FLOAT32 = Alias("FLOAT32", Float)
BYTE = Alias("BYTE", UInt8)
WORD = Alias("WORD", UInt16)
DWORD = Alias("DWORD", UInt32)
UCHAR = Alias("UCHAR", UChar)
WCHAR = Alias("WCHAR", Short)
BOOL = Enum("BOOL", [
"FALSE",
"TRUE",
])
LPLONG = Pointer(LONG)
LPWORD = Pointer(WORD)
LPDWORD = Pointer(DWORD)
LPBOOL = Pointer(BOOL)
LPSTR = CString
LPCSTR = ConstCString
LPWSTR = WString
LPCWSTR = ConstWString
LARGE_INTEGER = Struct("LARGE_INTEGER", [
(LONGLONG, 'QuadPart'),
])
SIZE_T = Alias("SIZE_T", SizeT)
VOID = Void
PVOID = OpaquePointer(VOID)
LPVOID = PVOID
LPCVOID = OpaquePointer(Const(VOID))
def DECLARE_HANDLE(expr):
return Handle(expr, IntPointer(expr))
# XXX: HANDLE type is often used for disjoint handle namespaces
RAW_HANDLE = IntPointer("HANDLE")
HANDLE = Handle("HANDLE", RAW_HANDLE)
HWND = DECLARE_HANDLE("HWND")
HDC = DECLARE_HANDLE("HDC")
HMONITOR = DECLARE_HANDLE("HMONITOR")
GUID = Struct("GUID", [
(DWORD, "Data1"),
(WORD, "Data2"),
(WORD, "Data3"),
(Array(BYTE, 8), "Data4"),
])
LPGUID = Pointer(GUID)
REFGUID = Alias("REFGUID", Reference(GUID))
IID = Alias("IID", GUID)
REFIID = Alias("REFIID", Reference(IID))
CLSID = Alias("CLSID", GUID)
REFCLSID = Alias("REFCLSID", Reference(CLSID))
LUID = Struct("LUID", [
(DWORD, "LowPart"),
(LONG, "HighPart"),
])
POINT = Struct("POINT", (
(LONG, "x"),
(LONG, "y"),
))
LPPOINT = Pointer(POINT)
SIZE = Struct("SIZE", (
(LONG, "cx"),
(LONG, "cy"),
))
LPSIZE = Pointer(SIZE)
RECT = Struct("RECT", (
(LONG, "left"),
(LONG, "top"),
(LONG, "right"),
(LONG, "bottom"),
))
LPRECT = Pointer(RECT)
PALETTEENTRY = Struct("PALETTEENTRY", (
(BYTE, "peRed"),
(BYTE, "peGreen"),
(BYTE, "peBlue"),
(BYTE, "peFlags"),
))
LPPALETTEENTRY = Pointer(PALETTEENTRY)
RGNDATAHEADER = Struct("RGNDATAHEADER", [
(DWORD, "dwSize"),
(DWORD, "iType"),
(DWORD, "nCount"),
(DWORD, "nRgnSize"),
(RECT, "rcBound"),
])
RGNDATA = Struct("RGNDATA", [
(RGNDATAHEADER, "rdh"),
#(Char, "Buffer[1]"),
])
LPRGNDATA = Pointer(RGNDATA)
HMODULE = IntPointer("HMODULE")
FILETIME = Struct("FILETIME", [
(DWORD, "dwLowDateTime"),
(DWORD, "dwHighDateTime"),
])
COLORREF = Alias("COLORREF", DWORD)
LOGFONTW = Struct("LOGFONTW", [
(LONG, "lfHeight"),
(LONG, "lfWidth"),
(LONG, "lfEscapement"),
(LONG, "lfOrientation"),
(LONG, "lfWeight"),
(BYTE, "lfItalic"),
(BYTE, "lfUnderline"),
(BYTE, "lfStrikeOut"),
(BYTE, "lfCharSet"),
(BYTE, "lfOutPrecision"),
(BYTE, "lfClipPrecision"),
(BYTE, "lfQuality"),
(BYTE, "lfPitchAndFamily"),
(WString, "lfFaceName"),
])
SECURITY_ATTRIBUTES = Struct("SECURITY_ATTRIBUTES", [
(DWORD, "nLength"),
(LPVOID, "lpSecurityDescriptor"),
(BOOL, "bInheritHandle"),
])
# http://msdn.microsoft.com/en-us/library/ff485842.aspx
# http://blogs.msdn.com/b/kirillosenkov/archive/2012/05/14/a-list-of-common-hresult-error-codes.aspx
HRESULT = Enum("HRESULT", [
"S_OK", # 0x00000000
"S_FALSE", # 0x00000001
"E_PENDING", # 0x8000000A
"E_NOTIMPL", # 0x80004001
"E_NOINTERFACE", # 0x80004002
"E_POINTER", # 0x80004003
"E_ABORT", # 0x80004004
"E_FAIL", # 0x80004005
"E_UNEXPECTED", # 0x8000FFFF
"E_ACCESSDENIED", # 0x80070005
"E_HANDLE", # 0x80070006
"E_OUTOFMEMORY", # 0x8007000E
"E_INVALIDARG", # 0x80070057
])
def MAKE_HRESULT(errors, ok = "S_OK", false = "S_FALSE"):
# Always update global HRESULT
HRESULT.values.extend(errors)
if ok == "S_OK" and false == "S_FALSE":
# Just return global HRESULT
return HRESULT
else:
# Return a variation
values = [ok, false]
assert HRESULT.values[0] == "S_OK"
assert HRESULT.values[1] == "S_FALSE"
values.extend(HRESULT.values[2:])
return Enum("HRESULT", values)
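# Illustration (added): MAKE_HRESULT appends API-specific errors to the
# shared HRESULT enum, e.g.
#   D3DERR = MAKE_HRESULT(["D3DERR_INVALIDCALL"])
# returns the (now extended) global HRESULT because the ok/false defaults
# are unchanged; passing custom ok/false values yields a separate Enum.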
IUnknown = Interface("IUnknown")
IUnknown.methods = (
StdMethod(HRESULT, "QueryInterface", ((REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppvObj"))),
StdMethod(ULONG, "AddRef", ()),
StdMethod(ULONG, "Release", ()),
)
|
surround-io/apitrace
|
specs/winapi.py
|
Python
|
mit
| 6,000
|
#! /usr/bin/env python
from __future__ import absolute_import, print_function
"""
A sample implementation of a single coinjoin script,
adapted from `sendpayment.py` in Joinmarket-Org/joinmarket.
For notes, see scripts/README.md; in particular, note the use
of "schedules" with the -S flag.
"""
import random
import sys
import threading
from optparse import OptionParser
from twisted.internet import reactor
import time
import os
import pprint
from jmclient import (Taker, load_program_config, get_schedule,
JMClientProtocolFactory, start_reactor,
validate_address, jm_single, WalletError,
choose_orders, choose_sweep_orders,
cheapest_order_choose, weighted_order_choose,
Wallet, BitcoinCoreWallet, sync_wallet,
RegtestBitcoinCoreInterface, estimate_tx_fee,
direct_send, SegwitWallet)
from twisted.python.log import startLogging
from jmbase.support import get_log, debug_dump_object, get_password
from cli_options import get_sendpayment_parser
log = get_log()
#CLI specific, so relocated here (not used by tumbler)
def pick_order(orders, n): #pragma: no cover
print("Considered orders:")
for i, o in enumerate(orders):
print(" %2d. %20s, CJ fee: %6s, tx fee: %6d" %
(i, o[0]['counterparty'], str(o[0]['cjfee']), o[0]['txfee']))
pickedOrderIndex = -1
if i == 0:
print("Only one possible pick, picking it.")
return orders[0]
while pickedOrderIndex == -1:
try:
pickedOrderIndex = int(raw_input('Pick an order between 0 and ' +
str(i) + ': '))
except ValueError:
pickedOrderIndex = -1
continue
if 0 <= pickedOrderIndex < len(orders):
return orders[pickedOrderIndex]
pickedOrderIndex = -1
def main():
parser = get_sendpayment_parser()
(options, args) = parser.parse_args()
load_program_config()
walletclass = SegwitWallet if jm_single().config.get(
"POLICY", "segwit") == "true" else Wallet
if options.schedule == '' and len(args) < 3:
parser.error('Needs a wallet, amount and destination address')
sys.exit(0)
#without schedule file option, use the arguments to create a schedule
#of a single transaction
sweeping = False
if options.schedule == '':
        #note that sendpayment doesn't support fractional amounts;
        #fractional inputs will throw here.
amount = int(args[1])
if amount == 0:
sweeping = True
destaddr = args[2]
mixdepth = options.mixdepth
addr_valid, errormsg = validate_address(destaddr)
if not addr_valid:
print('ERROR: Address invalid. ' + errormsg)
return
schedule = [[options.mixdepth, amount, options.makercount,
destaddr, 0.0, 0]]
else:
result, schedule = get_schedule(options.schedule)
if not result:
log.info("Failed to load schedule file, quitting. Check the syntax.")
log.info("Error was: " + str(schedule))
sys.exit(0)
mixdepth = 0
for s in schedule:
if s[1] == 0:
sweeping = True
#only used for checking the maximum mixdepth required
mixdepth = max([mixdepth, s[0]])
wallet_name = args[0]
#to allow testing of confirm/unconfirm callback for multiple txs
if isinstance(jm_single().bc_interface, RegtestBitcoinCoreInterface):
jm_single().bc_interface.tick_forward_chain_interval = 10
jm_single().bc_interface.simulating = True
jm_single().maker_timeout_sec = 15
chooseOrdersFunc = None
if options.pickorders:
chooseOrdersFunc = pick_order
if sweeping:
print('WARNING: You may have to pick offers multiple times')
print('WARNING: due to manual offer picking while sweeping')
elif options.choosecheapest:
chooseOrdersFunc = cheapest_order_choose
else: # choose randomly (weighted)
chooseOrdersFunc = weighted_order_choose
# Dynamically estimate a realistic fee if it currently is the default value.
# At this point we do not know even the number of our own inputs, so
# we guess conservatively with 2 inputs and 2 outputs each.
if options.txfee == -1:
options.txfee = max(options.txfee, estimate_tx_fee(2, 2,
txtype="p2sh-p2wpkh"))
log.debug("Estimated miner/tx fee for each cj participant: " + str(
options.txfee))
assert (options.txfee >= 0)
log.debug('starting sendpayment')
if not options.userpcwallet:
#maxmixdepth in the wallet is actually the *number* of mixdepths (so misnamed);
#to ensure we have enough, must be at least (requested index+1)
max_mix_depth = max([mixdepth+1, options.amtmixdepths])
if not os.path.exists(os.path.join('wallets', wallet_name)):
wallet = walletclass(wallet_name, None, max_mix_depth, options.gaplimit)
else:
while True:
try:
pwd = get_password("Enter wallet decryption passphrase: ")
wallet = walletclass(wallet_name, pwd, max_mix_depth, options.gaplimit)
except WalletError:
print("Wrong password, try again.")
continue
except Exception as e:
print("Failed to load wallet, error message: " + repr(e))
sys.exit(0)
break
else:
wallet = BitcoinCoreWallet(fromaccount=wallet_name)
if jm_single().config.get("BLOCKCHAIN",
"blockchain_source") == "electrum-server" and options.makercount != 0:
jm_single().bc_interface.synctype = "with-script"
#wallet sync will now only occur on reactor start if we're joining.
sync_wallet(wallet, fast=options.fastsync)
if options.makercount == 0:
if isinstance(wallet, BitcoinCoreWallet):
raise NotImplementedError("Direct send only supported for JM wallets")
direct_send(wallet, amount, mixdepth, destaddr, options.answeryes)
return
if walletclass == Wallet:
print("Only direct sends (use -N 0) are supported for "
"legacy (non-segwit) wallets.")
return
def filter_orders_callback(orders_fees, cjamount):
orders, total_cj_fee = orders_fees
log.info("Chose these orders: " +pprint.pformat(orders))
log.info('total cj fee = ' + str(total_cj_fee))
total_fee_pc = 1.0 * total_cj_fee / cjamount
log.info('total coinjoin fee = ' + str(float('%.3g' % (
100.0 * total_fee_pc))) + '%')
WARNING_THRESHOLD = 0.02 # 2%
if total_fee_pc > WARNING_THRESHOLD:
log.info('\n'.join(['=' * 60] * 3))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 1))
log.info('OFFERED COINJOIN FEE IS UNUSUALLY HIGH. DOUBLE/TRIPLE CHECK.')
log.info('\n'.join(['=' * 60] * 1))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 3))
if not options.answeryes:
if raw_input('send with these orders? (y/n):')[0] != 'y':
return False
return True
def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
if fromtx == "unconfirmed":
#If final entry, stop *here*, don't wait for confirmation
if taker.schedule_index + 1 == len(taker.schedule):
reactor.stop()
return
if fromtx:
if res:
txd, txid = txdetails
taker.wallet.remove_old_utxos(txd)
taker.wallet.add_new_utxos(txd, txid)
reactor.callLater(waittime*60,
clientfactory.getClient().clientStart)
else:
#a transaction failed; just stop
reactor.stop()
else:
if not res:
log.info("Did not complete successfully, shutting down")
#Should usually be unreachable, unless conf received out of order;
#because we should stop on 'unconfirmed' for last (see above)
else:
log.info("All transactions completed correctly")
reactor.stop()
taker = Taker(wallet,
schedule,
order_chooser=chooseOrdersFunc,
callbacks=(filter_orders_callback, None, taker_finished))
clientfactory = JMClientProtocolFactory(taker)
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
if jm_single().config.get("BLOCKCHAIN", "network") in ["regtest", "testnet"]:
startLogging(sys.stdout)
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon)
if __name__ == "__main__":
main()
print('done')
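# Illustrative single-payment schedule row, matching the one built in main()
# (meanings of the last two fields are inferred: the 0.0 feeds the
# waittime*60 delay in taker_finished, and the trailing 0 appears to be a
# completion flag):
#   [mixdepth, amount_in_satoshis (0 = sweep), makercount, destaddr, 0.0, 0]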
|
chris-belcher/joinmarket-clientserver
|
scripts/sendpayment.py
|
Python
|
gpl-3.0
| 9,275
|
import numpy as np
def sv_main(n_items=1500, q_factor_x=2, q_factor_y=2, seed=6, scale=100.0):
in_sockets = [
['s', 'n_items', n_items],
['s', 'q_factor_x', q_factor_x],
['s', 'q_factor_y', q_factor_y],
['s', 'seed', seed],
['s', 'scale', scale]]
np.random.seed(seed)
    points = np.random.uniform(0.0, 0.1, size=(n_items, 2))
points *= (1000, 200)
a = points[:,0]
b = points[:,1]
if scale==0:
scale = 1
a = np.floor(a / q_factor_x) * q_factor_x * (scale/1000)
b = np.floor(b / q_factor_y) * q_factor_y * (scale/1000)
c = np.array([0.0 for i in b])
d = np.column_stack((a,b,c))
# consumables
Verts = [d.tolist()]
out_sockets = [
['v', 'Verts', Verts]
]
return in_sockets, out_sockets
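# Illustrative effect of the quantization above (the sample value is assumed):
# with q_factor_x=2 and scale=100.0, an x of 93.73 becomes
#   floor(93.73 / 2) * 2 * (100.0 / 1000) == 9.2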
|
taxpon/sverchok
|
node_scripts/templates/zeffii/matrices.py
|
Python
|
gpl-3.0
| 804
|
# -*- coding: utf-8 -*-
import datetime
from django.utils.timezone import now
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
TYPE_CHOICES = {
'F': orm.Tag.objects.get_or_create(label='Feature', defaults={'color': "C0FF4A"})[0],
'B': orm.Tag.objects.get_or_create(label='Bug', defaults={'color': "F53131"})[0],
'C': orm.Tag.objects.get_or_create(label='Cosmetic', defaults={'color': "C748DB"})[0],
'O': orm.Tag.objects.get_or_create(label='Other', defaults={'color': "538CA6"})[0],
}
for type_, tag in TYPE_CHOICES.iteritems():
for issue in orm.Issue.objects.filter(type=type_):
# print tag
issue.tags.add(tag)
# orm.Issue.objects.filter(type=type_).update(tags=[tag])
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'issues.issue': {
'Meta': {'ordering': "['project', 'closed_by_revision', '-priority']", 'object_name': 'Issue'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by_revision': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'days_estimate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '65', 'decimal_places': '5', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Milestone']", 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Project']", 'null': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['issues.Tag']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'issues.milestone': {
'Meta': {'object_name': 'Milestone'},
'deadline': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Project']"})
},
'issues.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'issues.tag': {
'Meta': {'object_name': 'Tag'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['issues']
symmetrical = True
|
mostateresnet/django-ticket
|
issues/migrations/0003_tags_apply.py
|
Python
|
mit
| 6,862
|
#!/usr/bin/env python
import serial, sys
serialPort = sys.argv[1]
ser = serial.Serial(
port=serialPort,
baudrate=1200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
ser.isOpen()
ser.close() # always close port
|
arsh0r/heatmeter
|
heatmeter/reset.py
|
Python
|
mit
| 270
|
"""setup.py"""
from setuptools import setup
with open("README.md") as readme_file:
README = readme_file.read()
test_requirements = ["mock", "pytest", "responses", "testfixtures", "requests", "pyzmq"]
# Async requirements
test_requirements.extend(["pytest-asyncio", "aiohttp", "tornado", "websockets"])
setup(
author="Beau Barker",
author_email="beauinmelbourne@gmail.com",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Send JSON-RPC requests",
entry_points={"console_scripts": ["jsonrpc = jsonrpcclient.__main__:main"]},
extras_require={
"aiohttp": ["aiohttp>=3"],
"requests": ["requests"],
"requests_security": ["requests[security]"],
"tornado": ["tornado"],
"unittest": test_requirements,
"websockets": ["websockets"],
"zmq": ["pyzmq"],
},
include_package_data=True,
install_requires=["apply_defaults<1", "click<8", "jsonschema<4"],
license="MIT",
long_description=README,
long_description_content_type="text/markdown",
name="jsonrpcclient",
# Be PEP 561 compliant
# https://mypy.readthedocs.io/en/stable/installed_packages.html#making-pep-561-compatible-packages
package_data={"jsonrpcclient": ["response-schema.json", "py.typed"]},
zip_safe=False,
packages=["jsonrpcclient", "jsonrpcclient.clients"],
url="https://github.com/bcb/jsonrpcclient",
version="3.3.6",
)
|
bcb/jsonrpcclient
|
setup.py
|
Python
|
mit
| 1,595
|
# -*- coding: utf-8 -*-
"""
================================================
Following the Metal to Mott insulator Transition
================================================
Plot of the Quasiparticle weight decay for the Hubbard
Model in the Bethe Lattice as the local interaction is raised.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
from __future__ import division, absolute_import, print_function
import matplotlib.pyplot as plt
import numpy as np
from dmft.twosite import dmft_loop
def plot_z(axis='matsubara', du=0.05):
fig = plt.figure()
u_int = np.arange(0, 6.2, du)
for beta in [6, 10, 20, 30, 50, 100, 1e3]:
out_file = axis+'_halffill_b{}_dU{}'.format(beta, du)
try:
res = np.load(out_file+'.npy')
except IOError:
res = dmft_loop(u_int, axis, beta=beta, hop=1)
np.save(out_file, res)
plt.plot(res[:, 0]/2, res[:, 1], '+-', label='$\\beta = {}$'.format(beta))
plt.legend(loc=0)
    plt.title('Quasiparticle weight, estimated in {} frequencies'.format(axis))
plt.ylabel('Z')
plt.xlabel('U/D')
fig.savefig(out_file+'_Z.png', format='png',
transparent=False, bbox_inches='tight', pad_inches=0.05)
def plot_z_real():
plot_z('real')
if __name__ == "gallery":
plot_z('real')
plot_z('matsubara')
|
Titan-C/learn-dmft
|
examples/twosite/plot_halffill_z.py
|
Python
|
gpl-3.0
| 1,346
|
"""
SoftLayer.tests.managers.ipsec_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from mock import MagicMock
import SoftLayer
from SoftLayer.exceptions import SoftLayerAPIError
from SoftLayer import testing
class IPSECTests(testing.TestCase):
def set_up(self):
self.ipsec = SoftLayer.IPSECManager(self.client)
def test_add_internal_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'addPrivateSubnetToNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.add_internal_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'addPrivateSubnetToNetworkTunnel',
args=(565787,),
identifier=445)
def test_add_remote_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'addCustomerSubnetToNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.add_remote_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'addCustomerSubnetToNetworkTunnel',
args=(565787,),
identifier=445)
def test_add_service_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'addServiceSubnetToNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.add_service_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'addServiceSubnetToNetworkTunnel',
args=(565787,),
identifier=445)
def test_apply_configuration(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'applyConfigurationsToDevice')
mock.return_value = True
self.assertEqual(self.ipsec.apply_configuration(445), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'applyConfigurationsToDevice',
args=(),
identifier=445)
def test_create_remote_subnet(self):
mock = self.set_mock('SoftLayer_Network_Customer_Subnet',
'createObject')
mock.return_value = {'id': 565787,
'networkIdentifier': '50.0.0.0',
'cidr': 29,
'accountId': 999000}
result = self.ipsec.create_remote_subnet(999000, '50.0.0.0', 29)
self.assertEqual(result, mock.return_value)
self.assert_called_with('SoftLayer_Network_Customer_Subnet',
'createObject',
args=({'networkIdentifier': '50.0.0.0',
'cidr': 29,
'accountId': 999000},))
def test_create_translation(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'createAddressTranslation')
mock.return_value = {'id': 787989,
'customerIpAddress': '50.0.0.0',
'customerIpAddressId': 672634,
'internalIpAddress': '10.0.0.0',
'internalIpAddressId': 871231,
'notes': 'first translation'}
result = self.ipsec.create_translation(445,
'10.0.0.0',
'50.0.0.0',
'first translation')
self.assertEqual(result, mock.return_value)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'createAddressTranslation',
args=({'customerIpAddress': '50.0.0.0',
'internalIpAddress': '10.0.0.0',
'notes': 'first translation'},),
identifier=445)
def test_delete_remote_subnet(self):
mock = self.set_mock('SoftLayer_Network_Customer_Subnet',
'deleteObject')
mock.return_value = True
self.assertEqual(self.ipsec.delete_remote_subnet(565787), True)
self.assert_called_with('SoftLayer_Network_Customer_Subnet',
'deleteObject',
identifier=565787)
def test_get_tunnel_context(self):
_filter = {'networkTunnelContexts': {'id': {'operation': 445}}}
_mask = '[mask[id]]'
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = [{'id': 445}]
result = self.ipsec.get_tunnel_context(445, mask=_mask)
self.assertEqual(result, mock.return_value[0])
self.assert_called_with('SoftLayer_Account',
'getNetworkTunnelContexts',
filter=_filter,
mask=_mask)
def test_get_tunnel_context_raises_error(self):
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = []
self.assertRaises(SoftLayerAPIError,
self.ipsec.get_tunnel_context,
445)
def test_get_translation(self):
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = [{'id': 445, 'addressTranslations':
[{'id': 234123}, {'id': 872341}]}]
self.assertEqual(self.ipsec.get_translation(445, 872341),
{'id': 872341,
'customerIpAddress': '',
'internalIpAddress': ''})
self.assert_called_with('SoftLayer_Account',
'getNetworkTunnelContexts')
def test_get_translation_raises_error(self):
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = [{'id': 445, 'addressTranslations':
[{'id': 234123}]}]
self.assertRaises(SoftLayerAPIError,
self.ipsec.get_translation,
445,
872341)
def test_get_translations(self):
_mask = ('[mask[addressTranslations[customerIpAddressRecord,'
'internalIpAddressRecord]]]')
_filter = {'networkTunnelContexts': {'id': {'operation': 445}}}
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = [{'id': 445,
'addressTranslations': [{
'id': 234123,
'customerIpAddressRecord':
{'ipAddress': '50.0.0.0'},
'customerIpAddressId': 234112,
'internalIpAddressRecord':
{'ipAddress': '10.0.0.0'},
'internalIpAddressId': 234442
}]}]
self.assertEqual(self.ipsec.get_translations(445),
[{'id': 234123,
'customerIpAddress': '50.0.0.0',
'customerIpAddressId': 234112,
'internalIpAddress': '10.0.0.0',
'internalIpAddressId': 234442}])
self.assert_called_with('SoftLayer_Account',
'getNetworkTunnelContexts',
filter=_filter,
mask=_mask)
def test_get_tunnel_contexts(self):
_mask = '[mask[addressTranslations]]'
mock = self.set_mock('SoftLayer_Account', 'getNetworkTunnelContexts')
mock.return_value = [{'id': 445}, {'id': 446}]
self.assertEqual(self.ipsec.get_tunnel_contexts(mask=_mask),
mock.return_value)
self.assert_called_with('SoftLayer_Account',
'getNetworkTunnelContexts',
mask=_mask)
def test_remove_internal_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'removePrivateSubnetFromNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.remove_internal_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'removePrivateSubnetFromNetworkTunnel',
args=(565787,),
identifier=445)
def test_remove_remote_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'removeCustomerSubnetFromNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.remove_remote_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'removeCustomerSubnetFromNetworkTunnel',
args=(565787,),
identifier=445)
def test_remove_service_subnet(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'removeServiceSubnetFromNetworkTunnel')
mock.return_value = True
self.assertEqual(self.ipsec.remove_service_subnet(445, 565787), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'removeServiceSubnetFromNetworkTunnel',
args=(565787,),
identifier=445)
def test_remove_translation(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'deleteAddressTranslation')
mock.return_value = True
self.assertEqual(self.ipsec.remove_translation(445, 787547), True)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'deleteAddressTranslation',
args=(787547,),
identifier=445)
def test_update_translation(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'editAddressTranslation')
mock.return_value = True
translation = {'id': 234123,
'customerIpAddress': '50.0.0.0',
'customerIpAddressId': 234112,
'internalIpAddress': '10.0.0.0',
'internalIpAddressId': 234442}
self.ipsec.get_translation = MagicMock(return_value=translation)
result = self.ipsec.update_translation(445,
234123,
static_ip='10.0.0.2',
remote_ip='50.0.0.2',
notes='do not touch')
self.assertEqual(result, True)
self.ipsec.get_translation.assert_called_with(445, 234123)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'editAddressTranslation',
args=({'id': 234123,
'customerIpAddress': '50.0.0.2',
'internalIpAddress': '10.0.0.2',
'notes': 'do not touch'},),
identifier=445)
def test_update_tunnel_context(self):
mock = self.set_mock('SoftLayer_Network_Tunnel_Module_Context',
'editObject')
mock.return_value = True
context = {'id': 445,
'name': 'der tunnel',
'friendlyName': 'the tunnel',
'internalPeerIpAddress': '10.0.0.1',
'customerPeerIpAddress': '50.0.0.1',
'advancedConfigurationFlag': 0,
'presharedKey': 'secret',
'phaseOneAuthentication': 'MD5',
'phaseOneDiffieHellmanGroup': 1,
'phaseOneEncryption': 'DES',
'phaseOneKeylife': 600,
'phaseTwoAuthentication': 'MD5',
'phaseTwoDiffieHellmanGroup': 1,
'phaseTwoEncryption': 'DES',
'phaseTwoKeylife': 600,
'phaseTwoPerfectForwardSecrecy': 0}
self.ipsec.get_tunnel_context = MagicMock(return_value=context)
result = self.ipsec.update_tunnel_context(445,
friendly_name='ipsec tunnel',
remote_peer='50.0.0.2',
preshared_key='enigma',
phase1_auth='SHA256',
phase1_dh=5,
phase1_crypto='AES256',
phase1_key_ttl=120,
phase2_auth='SHA128',
phase2_dh=2,
phase2_crypto='AES192',
phase2_key_ttl=240,
phase2_forward_secrecy=1)
self.assertEqual(result, True)
self.ipsec.get_tunnel_context.assert_called_with(445)
self.assert_called_with('SoftLayer_Network_Tunnel_Module_Context',
'editObject',
args=({'id': 445,
'name': 'der tunnel',
'friendlyName': 'ipsec tunnel',
'internalPeerIpAddress': '10.0.0.1',
'customerPeerIpAddress': '50.0.0.2',
'advancedConfigurationFlag': 0,
'presharedKey': 'enigma',
'phaseOneAuthentication': 'SHA256',
'phaseOneDiffieHellmanGroup': 5,
'phaseOneEncryption': 'AES256',
'phaseOneKeylife': 120,
'phaseTwoAuthentication': 'SHA128',
'phaseTwoDiffieHellmanGroup': 2,
'phaseTwoEncryption': 'AES192',
'phaseTwoKeylife': 240,
'phaseTwoPerfectForwardSecrecy': 1},),
identifier=445)
|
nanjj/softlayer-python
|
tests/managers/ipsec_tests.py
|
Python
|
mit
| 15,462
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
from setuptools import setup
def read_file(fname):
"""
Read file and decode in py2k
"""
if sys.version_info < (3,):
return open(fname).read().decode("utf-8")
return open(fname).read()
dist_name = 'kolibri_exercise_perseus_plugin'
readme = read_file('README.rst')
# Default description of the distributed package
description = (
"""Kolibri plugin for rendering Khan Academy Perseus style exercises"""
)
######################################
# STATIC AND DYNAMIC BUILD SPECIFICS #
######################################
def enable_log_to_stdout(logname):
"""Given a log name, outputs > INFO to stdout."""
log = logging.getLogger(logname)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
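# Illustrative usage of the helper above (the logger name is hypothetical):
#   enable_log_to_stdout('kolibri_exercise_perseus_plugin.build')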
setup(
name=dist_name,
version="0.6.15",
description=description,
long_description="{readme}".format(
readme=readme,
),
author='Learning Equality',
author_email='info@learningequality.org',
url='https://github.com/learningequality/kolibri-exercise-perseus-plugin',
packages=[
str('kolibri_exercise_perseus_plugin'), # https://github.com/pypa/setuptools/pull/597
],
package_dir={'kolibri_exercise_perseus_plugin': 'kolibri_exercise_perseus_plugin'},
include_package_data=True,
license='MIT',
zip_safe=False,
keywords='kolibri',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
|
christianmemije/kolibri-exercise-perseus-plugin
|
setup.py
|
Python
|
mit
| 2,312
|
import py, os, sys
import subprocess
def pytest_funcarg__standalone(request):
return request.cached_setup(scope="module",
setup=lambda: Standalone(request))
class Standalone:
def __init__(self, request):
self.testdir = request.getfuncargvalue("testdir")
script = "mypytest"
result = self.testdir.runpytest("--genscript=%s" % script)
assert result.ret == 0
self.script = self.testdir.tmpdir.join(script)
assert self.script.check()
def run(self, anypython, testdir, *args):
testdir.chdir()
return testdir._run(anypython, self.script, *args)
def test_gen(testdir, anypython, standalone):
result = standalone.run(anypython, testdir, '--version')
assert result.ret == 0
result.stderr.fnmatch_lines([
"*imported from*mypytest*"
])
p = testdir.makepyfile("def test_func(): assert 0")
result = standalone.run(anypython, testdir, p)
assert result.ret != 0
def test_rundist(testdir, pytestconfig, standalone):
pytestconfig.pluginmanager.skipifmissing("xdist")
testdir.makepyfile("""
def test_one():
pass
""")
result = standalone.run(sys.executable, testdir, '-n', '3')
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 passed*",
])
|
lotaku/pytest-2.3.5
|
testing/test_genscript.py
|
Python
|
mit
| 1,311
|
#
# Copyright (C) 2017 Kevin Thornton <krthornt@uci.edu>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
import fwdpy11
import msprime
import numpy as np
class TestConversion(unittest.TestCase):
    def setUp(self):
self.ts = msprime.simulate(10, recombination_rate=0.025, Ne=1000)
# def testGetTablesDiscretizeTime(self):
# n, e, ntips, l = fwdpy11.ts_from_msprime._convert_tables(self.ts)
# self.assertEqual(ntips, 10)
# self.assertEqual(l, 1.0)
# na = np.array(n, copy=False)
# tzero = np.where(na['time'] == 0.0)
# self.assertTrue(len(tzero[0]) == 10)
def testCreateDiploidPopulation(self):
pop = fwdpy11.DiploidPopulation.create_from_tskit(self.ts)
self.assertEqual(pop.N, 5)
self.assertEqual(pop.tables.genome_length, 1.0)
md = np.array(pop.diploid_metadata, copy=False)
n = md["nodes"].flatten()
self.assertTrue(np.array_equal(n, np.arange(2 * pop.N, dtype=n.dtype)))
class TestConversionFromMultipleDemes(unittest.TestCase):
def test_deme_field_of_metadata(self):
nodes_per_deme = 500
Ne = 3 * nodes_per_deme // 2
Nr = 50.0
config = [
msprime.PopulationConfiguration(nodes_per_deme),
msprime.PopulationConfiguration(nodes_per_deme),
msprime.PopulationConfiguration(nodes_per_deme),
]
events = [
msprime.MassMigration(1 * Ne, 1, 0, 1.0),
msprime.MassMigration(1.5 * Ne, 2, 0, 1.0),
]
ts = msprime.simulate(
population_configurations=config,
Ne=Ne,
random_seed=98765,
recombination_rate=Nr / Ne,
demographic_events=events,
)
pop = fwdpy11.DiploidPopulation.create_from_tskit(ts)
self.assertEqual(pop.N, 750)
alive_nodes = pop.alive_nodes
self.assertEqual(len(alive_nodes), 2 * pop.N)
for i in range(3):
for j in alive_nodes[i * nodes_per_deme : (i + 1) * nodes_per_deme]:
self.assertEqual(pop.tables.nodes[j].deme, i)
k, el = i * nodes_per_deme // 2, (i + 1) * nodes_per_deme // 2
for j in pop.diploid_metadata[k:el]:
self.assertEqual(j.deme, i)
if __name__ == "__main__":
unittest.main()
|
molpopgen/fwdpy11
|
tests/test_ts_from_msprime.py
|
Python
|
gpl-3.0
| 2,969
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core service interface definitions."""
class Service(object):
"""Abstract base service interface."""
def enabled(self):
raise NotImplementedError()
class Notifications(Service):
def query(self, to, intent):
"""Gets the Status of notifications queued previously via send_async().
Serially performs one datastore query per user in the to list.
Args:
to: list of string. The recipients of the notification.
intent: string. Short string identifier of the intent of the
notification (for example, 'invitation' or 'reminder').
Returns:
Dict of to string -> [Status, sorted by descending enqueue date]. See
modules.notifications.notifications.Status for an example of the
Status object.
"""
raise NotImplementedError()
def send_async(
self, to, sender, intent, body, subject, audit_trail=None, html=None,
retention_policy=None):
"""Asyncronously sends a notification via email.
Args:
to: string. Recipient email address. Must have a valid form, but we
cannot know that the address can actually be delivered to.
sender: string. Email address of the sender of the
notification. Must be a valid sender for the App Engine
deployment at the time the deferred send_mail() call actually
executes (meaning it cannot be the email address of the user
currently in session, because the user will not be in session at
call time). See
https://developers.google.com/appengine/docs/python/mail/emailmessagefields.
intent: string. Short string identifier of the intent of the
notification (for example, 'invitation' or 'reminder'). Each kind
of notification you are sending should have its own intent.
Used when creating keys in the index; values that cause the
resulting key to be >500B will fail. May not contain a colon.
body: string. The data payload of the notification. Must fit in a
datastore entity.
subject: string. Subject line for the notification.
audit_trail: JSON-serializable object. An optional audit trail that,
when used with the default retention policy, will be retained
even after the body is scrubbed from the datastore.
html: optional string. The data payload of the notification as html.
Must fit in a datastore entity when combined with the plain
text version. Both the html and plain text body will be
sent, and the recipient's mail client will decide which to
show.
retention_policy: RetentionPolicy. The retention policy to use for
data after a Notification has been sent. By default, we retain the
audit_trail but not the body.
Returns:
(notification_key, payload_key). A 2-tuple of datastore keys for the
created notification and payload.
Raises:
Exception: if values delegated to model initializers are invalid.
ValueError: if to or sender are malformed according to App Engine
(note that well-formed values do not guarantee success).
"""
raise NotImplementedError()
class Unsubscribe(Service):
def get_unsubscribe_url(self, handler, email):
"""Create an individualized unsubscribe link for a user.
Args:
handler: controllers.utils.ApplicationHandler. The current request
handler.
email: string. The email address of the users for whom the unsubscribe
link is being generated.
Returns:
string. A URL for the users to unsubscribe from notifications.
"""
raise NotImplementedError()
def has_unsubscribed(self, email):
"""Check whether the user has requested to be unsubscribed.
Args:
email: string. The email address of the user.
Returns:
bool. True if the user has requested to be unsubscribed.
"""
raise NotImplementedError()
def set_subscribed(self, email, is_subscribed):
"""Set the state of a given user.
Args:
email: string. The email address of the user.
is_subscribed: bool. The state to set. True means that the user is
subscribed and should continue to receive emails; False means that
they should not.
Returns:
None.
"""
raise NotImplementedError()
notifications = Notifications()
unsubscribe = Unsubscribe()
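# Illustrative call pattern against a concrete implementation (all values are
# hypothetical; the stubs above raise NotImplementedError until a real
# service is registered):
#
#   notifications.send_async(
#       to='student@example.com', sender='course@example.com',
#       intent='reminder', body='Unit 2 opens on Monday.',
#       subject='Course reminder')
#   if not unsubscribe.has_unsubscribed('student@example.com'):
#       pass  # safe to keep sending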
|
ehiller/CourseBuilderV19-TeacherDashboard
|
models/services.py
|
Python
|
apache-2.0
| 5,349
|
def isAnagram(s, t):
    # Two strings are anagrams iff they have the same length and contain
    # the same characters with the same multiplicities.
    if len(s) != len(t):
        return False
    return sorted(s) == sorted(t)
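# Illustrative checks (the inputs are assumed, not part of the exercise):
#   isAnagram("listen", "silent")  # -> True
#   isAnagram("rat", "car")        # -> False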
|
ccqpein/Arithmetic-Exercises
|
Valid-Anagram/VA.py
|
Python
|
apache-2.0
| 230
|
# -*- coding: utf-8 -*-
__author__ = 'Young King'
__email__ = 'yanckin@gmail.com'
__version__ = '0.1.0'
from .tlcache import TLCache
|
youngking/tlcache
|
tlcache/__init__.py
|
Python
|
isc
| 134
|
from rx import AnonymousObservable
from rx.disposables import CompositeDisposable
from .exceptions import DisposedException
def add_ref(xs, r):
def subscribe(observer):
return CompositeDisposable(r.disposable, xs.subscribe(observer))
return AnonymousObservable(subscribe)
def adapt_call(func):
"""Adapt called func.
Adapt called funcfrom taking n params to only taking 1 or 2 params
"""
cached = [None]
def func1(arg1, *_):
return func(arg1)
def func2(arg1, arg2=None, *_):
return func(arg1, arg2)
def func_wrapped(*args, **kw):
if cached[0]:
return cached[0](*args, **kw)
for fn in (func1, func2):
try:
ret = fn(*args, **kw)
except TypeError:
continue
else:
cached[0] = fn
return ret
# Error if we get here. Just call original function to generate a
# meaningful error message
return func(*args, **kw)
return func_wrapped
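# Illustrative behavior of adapt_call (example values are assumed, not from
# this module):
#   f = adapt_call(lambda x: x * 2)
#   f(3)           # -> 6
#   f(3, "extra")  # -> 6; the surplus positional argument is dropped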
def check_disposed(this):
if this.is_disposed:
raise DisposedException()
def is_future(p):
return callable(getattr(p, "add_done_callback", None))
class TimeInterval(object):
def __init__(self, value, interval):
self.value = value
self.interval = interval
class Timestamp(object):
def __init__(self, value, timestamp):
self.value = value
self.timestamp = timestamp
|
dbrattli/RxPY
|
rx/internal/utils.py
|
Python
|
apache-2.0
| 1,477
|
class LinkedListNode:
def __init__(self, value):
self.value = value
self.next = None
def kth_to_last_node(fromLast, rootNode):
node = rootNode
totalNodes = 0
    while node is not None:
totalNodes += 1
node = node.next
#initially forgot this error case
if fromLast > totalNodes:
raise ValueError('node needed is larger than the length of the linked list: %s' % fromLast)
node = rootNode
for i in range(totalNodes-fromLast):
node = node.next
return node
a = LinkedListNode("Angel Food")
b = LinkedListNode("Bundt")
c = LinkedListNode("Cheese")
d = LinkedListNode("Devil's Food")
e = LinkedListNode("Eccles")
a.next = b
b.next = c
c.next = d
d.next = e
print(kth_to_last_node(2, a).value)
# returns the node with value "Devil's Food" (the 2nd to last node)
|
katchengli/tech-interview-prep
|
interview_cake/ic25.py
|
Python
|
apache-2.0
| 844
|
# Generated by Django 1.11.11 on 2018-04-12 01:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zilencer', '0008_customer_billing_user'),
]
operations = [
migrations.CreateModel(
name='Plan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(max_length=40, unique=True)),
('stripe_plan_id', models.CharField(max_length=255, unique=True)),
],
),
]
|
showell/zulip
|
zilencer/migrations/0009_plan.py
|
Python
|
apache-2.0
| 617
|
import logging
from collections import OrderedDict
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import render
from apiclient.errors import Error as GoogleAPIError
from oauth2client.client import Error as Oauth2Error
from OpenSSL.crypto import Error as OpenSSLError
from kitsune.announcements.models import Announcement
from kitsune.announcements.forms import AnnouncementForm
from kitsune.lib.sumo_locales import LOCALES
from kitsune.products.models import Product
from kitsune.sumo.googleanalytics import visitors_by_locale
from kitsune.wiki.events import (
ApproveRevisionInLocaleEvent, ReadyRevisionEvent,
ReviewableRevisionInLocaleEvent)
log = logging.getLogger('k.dashboards')
def render_readouts(request, readouts, template, locale=None, extra_data=None, product=None):
"""Render a readouts, possibly with overview page.
Use the given template, pass the template the given readouts, limit the
considered data to the given locale, and pass along anything in the
`extra_data` dict to the template in addition to the standard data.
"""
current_locale = locale or request.LANGUAGE_CODE
on_default_locale = request.LANGUAGE_CODE == settings.WIKI_DEFAULT_LANGUAGE
default_kwargs = {
'locale': settings.WIKI_DEFAULT_LANGUAGE,
}
locale_kwargs = {
'locale': request.LANGUAGE_CODE,
}
ready_kwargs = {}
if product is not None:
default_kwargs['product'] = product.slug
locale_kwargs['product'] = product.slug
ready_kwargs['product'] = product.slug
data = {
'readouts': OrderedDict((slug, class_(request, locale=locale,
product=product))
for slug, class_ in readouts.iteritems()
if class_.should_show_to(request)),
'default_locale': settings.WIKI_DEFAULT_LANGUAGE,
'default_locale_name': LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native,
'current_locale': current_locale,
'current_locale_name': LOCALES[current_locale].native,
'request_locale_name': LOCALES[request.LANGUAGE_CODE].native,
'is_watching_default_approved':
ApproveRevisionInLocaleEvent.is_notifying(request.user, **default_kwargs),
'is_watching_other_approved': (
None if on_default_locale
else ApproveRevisionInLocaleEvent.is_notifying(request.user, **locale_kwargs)),
'is_watching_default_locale': (
ReviewableRevisionInLocaleEvent.is_notifying(request.user, **default_kwargs)),
'is_watching_other_locale': (
None if on_default_locale
else ReviewableRevisionInLocaleEvent.is_notifying(request.user, **locale_kwargs)),
'is_watching_default_ready': ReadyRevisionEvent.is_notifying(request.user, **ready_kwargs),
'on_default_locale': on_default_locale,
'announce_form': AnnouncementForm(),
'announcements': Announcement.get_for_locale_name(current_locale),
'product': product,
'products': Product.objects.filter(visible=True),
}
if extra_data:
data.update(extra_data)
return render(request, 'dashboards/' + template, data)
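# Illustrative call (the readout class and template name are hypothetical):
#   render_readouts(request, OrderedDict([('overview', OverviewReadout)]),
#                   'localization_overview.html', locale='de')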
# Cache it all day to avoid calling Google Analytics over and over.
CACHE_TIMEOUT = 24 * 60 * 60 # 24 hours
def get_locales_by_visit(start_date, end_date):
"""Get a list of (locale, visits) tuples sorted descending by visits."""
cache_key = 'locales_sorted_by_visits:{start}:{end}'.format(
start=start_date, end=end_date)
sorted_locales = cache.get(cache_key)
if sorted_locales is None:
try:
results = visitors_by_locale(start_date, end_date)
locales_and_visits = results.items()
sorted_locales = list(reversed(sorted(
locales_and_visits, key=lambda x: x[1])))
cache.add(cache_key, sorted_locales, CACHE_TIMEOUT)
except (GoogleAPIError, Oauth2Error, OpenSSLError):
# Just return all locales with 0s for visits.
log.exception('Something went wrong getting visitors by locale '
'from Google Analytics. Nobody got a 500 though.')
sorted_locales = [(l, 0) for l in settings.SUMO_LANGUAGES]
return sorted_locales
|
anushbmx/kitsune
|
kitsune/dashboards/utils.py
|
Python
|
bsd-3-clause
| 4,356
|
#!/usr/bin/env python3
import os
import sys
import getopt
import xml.dom.minidom
class CppCreator(object):
def __init__(self, file_name, xml_root, output_path):
if not os.path.exists(output_path):
print ("CppCreator create error")
exit(1)
self.xml_root = xml_root
self.output_path = output_path
self.file_name = file_name
def GetCppRealType(self, type_str, subtype_str):
real_type_str = type_str
if type_str == "int8":
real_type_str = "char"
elif type_str == "uint8":
real_type_str = "unsigned char"
elif type_str == "int16":
real_type_str = "short"
elif type_str == "uint16":
real_type_str = "unsigned short"
elif type_str == "int32":
real_type_str = "int"
elif type_str == "uint32":
real_type_str = "unsigned int"
elif type_str == "int64":
real_type_str = "long long"
elif type_str == "uint64":
real_type_str = "unsigned long long"
elif type_str == "string":
real_type_str = "std::string"
elif type_str == "array":
if subtype_str == "":
print("GetCppRealType : subtype_str can not empty when type is array")
exit(1)
real_type_str = "std::vector<" + self.GetCppRealType(subtype_str, "") + ">"
return real_type_str
def GetSerializeCode(self, type_str, subtype_str, attr_name):
code_str = ""
if type_str == "int8":
code_str += (" collector.WriteInt8(" + attr_name + ");\n")
elif type_str == "uint8":
code_str += (" collector.WriteUint8(" + attr_name + ");\n")
elif type_str == "int16":
code_str += (" collector.WriteInt16(" + attr_name + ");\n")
elif type_str == "uint16":
code_str += (" collector.WriteUint16(" + attr_name + ");\n")
elif type_str == "int32":
code_str += (" collector.WriteInt32(" + attr_name + ");\n")
elif type_str == "uint32":
code_str += (" collector.WriteUint32(" + attr_name + ");\n")
elif type_str == "int64":
code_str += (" collector.WriteInt64(" + attr_name + ");\n")
elif type_str == "uint64":
code_str += (" collector.WriteUint64(" + attr_name + ");\n")
elif type_str == "string":
code_str += (" collector.WriteString(" + attr_name + ");\n")
elif type_str == "array":
if subtype_str == "":
print("GetSerializeCode : subtype_str can not empty when type is array")
exit(1)
code_str += (" collector.WriteUint16((unsigned short)" + attr_name + ".size());\n")
code_str += " for (auto array_item : " + attr_name + ")\n {\n "
sub_serialize_code = self.GetSerializeCode(subtype_str, "", "array_item")
if sub_serialize_code == "":
sub_serialize_code = " array_item.Serialize(collector);\n"
code_str += sub_serialize_code
code_str += " }\n"
return code_str
def GetUnserializeCode(self, type_str, subtype_str, attr_name):
code_str = ""
if type_str == "int8":
code_str += (" " + attr_name + " = collector.ReadInt8();\n")
elif type_str == "uint8":
code_str += (" " + attr_name + " = collector.ReadUint8();\n")
elif type_str == "int16":
code_str += (" " + attr_name + " = collector.ReadInt16();\n")
elif type_str == "uint16":
code_str += (" " + attr_name + " = collector.ReadUint16();\n")
elif type_str == "int32":
code_str += (" " + attr_name + " = collector.ReadInt32();\n")
elif type_str == "uint32":
code_str += (" " + attr_name + " = collector.ReadUint32();\n")
elif type_str == "int64":
code_str += (" " + attr_name + " = collector.ReadInt64();\n")
elif type_str == "uint64":
code_str += (" " + attr_name + " = collector.ReadUint64();\n")
elif type_str == "string":
code_str += (" " + attr_name + " = collector.ReadString();\n")
elif type_str == "array":
if subtype_str == "":
print("GetUnserializeCode : subtype_str can not empty when type is array")
exit(1)
code_str += (" {\n int array_size = collector.ReadUint16();\n " + self.GetCppRealType(subtype_str, "") + " tmp_attr_value;\n")
code_str += " for (int index = 0; index < array_size; ++ index)\n {\n "
sub_serialize_code = self.GetUnserializeCode(subtype_str, "", "tmp_attr_value")
if sub_serialize_code == "":
sub_serialize_code = " tmp_attr_value.Unserialize(collector);\n"
code_str += sub_serialize_code
code_str += (" " + attr_name + ".push_back(tmp_attr_value);\n")
code_str += " }\n }\n"
return code_str
def DoCreate(self):
protocols = self.xml_root.getElementsByTagName("protocol")
hpp_file_str = "#pragma once\n\n#include <string>\n#include <vector>\n#include <elegance/memory/serialize/serialize_base.hpp>\n\nusing face2wind::SerializeBase;\nusing face2wind::SerializeDescribe;\nusing face2wind::ByteArray;\n\nnamespace Protocol {\n\n"
cpp_file_header_str = "#include \"" + self.file_name + ".hpp\"\n\nnamespace Protocol {\n\n"
describe_hpp_str = ""
describe_cpp_str = ""
cpp_file_str = ""
for protocol in protocols:
class_name = protocol.getAttribute("name")
hpp_file_str += ("class " + class_name + " : public SerializeBase\n{\npublic:\n")
cpp_serialize_code = ""
cpp_unserialize_code = ""
attrs = protocol.getElementsByTagName("attr")
for attr in attrs:
type_name = attr.getAttribute("type")
attr_name = attr.getAttribute("name")
subtype_name = ""
real_type_name = ""
if (attr.hasAttribute("subtype")):
subtype_name = attr.getAttribute("subtype")
real_type_name = self.GetCppRealType(type_name, subtype_name)
hpp_file_str += (" " + real_type_name + " " + attr_name + ";\n")
cpp_serialize_code += self.GetSerializeCode(type_name, subtype_name, attr_name)
cpp_unserialize_code += self.GetUnserializeCode(type_name, subtype_name, attr_name)
hpp_file_str += "\n virtual void Serialize(ByteArray &collector) const;\n"
hpp_file_str += " virtual void Unserialize(ByteArray &collector);\n"
hpp_file_str += " virtual const std::string GetTypeName() const { return \"" + class_name + "\"; }\n"
hpp_file_str += "};\n\n"
            describe_class_name = "__" + class_name + "Describe__"
describe_hpp_str += ("class " + describe_class_name + " : public SerializeDescribe\n{\npublic:\n " + describe_class_name + "() { GetNameToObjectMap()[\"" + class_name + "\"] = this; }\n virtual ~" + describe_class_name + "() {}\n")
describe_hpp_str += "\nprotected:\n virtual SerializeBase * CreateSerialize() const { return new " + class_name + "(); }\n};\n\n"
describe_cpp_str += (describe_class_name + " " + "for_describe_register_to_" + describe_class_name.lower() + ";\n")
cpp_file_str += ("void " + class_name + "::Serialize(ByteArray &collector) const\n")
cpp_file_str += ("{\n" + cpp_serialize_code + "}\n\n")
cpp_file_str += ("void " + class_name + "::Unserialize(ByteArray &collector)\n")
cpp_file_str += ("{\n" + cpp_unserialize_code + "}\n\n")
cpp_file_str += "}\n\n"
describe_hpp_str += "\n\n"
describe_cpp_str += "\n\n"
hpp_file = open(self.output_path + "/" + self.file_name + ".hpp", "w")
hpp_file.write(hpp_file_str + describe_hpp_str + "}\n\n")
hpp_file.close()
cpp_file = open(self.output_path + "/" + self.file_name + ".cpp", "w")
cpp_file.write(cpp_file_header_str + describe_cpp_str + cpp_file_str)
cpp_file.close()
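# Illustrative input document, inferred from the element and attribute names
# read in DoCreate (tag/attribute spellings come from the code; the concrete
# protocol below is made up):
#
#   <protocols>
#     <protocol name="LoginReq">
#       <attr type="string" name="account"/>
#       <attr type="array" subtype="uint32" name="item_ids"/>
#     </protocol>
#   </protocols>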
|
face2wind/Elegance
|
tools/serialize_creator/cpp_creator.py
|
Python
|
lgpl-3.0
| 8,346
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..representation import CartesianRepresentation
from ..baseframe import BaseCoordinateFrame, TimeFrameAttribute
from .utils import DEFAULT_OBSTIME
class ITRS(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth.
For more background on the ITRS, see the references provided in the
:ref:`astropy-coordinates-seealso` section of the documentation.
"""
default_representation = CartesianRepresentation
obstime = TimeFrameAttribute(default=DEFAULT_OBSTIME)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
from ..earth import EarthLocation
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(x=cart.x, y=cart.y, z=cart.z)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
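# Illustrative round trip (assumed astropy API of this era, not from this
# file):
#   from astropy import units as u
#   itrs = ITRS(CartesianRepresentation(x=6378.1*u.km, y=0*u.km, z=0*u.km))
#   itrs.earth_location  # -> EarthLocation on the equator along the x-axis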
|
joergdietrich/astropy
|
astropy/coordinates/builtin_frames/itrs.py
|
Python
|
bsd-3-clause
| 1,296
|
__author__ = 'mramire8'
__copyright__ = "Copyright 2014, ML Lab"
__version__ = "0.1"
__status__ = "Research"
import sys
import os
sys.path.append(os.path.abspath("."))
sys.path.append(os.path.abspath("../"))
sys.path.append(os.path.abspath("../experiment/"))
from experiment.experiment_utils import *
import argparse
import numpy as np
from sklearn.datasets.base import Bunch
from datautil.load_data import load_from_file, split_data
from datautil.textutils import StemTokenizer
import time
from sklearn import metrics
from strategy import randomsampling, structured
from collections import defaultdict
from expert import baseexpert
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# import random
import nltk
from scipy.sparse import vstack
from datautil.textutils import TwitterSentenceTokenizer
############# COMMAND LINE PARAMETERS ##################
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
metavar='TRAIN',
default="20news",
help='training data (libSVM format)')
ap.add_argument('--neutral-threshold',
metavar='NEUTRAL',
type=float,
default=.4,
help='neutrality threshold of uncertainty')
ap.add_argument('--expert-penalty',
metavar='EXPERT_PENALTY',
type=float,
default=1,
help='Expert penalty value for the classifier simulation')
ap.add_argument('--expert',
metavar='EXPERT_TYPE',
type=str,
default='pred',
help='Type of expert [neutral|true|pred]')
ap.add_argument('--student',
metavar='STUDENT_TYPE',
type=str,
default='sr',
choices=['sr', 'fixkSRMax', 'sr_rnd'],
                help='Type of student [sr|rnd|fixkSR|sr_seq|firstk_seq|rnd_max|rnd_firstk|firstkmax_tfe|firstkmax_seq_tfe]')
ap.add_argument('--trials',
metavar='TRIALS',
type=int,
default=5,
help='number of trials')
ap.add_argument('--folds',
metavar='FOLDS',
type=int,
default=1,
help='number of folds')
ap.add_argument('--budget',
metavar='BUDGET',
type=int,
default=700,
help='budget')
ap.add_argument('--step-size',
metavar='STEP_SIZE',
type=int,
default=10,
help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
metavar='BOOTSTRAP',
type=int,
default=10,
help='size of the initial labeled dataset')
ap.add_argument('--cost-function',
metavar='COST_FUNCTION',
type=str,
default="uniform",
help='cost function of the x-axis [uniform|log|linear|direct]')
ap.add_argument('--cost-model',
metavar='COST_MODEL',
type=str,
default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
help='cost function parameters of the cost function')
ap.add_argument('--classifier',
metavar='STUDENT_MODEL',
type=str,
default='lrl2',
choices=['lr','mnb', 'lradapt', 'lradaptv2', 'lrl2'],
help='classifier to use for all models')
ap.add_argument('--limit',
metavar='LIMIT',
type=int,
default=2,
help='size to remove')
ap.add_argument('--maxiter',
metavar='MAXITER',
type=int,
default=70,
help='Max number of iterations')
ap.add_argument('--prefix',
metavar='FILENAMEPREFIX',
type=str,
default="testing-",
help='TO PUT IN THE NAMES')
ap.add_argument('--seed',
metavar='SEED',
type=int,
default=876543210,
help='Max number of iterations')
ap.add_argument('--cheating',
action="store_true",
help='experiment cheating version - study purposes')
ap.add_argument('--calibrate',
action="store_true",
help='calibrate student sentence classifier scores for SR')
ap.add_argument('--logitscores',
action="store_true",
help='logit applied to the z-scores during calibration')
ap.add_argument('--fulloracle',
action="store_true",
help='train oracle on all data')
ap.add_argument('--calithreshold',
metavar='CALIBRATION',
type=str,
default="(.5,.5)",
help='threshold of calibration values')
args = ap.parse_args()
rand = np.random.mtrand.RandomState(args.seed)
# sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def sentences_average(pool, vct, sent_detector):
## COMPUTE: AVERAGE SENTENCES IN DOCUMENTS
tk = vct.build_tokenizer()
allwords = 0.
sum_sent = 0.
average_words = 0
min_sent = 10000
max_sent = 0
for docid, label in zip(pool.remaining, pool.target):
doc = pool.text[docid].replace("<br>", ". ")
doc = doc.replace("<br />", ". ")
isent = sent_detector.tokenize(doc)
sum_sent += len(isent)
min_sent = min(min_sent, len(isent))
max_sent = max(max_sent, len(isent))
for s in sent_detector.tokenize(doc):
average_words += len(tk(s))
allwords += 1
print("Average sentences fragments %s" % (sum_sent / len(pool.target)))
print("Min sentences fragments %s" % min_sent)
print("Max sentences fragments %s" % max_sent)
print("Total sentences fragments %s" % sum_sent)
print("Average size of sentence %s" % (average_words / allwords))
###################### MAIN ####################
def get_student(clf, cost_model, sent_clf, sent_token, vct):
cheating = args.cheating
if args.student in "fixkSR":
# student = structured.AALStructuredFixk(model=clf, accuracy_model=None, budget=args.budget, seed=t, vcn=vct,
# subpool=250, cost_model=cost_model)
# student.set_score_model(expert)
student = structured.AALStructuredReadingFirstK(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
elif args.student in "fixkSRMax":
student = structured.AALStructuredReadingFirstKMax(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
elif args.student in "sr":
student = structured.AALStructuredReadingMax(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
elif args.student in "sr_tfe":
student = structured.AALTFEStructuredReading(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
elif args.student in "firstk_tfe":
student = structured.AALTFEStructuredReadingFK(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
elif args.student in "sr_rnd":
student = structured.AALStructuredReadingMax(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_rnd)
elif args.student in "firstkmax_tfe":
student = structured.AALTFEStructuredReadingFK(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct,
subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_fk_max)
elif args.student in "sr_seq":
student = structured.AALUtilityThenSR_Max(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct, subpool=250, cost_model=cost_model)
elif args.student in "sr_seq_tfe":
student = structured.AALTFEUtilityThenSR_Max(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed,
vcn=vct, subpool=250, cost_model=cost_model)
elif args.student in "firstk_seq":
student = structured.AALUtilityThenSR_Firstk(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
elif args.student in "firstkmax_seq":
student = structured.AALUtilityThenSR_Firstk(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_fk_max)
elif args.student in "firstkmax_seq_tfe":
student = structured.AALTFEUtilityThenSR_Max(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_fk_max)
    else:
        raise ValueError("Unknown anytime strategy: %s" % args.student)
student.set_score_model(clf) # student classifier
    student.set_sentence_model(sent_clf)  # cheating part: use an expert on sentences
student.set_cheating(cheating)
student.limit = args.limit
if args.calibrate:
student.set_sent_score(student.score_p0)
student.calibratescores = True
student.set_calibration_threshold(parse_parameters_mat(args.calithreshold))
if args.logitscores:
student.logit_scores = True
student.sent_detector = sent_token
return student
def update_sentence(neutral_data, neu_x, neu_y, labels, query_index, pool, vct, sent_detector):
"""`
Add to P_S all the sentences in the documents with the label of the document
:param neutral_data:
:param neu_x:
:param neu_y:
:param labels:
:param query_index:
:param pool:
:param vct:
:return:
"""
    if not args.cheating:  # prepare the data to update P_S
qlbl = []
## for every query find the text and sentences
for lbl, index in zip(labels, query_index):
subinstances = sent_detector.tokenize(pool.text[index])
doc_sentences = vct.transform(subinstances)
for xik in doc_sentences: #, confidence:
# if confident:
if isinstance(neutral_data, list):
neutral_data = xik
else:
neutral_data = vstack([neutral_data, xik], format='csr')
qlbl.append(lbl)
neu_y = np.append(neu_y, qlbl)
neu_x = neutral_data
else: # for compatibility with cheating experiments
return np.array([]),np.array([]),np.array([])
return neu_x, neu_y, neutral_data
def update_sentence_query(neutral_data, neu_x, neu_y, query, labels):
    '''
    Add only the annotated sentence to P_S.
    :param neutral_data:
    :param neu_x:
    :param neu_y:
    :param query:
    :param labels:
    :return: updated (neu_x, neu_y, neutral_data)
    '''
if not args.cheating:
if isinstance(neutral_data, list):
neutral_data = query
else:
for q in query:
neutral_data = vstack([neutral_data, q], format='csr')
neu_y = np.append(neu_y, labels)
neu_x = neutral_data
else:
return np.array([]),np.array([]),np.array([])
return neu_x, neu_y, neutral_data
def main():
print args
print
accuracies = defaultdict(lambda: [])
ora_cm = defaultdict(lambda: [])
ora_accu = defaultdict(lambda: [])
oracle_accuracies =[]
aucs = defaultdict(lambda: [])
x_axis = defaultdict(lambda: [])
vct = TfidfVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 1),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
min_size = 10
args.fixk = None
data, vct = load_from_file(args.train, [categories[3]], args.fixk, min_size, vct, raw=True)
print "Vectorizer:", vct
print("Data %s" % args.train)
print("Data size %s" % len(data.train.data))
parameters = parse_parameters_mat(args.cost_model)
print "Cost Parameters %s" % parameters
cost_model = set_cost_model(args.cost_function, parameters=parameters)
print "\nCost Model: %s" % cost_model.__class__.__name__
### SENTENCE TRANSFORMATION
if args.train == "twitter":
sent_detector = TwitterSentenceTokenizer()
else:
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
## delete <br> to "." to recognize as end of sentence
data.train.data = clean_html(data.train.data)
data.test.data = clean_html(data.test.data)
print("Train:{}, Test:{}".format(len(data.train.data), len(data.test.data)))
## Get the features of the sentence dataset
## create splits of data: pool, test, oracle, sentences
expert_data = Bunch()
if not args.fulloracle:
train_test_data = Bunch()
expert_data.sentence, train_test_data.pool = split_data(data.train)
expert_data.oracle, train_test_data.test = split_data(data.test)
data.train.data = train_test_data.pool.train.data
data.train.target = train_test_data.pool.train.target
data.test.data = train_test_data.test.train.data
data.test.target = train_test_data.test.train.target
## convert document to matrix
data.train.bow = vct.fit_transform(data.train.data)
data.test.bow = vct.transform(data.test.data)
print "Features:", data.train.bow.shape[1]
#### EXPERT CLASSIFIER: ORACLE
print("Training Oracle expert")
exp_clf = set_classifier(args.classifier, parameter=args.expert_penalty)
if not args.fulloracle:
print "Training expert documents:%s" % len(expert_data.oracle.train.data)
labels, sent_train = split_data_sentences(expert_data.oracle.train, sent_detector, vct, limit=args.limit)
expert_data.oracle.train.data = sent_train
expert_data.oracle.train.target = np.array(labels)
expert_data.oracle.train.bow = vct.transform(expert_data.oracle.train.data)
exp_clf.fit(expert_data.oracle.train.bow, expert_data.oracle.train.target)
else:
# expert_data.data = np.concatenate((data.train.data, data.test.data))
# expert_data.target = np.concatenate((data.train.target, data.test.target))
expert_data.data =data.train.data
expert_data.target = data.train.target
expert_data.target_names = data.train.target_names
labels, sent_train = split_data_sentences(expert_data, sent_detector, vct, limit=args.limit)
expert_data.bow = vct.transform(sent_train)
expert_data.target = labels
expert_data.data = sent_train
exp_clf.fit(expert_data.bow, expert_data.target)
if "neutral" in args.expert:
expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
elif "true" in args.expert:
expert = baseexpert.TrueOracleExpert(cost_function=cost_model.cost_function)
elif "pred" in args.expert:
expert = baseexpert.PredictingExpert(exp_clf, #threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
else:
raise Exception("We need an expert!")
print "\nExpert: %s " % expert
#### EXPERT CLASSIFIER: SENTENCES
print("Training sentence expert")
sent_clf = None
if args.cheating:
labels, sent_train = split_data_sentences(expert_data.sentence.train, sent_detector, vct, limit=args.limit)
expert_data.sentence.train.data = sent_train
expert_data.sentence.train.target = np.array(labels)
expert_data.sentence.train.bow = vct.transform(expert_data.sentence.train.data)
sent_clf = set_classifier(args.classifier, parameter=args.expert_penalty)
sent_clf.fit(expert_data.sentence.train.bow, expert_data.sentence.train.target)
#### STUDENT CLASSIFIER
clf = set_classifier(args.classifier, parameter=args.expert_penalty)
print "\nStudent Classifier: %s" % clf
print "\nSentence Classifier: %s" % sent_clf
print "\nExpert Oracle Classifier: %s" % exp_clf
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
evaluation_points, args.fixk,
min_size))
print ("Anytime active learning experiment - use objective function to pick data")
t0 = time.time()
tac = []
tau = []
### experiment starts
for t in range(args.trials):
trial_accu = []
trial_aucs = []
print "*" * 60
print "Trial: %s" % t
student = get_student(clf, cost_model, sent_clf, sent_detector, vct)
print "\nStudent: %s " % student
train_indices = []
neutral_data = [] # save the xik vectors
train_x = []
train_y = []
neu_x = [] # data to train the classifier
neu_y = np.array([])
pool = Bunch()
pool.data = data.train.bow.tocsr() # full words, for training
pool.text = data.train.data
pool.target = data.train.target
pool.predicted = []
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
bootstrapped = False
current_cost = 0
iteration = 0
query_index = None
query_size = None
oracle_answers = 0
while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
util = []
t1 = time.time()
if not bootstrapped:
## random from each bootstrap
bt = randomsampling.BootstrapFromEach(t * 10)
query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
bootstrapped = True
query = pool.data[query_index]
print "Bootstrap: %s " % bt.__class__.__name__
print
else:
if args.calibrate:
chosen = student.pick_next_cal(pool=pool, step_size=step_size)
else:
chosen = student.pick_next(pool=pool, step_size=step_size)
query_index = [x for x, y in chosen] # document id of chosen instances
query = [y for x, y in chosen] # sentence of the document
query_size = [1] * len(query_index)
ground_truth = pool.target[query_index]
if iteration == 0: ## bootstrap uses ground truth
labels = ground_truth
spent = [0] * len(ground_truth) ## bootstrap cost is ignored
else:
# print "ask labels"
labels = expert.label_instances(query, ground_truth)
spent = expert.estimate_instances(query_size)
### accumulate the cost of the query
query_cost = np.array(spent).sum()
current_cost += query_cost
useful_answers = np.array([[x, y] for x, y in zip(query_index, labels) if y is not None])
neutral_answers = np.array([[x, z] for x, y, z in zip(query_index, labels, query_size) if y is None]) \
if iteration != 0 else np.array([])
## add data recent acquired to train
if useful_answers.shape[0] != 0:
train_indices.extend(useful_answers[:, 0])
# add labels to training
train_x = pool.data[train_indices] # # train with all the words
# update labels with the expert labels
train_y.extend(useful_answers[:, 1])
#TODO: get the sentence data ready for training
            # if not student.get_cheating():  # prepare the data to update P_S
neu_x, neu_y, neutral_data = update_sentence(neutral_data, neu_x, neu_y, labels, query_index, pool, vct, sent_detector)
# neu_x, neu_y, neutral_data = update_sentence_query(neutral_data, neu_x, neu_y, query, labels)
if neu_y.shape[0] != neu_x.shape[0]:
raise Exception("Training data corrupted!")
if train_x.shape[0] != len(train_y):
raise Exception("Training data corrupted!")
# remove labels from pool
pool.remaining.difference_update(query_index)
print "time mid:", time.time()-t1
# retrain the model
current_model = student.train_all(train_x, train_y, neu_x, neu_y)
print "time:", time.time() - t1
# evaluate and save results
y_probas = current_model.predict_proba(data.test.bow)
auc = metrics.roc_auc_score(data.test.target, y_probas[:, 1])
pred_y = current_model.classes_[np.argmax(y_probas, axis=1)]
correct_labels = (np.array(ground_truth) == np.array(labels).reshape(len(labels))).sum()
accu = metrics.accuracy_score(data.test.target, pred_y)
print ("TS:{0}\tAccu:{1:.3f}\tAUC:{2:.3f}\tCost:{3:.2f}\tCumm:{4:.2f}\tSpent:{5}\tneu:{6}\t{7}\tND:{8}\tTD:{9}\t ora_accu:{10}".format(
len(train_indices),
accu,
auc, query_cost,
current_cost,
spent,
len(neutral_answers), neu_y.shape[0], neu_y.sum(), np.array(train_y).sum(), correct_labels))
## the results should be based on the cost of the labeling
            if iteration > 0:  # skip the bootstrap iteration
student.budget -= query_cost ## Bootstrap doesn't count
# oracle accuracy (from queries)
oracle_answers += correct_labels
x_axis_range = current_cost
x_axis[x_axis_range].append(current_cost)
## save results
accuracies[x_axis_range].append(accu)
aucs[x_axis_range].append(auc)
ora_accu[x_axis_range].append(1. * correct_labels/len(ground_truth))
ora_cm[x_axis_range].append(metrics.confusion_matrix(ground_truth, labels, labels=np.unique(train_y)))
# partial trial results
trial_accu.append([x_axis_range, accu])
trial_aucs.append([x_axis_range, auc])
# oracle_accuracies[x_axis_range].append(oracle_answers)
iteration += 1
# end of budget loop
tac.append(trial_accu)
tau.append(trial_aucs)
oracle_accuracies.append(1.*oracle_answers / (len(train_indices)-bootstrap_size))
print "Trial: {}, Oracle right answers: {}, Iteration: {}, Labels:{}, ACCU-OR:{}".format(t, oracle_answers,
iteration, len(train_indices)-bootstrap_size,1.*oracle_answers / (len(train_indices)-bootstrap_size))
#end trial loop
if args.cost_function not in "uniform":
accuracies = extrapolate_trials(tac, cost_25=parameters[1][1], step_size=args.step_size)
aucs = extrapolate_trials(tau, cost_25=parameters[1][1], step_size=args.step_size)
print "\nAverage oracle accuracy: ", np.array(oracle_accuracies).mean()
print("Elapsed time %.3f" % (time.time() - t0))
cheating = "CHEATING" if args.cheating else "NOCHEAT"
print_extrapolated_results(accuracies, aucs, file_name=args.train+"-"+cheating+"-"+args.prefix+"-"+args.classifier+"-"+args.student)
# experiment_utils.oracle_accuracy(ora_accu, file_name=args.train+"-"+cheating+"-"+args.student)
oracle_accuracy(ora_accu, file_name=args.train+"-"+cheating+"-"+args.prefix+"-"+args.classifier+"-"+args.student, cm=ora_cm, num_trials=args.trials)
def format_query(query_labels):
string = ""
for l, q in query_labels:
string = string + "{0}".format(l)
for qi in q:
string = string + "\t{0:.2f} ".format(qi)
string = string + "\n"
return string
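# Example (hypothetical values): format_query renders (label, scores) pairs,
# one query per line, the label first and each score tab-separated.
#   format_query([(1, [0.5, 0.25])])  # -> "1\t0.50 \t0.25 \n"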
def main2():
# # load paramters: student, expert, cost, sampling, ...
# set_options()
#
# # load data and preprocess
# pre_process_data(set_datasets())
#
# # start loop
pass
## MAIN FUNCTION
if __name__ == '__main__':
main()
|
mramire8/active
|
sentences/sent_cheat.py
|
Python
|
apache-2.0
| 26,275
|
from collections import defaultdict
import pronouncing
import re
from sortedcontainers import SortedList
# https://en.wikipedia.org/wiki/Arpabet
CONSONANT_PHONES = {
'P', 'B', 'T', 'D', 'K', 'G',
'CH', 'JH',
'F', 'V', 'TH', 'DH', 'S', 'Z', 'SH', 'ZH', 'HH',
'M', 'EM', 'N', 'EN', 'NG', 'ENG',
'L', 'EL', 'R', 'DX', 'NX',
'Y', 'W', 'Q'}
cons = '(?:' + '|'.join(CONSONANT_PHONES) + ')'
first_consonants = '(?:{}(?: {})*)'.format(cons, cons)
splitter_re = '^(?P<start>{}) (?P<rest>.+)$'.format(first_consonants)
splitter = re.compile(splitter_re)
filter_re = re.compile("['\.]")
corpus_re = re.compile("(.+)\W+(\d+)")
WORD_FREQ = {}
def build_word_freq(corpus=None):
if corpus is None:
corpus = open('resources/words.txt', 'r')
for line in corpus:
match = corpus_re.match(line)
word = match.groups()[0]
freq = match.groups()[1]
WORD_FREQ[word.strip()] = int(freq)
BY_END = defaultdict(SortedList)
BY_START = defaultdict(SortedList)
BY_PHONES = defaultdict(SortedList)
# TODO: serialize this and reload it; that should be faster
def build_db():
pronouncing.init_cmu()
build_word_freq()
for word, phones in pronouncing.pronunciations:
if len(word) < 3:
continue
if filter_re.search(word):
continue
if word not in WORD_FREQ:
continue
match = splitter.match(phones)
if not match:
continue
start, end = match.groups()
rank = WORD_FREQ.get(word, 10000)
t = (rank, start, end, word)
BY_END[end].add(t)
BY_START[start].add(t)
BY_PHONES[phones].add(t)
build_db()
def split(word):
"""Return a list of tuples of (rank, 1st consonants, subsequent phones, word.
Rank of 0 indicates that the word is not in the word frequency list.
"""
word = word.lower()
phones = pronouncing.phones_for_word(word)
res = []
for p in phones:
match = splitter.match(p)
if match:
rank = WORD_FREQ.get(word, 0)
res.append((rank, *match.groups(), word))
return res
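# Example (sketch; assumes the CMU dictionary pronounces 'cat' as 'K AE1 T'):
#   split('cat')  # -> [(WORD_FREQ.get('cat', 0), 'K', 'AE1 T', 'cat')]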
def rhymes(word):
"""Return all the words sharing everything after the first sound."""
_, start, rest, _ = split(word)[0]
return (t for t in BY_END[rest] if t[3] != word)
def alliterates(word):
"""Return all the words sharing the same first sound."""
_, start, _, _ = split(word)[0]
return (t for t in BY_START[start] if t[3] != word)
def pairs(word, limit_to_letter=None):
"""Returns a generator of valid spoonerism pairs for the input word."""
l1_splits = split(word)
combinations = []
for l1 in l1_splits:
for r1 in rhymes(l1[-1]):
if limit_to_letter and not r1[-1].startswith(limit_to_letter):
continue
if l1[1] == r1[1]:
continue
for l2 in alliterates(r1[-1]):
target_phones = '{} {}'.format(l1[1], l2[2])
r2 = BY_PHONES.get(target_phones)
#for r2 in BY_PHONES.get(target_phones, ()):
if r2:
r2 = sorted(r2, key=lambda r: r[0])
combinations.append((l1, l2, r1, r2[0]))
sorted_combinations = sorted(combinations, key=lambda p: p[2][0] + p[1][0] + p[3][0], reverse=True)
# TODO Unique
return [[w[-1] for w in c] for c in sorted_combinations]
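# Usage sketch (the input word is hypothetical): each result is a four-word
# chain [l1, l2, r1, r2]; swapping the leading consonant sounds of l1 and l2
# yields r1 and r2, i.e. a spoonerism.
#   pairs('butterfly', limit_to_letter='f')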
|
scottynomad/spoonerist
|
spoonerist/data.py
|
Python
|
apache-2.0
| 3,416
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
class LatexTableCreator(object):
    def create_latex_table_for_specifications_for_model(self, specification, model_name, dir):
        """Write this model's specification to a LaTeX table in directory dir.
        Table is named as <model-name>-specification.tex (underscores hyphenated).
        """
if specification is None:
return
from opus_core.latex import LaTeX
spec_table = specification.get_table_summary()
latex = LaTeX()
# Latex does not like underscores in file names, so use hyphens instead.
hyphenated_model_name = model_name.replace('_', '-')
label = 'table:%s-specification' % hyphenated_model_name
caption = '%s Specification' % model_name.replace('_', ' ').title()
latex.save_specification_table_to_tex_file(spec_table, os.path.join(dir, hyphenated_model_name + '-specification.tex'),
label=label, caption=caption )
def create_latex_table_for_coefficients_for_model(self, coefficients, model_name, dir, other_info_keys = None):
"""Write this model's coefficients to a LaTeX table in directory dir.
        Table is named as <model-name>-coefficients.tex (underscores hyphenated).
"""
if coefficients is None:
return
# Latex does not like underscores in file names, so use hyphens instead.
hyphenated_model_name = model_name.replace('_', '-')
label = 'table:%s-coefficients' % hyphenated_model_name
caption = '%s Coefficients' % model_name.replace('_', ' ').title()
coefficients.make_tex_table(os.path.join(dir, hyphenated_model_name + '-coefficients'),
other_info_keys=other_info_keys, label=label, caption=caption)
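# Usage sketch (the coefficients object and output directory are hypothetical;
# coefficients is assumed to implement make_tex_table as used above):
#   LatexTableCreator().create_latex_table_for_coefficients_for_model(
#       coefficients, 'my_model', 'tables')
#   # -> writes a table file named my-model-coefficients under tables/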
|
apdjustino/DRCOG_Urbansim
|
src/opus_core/latex_table_creator.py
|
Python
|
agpl-3.0
| 1,814
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
window_size_ms, window_stride_ms,
dct_coefficient_count):
"""Calculates common settings needed for all models.
Args:
label_count: How many classes are to be recognized.
sample_rate: Number of audio samples per second.
clip_duration_ms: Length of each audio clip to be analyzed.
window_size_ms: Duration of frequency analysis window.
window_stride_ms: How far to move in time between frequency windows.
dct_coefficient_count: Number of frequency bins to use for analysis.
Returns:
Dictionary containing common settings.
"""
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
window_stride_samples = int(sample_rate * window_stride_ms / 1000)
length_minus_window = (desired_samples - window_size_samples)
if length_minus_window < 0:
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
fingerprint_size = dct_coefficient_count * spectrogram_length
return {
'desired_samples': desired_samples,
'window_size_samples': window_size_samples,
'window_stride_samples': window_stride_samples,
'spectrogram_length': spectrogram_length,
'dct_coefficient_count': dct_coefficient_count,
'fingerprint_size': fingerprint_size,
'label_count': label_count,
'sample_rate': sample_rate,
}
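# Example: settings for 1-second clips at 16 kHz with 30 ms windows and a
# 10 ms stride (values chosen for illustration):
#   settings = prepare_model_settings(label_count=12, sample_rate=16000,
#                                     clip_duration_ms=1000, window_size_ms=30,
#                                     window_stride_ms=10,
#                                     dct_coefficient_count=40)
#   settings['spectrogram_length']  # -> 1 + (16000 - 480) // 160 = 98
#   settings['fingerprint_size']    # -> 40 * 98 = 3920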
def create_model(fingerprint_input, model_settings, model_architecture,
is_training):
"""Builds a model of the requested architecture compatible with the settings.
There are many possible ways of deriving predictions from a spectrogram
input, so this function provides an abstract interface for creating different
kinds of models in a black-box way. You need to pass in a TensorFlow node as
the 'fingerprint' input, and this should output a batch of 1D features that
describe the audio. Typically this will be derived from a spectrogram that's
been run through an MFCC, but in theory it can be any feature vector of the
size specified in model_settings['fingerprint_size'].
The function will build the graph it needs in the current TensorFlow graph,
and return the tensorflow output that will contain the 'logits' input to the
softmax prediction process. If training flag is on, it will also return a
placeholder node that can be used to control the dropout amount.
See the implementations below for the possible model architectures that can be
requested.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
model_architecture: String specifying which kind of model to create.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
Raises:
Exception: If the architecture type isn't recognized.
"""
if model_architecture == 'single_fc':
return create_single_fc_model(fingerprint_input, model_settings,
is_training)
elif model_architecture == 'conv':
return create_conv_model(fingerprint_input, model_settings, is_training)
elif model_architecture == 'low_latency_conv':
return create_low_latency_conv_model(fingerprint_input, model_settings,
is_training)
else:
raise Exception('model_architecture argument "' + model_architecture +
'" not recognized, should be one of "single_fc", "conv",' +
' or "low_latency_conv"')
def load_variables_from_checkpoint(sess, start_checkpoint):
"""Utility function to centralize checkpoint restoration.
Args:
sess: TensorFlow session.
start_checkpoint: Path to saved checkpoint on disk.
"""
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, start_checkpoint)
def create_single_fc_model(fingerprint_input, model_settings, is_training):
"""Builds a model with a single hidden fully-connected layer.
This is a very simple model with just one matmul and bias layer. As you'd
expect, it doesn't produce very accurate results, but it is very fast and
simple, so it's useful for sanity testing.
Here's the layout of the graph:
(fingerprint_input)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
weights = tf.Variable(
tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))
bias = tf.Variable(tf.zeros([label_count]))
logits = tf.matmul(fingerprint_input, weights) + bias
if is_training:
return logits, dropout_prob
else:
return logits
def create_conv_model(fingerprint_input, model_settings, is_training):
"""Builds a standard convolutional model.
This is roughly the network labeled as 'cnn-trad-fpool3' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MaxPool]
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MaxPool]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces fairly good quality results, but can involve a large number of
weight parameters and computations. For a cheaper alternative from the same
paper with slightly less accuracy, see 'low_latency_conv' below.
During training, dropout nodes are introduced after each relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
input_frequency_size = model_settings['dct_coefficient_count']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = 20
first_filter_count = 64
first_weights = tf.Variable(
tf.truncated_normal(
[first_filter_height, first_filter_width, 1, first_filter_count],
stddev=0.01))
first_bias = tf.Variable(tf.zeros([first_filter_count]))
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],
'SAME') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, dropout_prob)
else:
first_dropout = first_relu
max_pool = tf.nn.max_pool(first_dropout, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
second_filter_width = 4
second_filter_height = 10
second_filter_count = 64
second_weights = tf.Variable(
tf.truncated_normal(
[
second_filter_height, second_filter_width, first_filter_count,
second_filter_count
],
stddev=0.01))
second_bias = tf.Variable(tf.zeros([second_filter_count]))
second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 1, 1, 1],
'SAME') + second_bias
second_relu = tf.nn.relu(second_conv)
if is_training:
second_dropout = tf.nn.dropout(second_relu, dropout_prob)
else:
second_dropout = second_relu
second_conv_shape = second_dropout.get_shape()
second_conv_output_width = second_conv_shape[2]
second_conv_output_height = second_conv_shape[1]
second_conv_element_count = int(
second_conv_output_width * second_conv_output_height *
second_filter_count)
flattened_second_conv = tf.reshape(second_dropout,
[-1, second_conv_element_count])
label_count = model_settings['label_count']
final_fc_weights = tf.Variable(
tf.truncated_normal(
[second_conv_element_count, label_count], stddev=0.01))
final_fc_bias = tf.Variable(tf.zeros([label_count]))
final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
else:
return final_fc
def create_low_latency_conv_model(fingerprint_input, model_settings,
is_training):
"""Builds a convolutional model with low compute requirements.
This is roughly the network labeled as 'cnn-one-fstride4' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces slightly lower quality results than the 'conv' model, but needs
fewer weight parameters and computations.
During training, dropout nodes are introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
input_frequency_size = model_settings['dct_coefficient_count']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = input_time_size
first_filter_count = 186
first_filter_stride_x = 1
first_filter_stride_y = 4
first_weights = tf.Variable(
tf.truncated_normal(
[first_filter_height, first_filter_width, 1, first_filter_count],
stddev=0.01))
first_bias = tf.Variable(tf.zeros([first_filter_count]))
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
1, first_filter_stride_y, first_filter_stride_x, 1
], 'VALID') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, dropout_prob)
else:
first_dropout = first_relu
first_conv_output_width = math.floor(
(input_frequency_size - first_filter_width + first_filter_stride_x) /
first_filter_stride_x)
first_conv_output_height = math.floor(
(input_time_size - first_filter_height + first_filter_stride_y) /
first_filter_stride_y)
first_conv_element_count = int(
first_conv_output_width * first_conv_output_height * first_filter_count)
flattened_first_conv = tf.reshape(first_dropout,
[-1, first_conv_element_count])
first_fc_output_channels = 128
first_fc_weights = tf.Variable(
tf.truncated_normal(
[first_conv_element_count, first_fc_output_channels], stddev=0.01))
first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
else:
second_fc_input = first_fc
second_fc_output_channels = 128
second_fc_weights = tf.Variable(
tf.truncated_normal(
[first_fc_output_channels, second_fc_output_channels], stddev=0.01))
second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
final_fc_weights = tf.Variable(
tf.truncated_normal(
[second_fc_output_channels, label_count], stddev=0.01))
final_fc_bias = tf.Variable(tf.zeros([label_count]))
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
else:
return final_fc
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/examples/speech_commands/models.py
|
Python
|
mit
| 14,160
|
from __future__ import print_function
# Author: Brendan Le Foll <brendan.le.foll@intel.com>
# Contributions: Sarah Knepper <sarah.knepper@intel.com>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from upm import pyupm_grove as grove
def main():
# Create the temperature sensor object using AIO pin 0
temp = grove.GroveTemp(0)
print(temp.name())
# Read the temperature ten times, printing both the Celsius and
# equivalent Fahrenheit temperature, waiting one second between readings
for i in range(0, 10):
celsius = temp.value()
fahrenheit = celsius * 9.0/5.0 + 32.0;
print("%d degrees Celsius, or %d degrees Fahrenheit" \
% (celsius, fahrenheit))
time.sleep(1)
# Delete the temperature sensor object
del temp
if __name__ == '__main__':
main()
|
sasmita/upm
|
examples/python/grovetemp.py
|
Python
|
mit
| 1,892
|
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Li Ge <lge@us.ibm.com)
# Positive Test: create domain, attach block device, verify list
from XmTestLib import *
from XmTestLib.block_utils import block_attach
if ENABLE_HVM_SUPPORT:
SKIP("Block-list not supported for HVM domains")
domain = XmTestDomain()
try:
console = domain.start()
except DomainError, e:
if verbose:
print e.extra
FAIL("Unable to create domain")
#Attach one virtual block device to domainU
block_attach(domain, "phy:/dev/ram0", "xvda1")
#Verify block-list on Domain0
status, output = traceCommand("xm block-list %s" % domain.getId())
eyecatcher = "51713"
where = output.find(eyecatcher)
if status != 0:
FAIL("xm block-list returned bad status, expected 0, status is %i" % status)
elif where < 0 :
FAIL("Fail to list block device")
#Verify attached block device on DomainU
try:
run = console.runCmd("cat /proc/partitions | grep xvda1")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL(str(e))
domain.stop()
if run["return"] != 0:
FAIL("Failed to verify that block dev is attached on DomainU")
|
YongMan/Xen-4.3.1
|
tools/xm-test/tests/block-list/02_block-list_attachbd_pos.py
|
Python
|
gpl-2.0
| 1,170
|
import viper.lexer as vl
import os
import pytest
from importlib import import_module
from typing import List, Type
###############################################################################
#
# INDIVIDUAL TOKENS
#
###############################################################################
def _test_single_token(token: str, lexeme_type: Type[vl.Lexeme]):
lexemes = vl.lex_token(token)
assert len(lexemes) == 1
assert lexemes[0] == lexeme_type(token)
def _test_bad_single_token(token: str, intended_type: Type[vl.Lexeme]):
try:
lexemes = vl.lex_token(token)
except vl.LexerError:
assert True
else:
if len(lexemes) == 1:
assert lexemes[0] != intended_type(token)
else:
assert True
# COMMA
def test_comma():
lexemes = vl.lex_token(',')
assert len(lexemes) == 1
assert lexemes[0] == vl.COMMA
# INT
@pytest.mark.parametrize('token', [
'42', '-42',
])
def test_int(token: str):
_test_single_token(token, vl.Int)
# FLOAT
@pytest.mark.parametrize('token', [
'.42', '.42e8', '.42E8', '.42e+8', '.42E+8', '.42e-8', '.42E-8', '-.42', '-.42e8',
'42e8', '42E8', '42e+8', '42E+8', '42e-8', '42E-8', '-42e8',
'42.', '42.e8', '42.E8', '42.e+8', '42.E+8', '42.e-8', '42.E-8', '-42.', '-42.e8',
'4.2', '4.2e8', '4.2E8', '4.2e+8', '4.2E+8', '4.2e-8', '4.2E-8', '-4.2', '-4.2e8',
])
def test_float(token: str):
_test_single_token(token, vl.Float)
@pytest.mark.parametrize('token', [
'42e', '42E', '42e+', '42E+', '42e-', '42E-',
'42.e', '42.E', '42.e+', '42.E+', '42.e-', '42.E-',
'4.2e', '4.2E', '4.2e+', '4.2E+', '4.2e-', '4.2E-',
])
def test_bad_float(token: str):
_test_bad_single_token(token, vl.Float)
# NAME
@pytest.mark.parametrize('token', [
'a', '_a', 'aa', 'aA', 'a1',
'a-b-c', 'a-B-C', 'a1-b2-c3',
'a!', 'a@', 'a$', 'a%', 'a^', 'a&', 'a*', 'a?',
])
def test_name(token: str):
_test_single_token(token, vl.Name)
@pytest.mark.parametrize('token', [
'A', 'AB', '-a', 'a-', 'a-b-', 'a-?', 'a?!', '42', '42e3', '?', '?!',
])
def test_bad_name(token: str):
_test_bad_single_token(token, vl.Name)
# UNDERSCORE
@pytest.mark.parametrize('token', [
'_', '__', '___',
])
def test_underscore(token: str):
_test_single_token(token, vl.Underscore)
# CLASS
@pytest.mark.parametrize('token', [
'A', 'AB', 'AThing', 'AnotherThing', 'A-Thing', 'Another-Thing', 'Plan9', 'Plan-9-From-Outer-Space',
])
def test_class(token: str):
_test_single_token(token, vl.Class)
@pytest.mark.parametrize('token', [
'a-class', 'a_class', 'aA', 'A-', 'A-?', '9', '9a', '!?',
])
def test_bad_class(token: str):
_test_bad_single_token(token, vl.Class)
# OPERATOR
@pytest.mark.parametrize('token', [
'!', '@', '$', '%', '^', '&', '*', '-', '+', '|', '/', '?', '<', '>',
'[', ']', '{', '}', '~',
'!@', '<>', '::', '.&',
])
def test_operator(token: str):
_test_single_token(token, vl.Operator)
@pytest.mark.parametrize('token', [
'a', 'A', 'aA', 'AA', 'Aa', 'aa', '(42)', ':', '(', ')', '->', '.',
])
def test_bad_operator(token: str):
_test_bad_single_token(token, vl.Operator)
###############################################################################
#
# LEXING LINES
#
###############################################################################
# OPERATORS
@pytest.mark.parametrize('line,correct_lexemes', [
('foo+',
[vl.Name('foo'), vl.Operator('+')]),
('+foo',
[vl.Operator('+'), vl.Name('foo')]),
('foo+bar',
[vl.Name('foo'), vl.Operator('+'), vl.Name('bar')]),
('foo?bar',
[vl.Name('foo?'), vl.Name('bar')]),
('foo?!bar',
[vl.Name('foo?'), vl.Operator('!'), vl.Name('bar')]),
])
def test_infix_ops(line: str, correct_lexemes: List[vl.Lexeme]):
assert vl.lex_line(line) == correct_lexemes
# COMMAS
@pytest.mark.parametrize('line,correct_lexemes', [
(',',
[vl.COMMA]),
('foo,',
[vl.Name('foo'), vl.COMMA]),
('foo,bar',
[vl.Name('foo'), vl.COMMA, vl.Name('bar')]),
('foo, bar ,baz',
[vl.Name('foo'), vl.COMMA, vl.Name('bar'), vl.COMMA, vl.Name('baz')]),
('foo(bar, baz)',
[vl.Name('foo'), vl.OPEN_PAREN, vl.Name('bar'), vl.COMMA, vl.Name('baz'), vl.CLOSE_PAREN]),
('foo(bar,)',
[vl.Name('foo'), vl.OPEN_PAREN, vl.Name('bar'), vl.COMMA, vl.CLOSE_PAREN]),
])
def test_commas(line: str, correct_lexemes: List[vl.Lexeme]):
assert vl.lex_line(line) == correct_lexemes
###############################################################################
#
# LEXING TEXT
#
###############################################################################
# MULTI-LINE LEXING
@pytest.mark.parametrize('text,correct_lexemes', [
('\n'.join((
'def foo(arg):',
' return bar()')),
[vl.ReservedName('def'), vl.Name('foo'), vl.OPEN_PAREN, vl.Name('arg'), vl.CLOSE_PAREN, vl.COLON,
vl.NEWLINE, vl.INDENT, vl.ReservedName('return'), vl.Name('bar'), vl.OPEN_PAREN, vl.CLOSE_PAREN, vl.NEWLINE,
vl.DEDENT, vl.ENDMARKER]),
('\n'.join((
'def foo(arg1, arg2):',
' return bar(arg1, arg2,)')),
[vl.ReservedName('def'), vl.Name('foo'), vl.OPEN_PAREN, vl.Name('arg1'), vl.COMMA, vl.Name('arg2'), vl.CLOSE_PAREN,
vl.COLON,
vl.NEWLINE, vl.INDENT, vl.ReservedName('return'), vl.Name('bar'), vl.OPEN_PAREN, vl.Name('arg1'), vl.COMMA,
vl.Name('arg2'), vl.COMMA, vl.CLOSE_PAREN, vl.NEWLINE, vl.DEDENT, vl.ENDMARKER]),
])
def test_multiple_lines(text: str, correct_lexemes: List[vl.Lexeme]):
assert vl.lex_lines(text) == correct_lexemes
###############################################################################
#
# FILES
#
###############################################################################
def _generate_files_and_modules():
results = []
cur_dir = os.path.dirname(__file__)
viper_files_dir = os.path.join(cur_dir, 'viper_files')
lexeme_files_dir = os.path.join(cur_dir, 'lexeme_files')
if not os.path.isdir(viper_files_dir) or not os.path.isdir(lexeme_files_dir):
return results
for viper_file in (os.path.join(viper_files_dir, file)
for file in os.listdir(viper_files_dir) if file.endswith('.viper')):
basename = os.path.splitext(os.path.basename(viper_file))[0]
if f'{basename}.py' not in os.listdir(lexeme_files_dir):
continue
module_name = f'.{os.path.basename(lexeme_files_dir)}.{basename}'
results.append((viper_file, module_name))
return results
@pytest.mark.parametrize('viper_file,module_name', _generate_files_and_modules())
def test_files(viper_file: str, module_name: str):
module = import_module(module_name, 'tests')
correct = module.lexemes
lexemes = vl.lex_file(viper_file)
assert lexemes == correct
###############################################################################
#
# MISCELLANY
#
###############################################################################
# LEXEMES
def test_lexemes():
assert vl.Name('foo') == vl.Name('foo')
assert vl.Indent() == vl.INDENT
assert str(vl.Name('foo')) == 'foo'
assert repr(vl.Name('foo')) == 'Name(foo)'
assert repr(vl.INDENT) == 'Indent'
|
pdarragh/Viper
|
tests/test_lexer.py
|
Python
|
apache-2.0
| 7,284
|
"""
Webapp core app.
"""
|
redhat-cip/numeter
|
web-app/numeter_webapp/core/__init__.py
|
Python
|
agpl-3.0
| 25
|
"""Query for a nonexistent language to see the resulting error message."""
from ask_api_examples import make_query
query = '[[Programming language::xxyyzz]]'
def main():
r = make_query(query, __file__)
return r
if __name__ == '__main__':
print main()
|
mdpiper/csdms-wiki-api-examples
|
ask_api_examples/show_query_error.py
|
Python
|
mit
| 269
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ServiceObjectivePaged(Paged):
"""
A paging container for iterating over a list of ServiceObjective object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ServiceObjective]'}
}
def __init__(self, *args, **kwargs):
super(ServiceObjectivePaged, self).__init__(*args, **kwargs)
|
SUSE/azure-sdk-for-python
|
azure-mgmt-sql/azure/mgmt/sql/models/service_objective_paged.py
|
Python
|
mit
| 906
|
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import os
import sys
import urllib
#get defaults and folders/folder helpers
addon = xbmcaddon.Addon()
addonID = addon.getAddonInfo('id')
def ensure_dir(d):
if not os.path.exists(d):
os.makedirs(d)
ADDON_PATH = addon.getAddonInfo('path').decode('utf-8')
RESOURCES_PATH = os.path.join(
xbmc.translatePath(ADDON_PATH),
'resources',
)
MEDIA_PATH = os.path.join(
xbmc.translatePath(ADDON_PATH),
'resources',
'media'
)
ICON_PATH = os.path.join(
xbmc.translatePath(ADDON_PATH),
'resources',
'media',
'steam.png'
)
USER_DATA = xbmc.translatePath("special://profile/addon_data/"+addonID)
ensure_dir(USER_DATA)
#translation helper
def translation(id):
return str(addon.getLocalizedString(id).encode('utf-8'))
#parse params
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
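# Example: a plugin URL query string such as "?mode=wake&mac=aa-bb" becomes
# a plain dict of parameters:
#   parameters_string_to_dict("?mode=wake&mac=aa-bb")
#   # -> {'mode': 'wake', 'mac': 'aa-bb'}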
#add index entry
def addIndex(name, url, icon):
addDir(name, sys.argv[0]+"?"+url, os.path.join(MEDIA_PATH, icon), os.path.join(MEDIA_PATH, "background.jpg"))
#add mac address entry
def addMac(mac):
addDir(mac, sys.argv[0]+"?mode=wake&mac="+mac.replace(":", "-"), os.path.join(MEDIA_PATH, "pc.png"), os.path.join(MEDIA_PATH, "background.jpg"))
#add client entry
def addClient(hostname, username, authenticated=True):
if authenticated:
addDir(username + " @ " + hostname, sys.argv[0]+"?mode=games&hostname="+urllib.quote_plus(hostname)+"&username="+urllib.quote_plus(username), os.path.join(MEDIA_PATH, "pc.png"), os.path.join(MEDIA_PATH, "background.jpg"))
else:
addDir(username + " @ " + hostname, sys.argv[0]+"?mode=noauth&hostname="+urllib.quote_plus(hostname)+"&username="+urllib.quote_plus(username), os.path.join(MEDIA_PATH, "pc.png"), os.path.join(MEDIA_PATH, "background.jpg"))
#add game entry
def addGame(name, app_id, state, username, hostname, thump=None, fanart=None):
#non steam games do not have fanart
    if fanart is None:
        fanart = os.path.join(MEDIA_PATH, "background.jpg")
    if thump is None:
        thump = os.path.join(MEDIA_PATH, "game.png")
context = []
context.append(("Achievements", 'Container.Update(plugin://plugin.program.steam.streaming/?mode=achievements&hostname=&appid='+str(app_id)+"&username="+urllib.quote_plus(username)+"&hostname="+urllib.quote_plus(hostname)+')',))
#storefront api is broken
#context.append(("Trailers", 'Container.Update(plugin://plugin.program.steam.streaming/?mode=movies&hostname=&appid='+str(app_id)+"&username="+urllib.quote_plus(username)+"&hostname="+urllib.quote_plus(hostname)+')',))
#grey out uninstalled entries
if state == -1:
addDir(name, sys.argv[0]+"?mode=game&id="+str(app_id)+"&username="+urllib.quote_plus(username)+"&hostname="+urllib.quote_plus(hostname), thump, fanart, "22FFFFFF", context)
else:
addDir(name, sys.argv[0]+"?mode=game&id="+str(app_id)+"&username="+urllib.quote_plus(username)+"&hostname="+urllib.quote_plus(hostname), thump, fanart, contextMenus=context)
#add achievement entry
def addAchievement(name, desc, unlocked, hostname, username, thump, fanart):
if unlocked:
addDir(name, sys.argv[0]+"?mode=games&hostname="+urllib.quote_plus(hostname)+"&username="+urllib.quote_plus(username), thump, fanart, desc=desc)
else:
addDir(name, sys.argv[0]+"?mode=games&hostname="+urllib.quote_plus(hostname)+"&username="+urllib.quote_plus(username), thump, fanart, "22FFFFFF", desc=desc)
#add movie entry
#utils.addMovie(movie["name"], movie["thumbnail"], movie["webm"]["max"])
def addMovie(name, thump, url):
addDir(name, url, thump, thump, isFolder=False)
#global entry helper function
def addDir(name, url, thump=None, background=None, color=None, contextMenus=None, desc=None, isFolder=True):
liz = None
formatted_name = name
if color:
formatted_name = "[COLOR "+color+"]"+name+"[/COLOR]"
if thump:
liz = xbmcgui.ListItem(formatted_name, iconImage=thump, thumbnailImage=thump)
if background:
liz.setProperty('fanart_image', background)
else:
liz = xbmcgui.ListItem(formatted_name, iconImage="DefaultFolder.png")
liz.setInfo(type="Video", infoLabels={"Title": name})
    if desc is not None:
        liz.setLabel2(desc)
    if contextMenus is not None:
liz.addContextMenuItems(contextMenus)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=liz, isFolder=isFolder)
|
Drakulix/plugin.program.steam.streaming
|
utils.py
|
Python
|
mit
| 4,747
|
from peewee import *
from app import Account, PageView
DEFAULT_ACCOUNT_ID = 1
class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
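# Usage sketch (hypothetical date range; assumes `import datetime`): build a
# report for the default account, restrict it to January 2014, and pull the
# most common referrer domains as (referrer, count) tuples.
#   report = Report()
#   report.date_range = (datetime.date(2014, 1, 1), datetime.date(2014, 1, 31))
#   for referrer, count in report.top_referrers():
#       print referrer, count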
|
coleifer/peewee
|
examples/analytics/reports.py
|
Python
|
mit
| 4,400
|
from urlparse import urlparse
PROTOCOL_TO_PORT = {
'http': 80,
'https': 443,
}
def client_from_config(
client_cls, configuration, prefix='unicorehub.', **kwargs):
settings = dict((key[len(prefix):], value)
for key, value in configuration.iteritems()
if key.startswith(prefix))
settings.update(kwargs)
# password renamed to key - don't break old configs
if 'app_password' in settings:
settings['app_key'] = settings['app_password']
del settings['app_password']
return client_cls(**settings)
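# Example (sketch; HubApiClient and the config keys are hypothetical): keys
# under the prefix are stripped of it and passed as keyword arguments.
#   client_from_config(HubApiClient,
#                      {'unicorehub.host': 'http://hub', 'ignored.key': 1})
#   # -> HubApiClient(host='http://hub')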
def same_origin(url1, url2):
''' Return True if the urls have the same origin, else False.
Copied from Django:
https://github.com/django/django/blob/master/django/utils/http.py#L255
'''
p1, p2 = urlparse(url1), urlparse(url2)
try:
o1 = (p1.scheme, p1.hostname, p1.port or PROTOCOL_TO_PORT[p1.scheme])
o2 = (p2.scheme, p2.hostname, p2.port or PROTOCOL_TO_PORT[p2.scheme])
return o1 == o2
except (ValueError, KeyError):
return False
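# Examples (default ports are filled in from PROTOCOL_TO_PORT):
#   same_origin('https://example.com/a', 'https://example.com:443/b')  # True
#   same_origin('http://example.com', 'https://example.com')           # False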
|
universalcore/unicore.hub.client
|
unicore/hub/client/utils.py
|
Python
|
bsd-2-clause
| 1,081
|
from flask import request, render_template
from flask.ext.login import current_user
def index():
return render_template('index.html', active_page='index')
|
PhoenixRacing/PhoenixRacingWebApp-noregrets
|
application/controllers/index.py
|
Python
|
bsd-3-clause
| 156
|
from rest_framework import serializers
class MailTestSerializer(serializers.Serializer):
EMAIL_HOST = serializers.CharField(max_length=1024, required=True)
EMAIL_PORT = serializers.IntegerField(default=25)
EMAIL_HOST_USER = serializers.CharField(max_length=1024)
EMAIL_HOST_PASSWORD = serializers.CharField(required=False, allow_blank=True)
EMAIL_FROM = serializers.CharField(required=False, allow_blank=True)
EMAIL_RECIPIENT = serializers.CharField(required=False, allow_blank=True)
EMAIL_USE_SSL = serializers.BooleanField(default=False)
EMAIL_USE_TLS = serializers.BooleanField(default=False)
class LDAPTestSerializer(serializers.Serializer):
AUTH_LDAP_SERVER_URI = serializers.CharField(max_length=1024)
AUTH_LDAP_BIND_DN = serializers.CharField(max_length=1024, required=False, allow_blank=True)
AUTH_LDAP_BIND_PASSWORD = serializers.CharField(required=False, allow_blank=True)
AUTH_LDAP_SEARCH_OU = serializers.CharField()
AUTH_LDAP_SEARCH_FILTER = serializers.CharField()
AUTH_LDAP_USER_ATTR_MAP = serializers.CharField()
AUTH_LDAP_START_TLS = serializers.BooleanField(required=False)
class LDAPUserSerializer(serializers.Serializer):
id = serializers.CharField()
username = serializers.CharField()
name = serializers.CharField()
email = serializers.CharField()
existing = serializers.BooleanField(read_only=True)
class PublicSettingSerializer(serializers.Serializer):
data = serializers.DictField(read_only=True)
|
sdgdsffdsfff/jumpserver
|
apps/settings/serializers.py
|
Python
|
gpl-2.0
| 1,513
|
#!/usr/bin/env python
from convoy.meta import main
main()
|
adamlincoln/Bookie
|
scripts/js/generate_meta.py
|
Python
|
agpl-3.0
| 60
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
class Instance(object):
"""
Instance object use in all algorithm module
Parameters
----------
    inst_id : int, the id of the instance; a reserved field in this version
    weight: float, the weight of the instance
    features : object, ndarray or SparseVector object in this version
    label: None or float, the data label
"""
def __init__(self, inst_id=None, weight=None, features=None, label=None):
self.inst_id = inst_id
self.weight = weight
self.features = features
self.label = label
def set_weight(self, weight=1.0):
self.weight = weight
def set_label(self, label=1):
self.label = label
def set_feature(self, features):
self.features = features
    def copy(self, exclusive_attr=None):
        keywords = {"inst_id", "weight", "features", "label"}
        if exclusive_attr:
            keywords -= set(exclusive_attr)
        copy_obj = Instance()
        # excluded attributes keep the fresh Instance defaults (None)
        for key in keywords:
            attr = getattr(self, key)
            setattr(copy_obj, key, attr)
        return copy_obj
@property
def with_inst_id(self):
return self.inst_id is not None
@staticmethod
def is_instance():
return True
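# Usage sketch: copy an instance while dropping its label.
#   inst = Instance(weight=1.0, features=[0.1, 0.2], label=1)
#   inst_copy = inst.copy(exclusive_attr=["label"])
#   inst_copy.label  # -> None (excluded attributes keep the defaults)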
|
FederatedAI/FATE
|
python/federatedml/feature/instance.py
|
Python
|
apache-2.0
| 2,143
|
#
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from six.moves import cPickle
import errno
import gzip
import os
import sys
def is_gz(filename):
ext = os.path.splitext(filename)[-1]
return ext == '.gz'
def is_pkl(filename):
if is_gz(filename):
filename = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[-1]
return ext == '.pkl'
def my_open(filename):
opener = open
if is_gz(filename):
opener = gzip.open
return opener
def pickle(variable, filename, dir=''):
full_filename = os.path.join(dir, filename)
opener = my_open(full_filename)
with opener(full_filename, 'wb') as fh:
cPickle.dump(variable, fh)
def unpickle(filename, dir=''):
full_filename = os.path.join(dir, filename)
opener = my_open(full_filename)
with opener(full_filename, 'rb') as fh:
variable = cPickle.load(fh)
return variable
def rm_local(path, DEBUG=False):
    # Note: path is interpolated into a shell command unquoted, so this
    # helper should only ever be called with trusted paths.
    cmd_str = 'rm -rf %s'
    cmd_str %= path
    if DEBUG:
        print(cmd_str)
    else:
        os.system(cmd_str)
    return
def ensure_dir(dir):
    try:
        os.makedirs(dir)
    except OSError as e:
        # Tolerate the directory already existing (including the race where
        # another process creates it first); re-raise anything else.
        if e.errno == errno.EEXIST:
            pass
        else:
            sys.stderr.write('Could not create dir: %s\n' % dir)
            raise
    return
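# --- Editor's hedged usage sketch (not part of the crosscat source) ---
# Round-trips a small object through the gzip-aware pickle helpers above,
# assuming the process may create a scratch directory in the CWD.
if __name__ == '__main__':
    ensure_dir('scratch_state')
    pickle({'alpha': 1.0}, 'state.pkl.gz', dir='scratch_state')
    print(unpickle('state.pkl.gz', dir='scratch_state'))  # -> {'alpha': 1.0}
    rm_local('scratch_state')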
|
probcomp/crosscat
|
src/utils/file_utils.py
|
Python
|
apache-2.0
| 2,073
|
# -*- coding: utf8 -*-
from yanntricks import *
def ratrap():
pspict,fig = SinglePicture("ratrap")
pspict.dilatation(1)
a=1
b=2
O=Point(0,0)
c1 = Circle(O,a).parametric_curve(0,pi/2)
c2 = Circle(O,b).parametric_curve(0,pi/2)
surface = SurfaceBetweenParametricCurves(c1,c2,interval=(0,pi/2))
surface.parameters.hatched()
surface.parameters.hatch.color="blue"
surface.parameters.color = "blue"
Ax = Point(a,0)
Bx = Point(b,0)
Ay = Point(0,a)
By = Point(0,b)
Ax.put_mark(0.2,text="$a$",pspict=pspict,position="N")
Bx.put_mark(0.2,text="$b$",pspict=pspict,position="N")
Ay.put_mark(0.2,text="$a$",pspict=pspict,position="E")
By.put_mark(0.2,text="$b$",pspict=pspict,position="E")
pspict.DrawGraphs(surface,Ax,Ay,Bx,By)
pspict.axes.no_graduation()
pspict.DrawDefaultAxes()
pspict.comment="Une surface hachurée en bleu"
fig.conclude()
fig.write_the_file()
|
LaurentClaessens/mazhe
|
src_yanntricks/yanntricksratrap.py
|
Python
|
gpl-3.0
| 959
|
# -*- coding: utf-8 -*-
from __future__ import division
import geohash
from haversine import haversine
from .trie import Trie, KeyNotFound
DEFAULT_PRECISION = 10
# (approximate smallest geohash cell dimension in meters, geohash precision),
# ordered from the finest to the coarsest level; consumed by
# GeoTrie._radius_to_precision() below.
PRECISION_LEVELS = (
(0.00925, 12),
(0.074, 11),
(0.6, 10),
(2.4, 9),
(19, 8),
(76, 7),
(610, 6),
(2400, 5),
(20000, 4),
(78000, 3),
(630000, 2),
(2500000, 1)
)
class GeoTrie(object):
def __init__(self, points=None, precision=DEFAULT_PRECISION):
"""Initializes the geotrie.
:param points: A list of (point, data) tuples to initialize the trie with.
:param precision: The geohash precision for encoding the points.
"""
self._precision = precision
self.trie = Trie()
if points is not None:
self.add_many(points)
@property
def precision(self):
return self._precision
    @precision.setter
    def precision(self, value):
        # Precision is fixed at construction time; changing it would
        # invalidate every geohash already stored in the trie.
        raise NotImplementedError('precision cannot be changed after init')
def add(self, point, data=None):
"""Adds a new point to the trie.
:param point: A coordinate tuple.
:param data: Optional data to store with the point.
"""
encoded = geohash.encode(*point, precision=self.precision)
self.trie.add(encoded, (point, data))
def add_many(self, tuples):
"""Adds many points to the trie.
:param tuples: A list of (point, data) tuples. Set data to `None` to not store
any data with the points.
"""
for point, data in tuples:
self.add(point, data=data)
    def radius_search(self, center, radius):
        """Returns all points within `radius` meters of the center.
        :param center: A coordinate tuple.
        :param radius: The radius in meters.
        """
        precision = min(self.precision, self._radius_to_precision(radius))
        center_hash = geohash.encode(*center, precision=precision)
        hits = self._get_hits(center_hash)
        # haversine() returns kilometers, so convert the radius from meters.
        return [(p, d) for p, d in hits if haversine(p, center) <= (radius / 1000)]
def _get_hits(self, center_hash):
hits = []
for hash_ in geohash.expand(center_hash):
try:
hits.extend(self.trie.values_for_prefix(hash_))
except KeyNotFound:
pass
return hits
    def _radius_to_precision(self, radius):
        for size, precision in PRECISION_LEVELS:
            if size >= radius / 2:
                return precision
        # Fall back to the coarsest precision for very large radii instead of
        # returning None, which would break the min() call in radius_search.
        return 1
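# --- Editor's hedged usage sketch (not part of the geotrie source) ---
# Assumes the `geohash` and `haversine` dependencies are installed; points
# are (lat, lon) tuples and the search radius is given in meters.
if __name__ == '__main__':
    gt = GeoTrie(points=[((52.5200, 13.4050), 'berlin'),
                         ((48.8566, 2.3522), 'paris')])
    # Only the Berlin point lies within 5 km of the query coordinate.
    print(gt.radius_search((52.52, 13.40), radius=5000))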
|
alexmic/geotrie-python
|
geotrie/__init__.py
|
Python
|
mit
| 2,494
|
# -*- coding: utf-8 -*-
from resources.lib.modules import cache
class Indexer:
def get_live_channels(self):
import scraper_live as scraper
live = scraper.get_live_channels()
return live
def get_vod(self):
import scraper_vod as scraper
vod = scraper.get_globoplay_channels()
for item in vod:
item["brplayprovider"] = "globoplay"
return vod
def get_channel_categories(self):
import scraper_vod as scraper
categories, programs = cache.get(scraper.get_globo_programs, 1)
return categories
def get_extra_categories(self):
import scraper_vod as scraper
categories = scraper.get_extra_sections()
return categories
def get_category_programs(self, category, subcategory=None):
import scraper_vod as scraper
categories, category_programs = cache.get(scraper.get_globo_programs, 1)
        if not isinstance(category, unicode):
            category = unicode(category, 'UTF-8')
category_data = next(category_program for category_program in category_programs if category_program['category'] == category)
if subcategory is not None:
return next(category_program for category_program in category_data['subcategories'] if category_program['category'] == subcategory)['programs']
else:
return category_data['programs']
def get_category_subcategories(self, category):
import scraper_vod as scraper
categories, category_programs = cache.get(scraper.get_globo_programs, 1)
        if not isinstance(category, unicode):
            category = unicode(category, 'UTF-8')
subcategories = next(category_program for category_program in category_programs if category_program['category'] == category)['subcategories']
return [subcategory['category'] for subcategory in subcategories]
def get_videos_by_category(self, category, page=1):
import scraper_vod as scraper
episodes, next_page, total_pages = scraper.get_globo_extra_episodes(category, page)
for episode in episodes:
episode['brplayprovider'] = 'globoplay'
return episodes, next_page, total_pages
def get_videos_by_program(self, program_id, page=1, bingewatch=False):
import scraper_vod as scraper
from resources.lib.modules import control
episodes, nextpage, total_pages, days = scraper.get_globo_episodes(program_id, page, bingewatch) if control.setting("globo_play_full_videos") == 'true' else scraper.get_globo_partial_episodes(program_id, page, bingewatch)
for episode in episodes:
episode['brplayprovider'] = 'globoplay'
return episodes, nextpage, total_pages, days
def get_videos_by_program_date(self, program_id, date, bingewatch=False):
import scraper_vod as scraper
episodes = scraper.get_globo_episodes_by_date(program_id, date, bingewatch)
for episode in episodes:
episode['brplayprovider'] = 'globoplay'
return episodes
def get_program_dates(self, program_id):
import scraper_vod as scraper
days, last = scraper.get_program_dates(program_id)
return days
def search(self, q, page=1):
import scraper_vod as scraper
return scraper.search(q, page)
def get_states(self):
import scraper_vod as scraper
return cache.get(scraper.get_states, 1)
def get_regions(self, state):
import scraper_vod as scraper
return cache.get(scraper.get_regions, 1, state)
def get_programs_by_region(self, region):
import scraper_vod as scraper
return cache.get(scraper.get_programs_by_region, 1, region)
def get_4k(self):
import scraper_vod as scraper
return cache.get(scraper.get_4k, 1)
|
bruno-briner/plugin.video.brplay
|
resources/lib/modules/globoplay/indexer.py
|
Python
|
gpl-3.0
| 3,863
|
"""
Copyright 2016 Brandon Michael Hoffman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from fossil import version, virtual_dirs, virtual_files
class TestVirtualDirs(object):
virt_dir = virtual_dirs.VirtualDir("test_dir1", [
virtual_dirs.VirtualDir("test_dir2")
])
def test_init(self):
assert self.virt_dir.name == "test_dir1"
assert len(self.virt_dir.children) == 1
assert self.virt_dir.children[0].name == "test_dir2"
def test_str(self):
assert str(self.virt_dir) == "test_dir1\n test_dir2\n"
expected_results = " test_dir1\n test_dir2\n"
assert self.virt_dir.__str__(level=1) == expected_results
class TestTemplateDirs(TestVirtualDirs):
virt_dir = virtual_dirs.TemplateDir("test_dir1", [
virtual_dirs.TemplateDir("test_dir2")
])
def test_render(self):
results = self.virt_dir.render()
assert len(results) == 1
assert results[0].__class__ == virtual_dirs.VirtualDir
assert results[0].name == "test_dir1"
assert len(results[0].children) == 1
assert results[0].children[0].__class__ == virtual_dirs.VirtualDir
assert results[0].children[0].name == "test_dir2"
virt_dir2 = virtual_dirs.TemplateDir("test_dir1,test_dir2", [
virtual_dirs.TemplateDir("test_dir3,test_dir4"),
virtual_files.TemplateFile("test_file1,test_file2", "test_content")
])
results2 = virt_dir2.render()
assert len(results2) == 2
assert results2[0].__class__ == virtual_dirs.VirtualDir
assert results2[0].name == "test_dir1"
assert results2[1].__class__ == virtual_dirs.VirtualDir
assert results2[1].name == "test_dir2"
assert len(results2[0].children) == 4
assert len(results2[1].children) == 4
virt_dir3 = virtual_dirs.TemplateDir("{{name}}_dir1,{{name}}_dir2", [
virtual_dirs.TemplateDir(
"{{name}}_dir3,{{fossil.directory.name}}.test"
),
virtual_files.TemplateFile(
"{{fossil.directory.name}}.{{name}}_file1,test",
"{{fossil.directory.name}}.{{fossil.file.name}}")
])
results3 = virt_dir3.render(name="test")
assert len(results3) == 2
assert results3[0].__class__ == virtual_dirs.VirtualDir
assert results3[0].name == "test_dir1"
assert results3[1].__class__ == virtual_dirs.VirtualDir
assert results3[1].name == "test_dir2"
assert len(results3[0].children) == 4
assert len(results3[1].children) == 4
assert results3[1].children[1].name == "test_dir2.test"
assert results3[1].children[2].name == "test_dir2.test_file1"
expected_result = "test_dir2.test_dir2.test_file1"
assert results3[1].children[2].content == expected_result
assert results3[1].children[3].content == "test_dir2.test"
virt_dir4 = virtual_dirs.TemplateDir(" ", [
virtual_dirs.TemplateDir("test"),
])
results4 = virt_dir4.render(name="test")
assert len(results4) == 0
class TestVirtualRoot(object):
virt_root = virtual_dirs.VirtualRoot([
virtual_dirs.VirtualDir("test_dir")
])
def test_init(self):
assert len(self.virt_root.children) == 1
assert self.virt_root.children[0].name == 'test_dir'
def test_str(self):
assert str(self.virt_root) == "-root\n test_dir\n"
expected_result = " -root\n test_dir\n"
assert self.virt_root.__str__(level=1) == expected_result
class TestTemplateRoot(TestVirtualRoot):
virt_root = virtual_dirs.TemplateRoot([
virtual_dirs.TemplateDir("test_dir")
])
def test_render(self):
results = self.virt_root.render()
assert results.__class__ == virtual_dirs.VirtualRoot
assert len(results.children) == 1
virt_root2 = virtual_dirs.TemplateRoot([
virtual_dirs.TemplateDir("{{name}}_dir"),
virtual_files.TemplateFile("{{name}}_file", "")
])
results2 = virt_root2.render(name="test")
assert len(results2.children) == 2
assert results2.children[0].name == "test_dir"
assert results2.children[1].name == "test_file"
virt_file = virtual_files.TemplateFile(
"file_{{fossil.version}}",
"{{fossil.directory.name}}.{{fossil.file.name}}.{{fossil.version}}"
)
virt_root3 = virtual_dirs.TemplateRoot([
virtual_dirs.TemplateDir("test_dir_{{fossil.version}}", [
virt_file
])
])
results3 = virt_root3.render(name="test")
assert len(results3.children) == 1
assert results3.children[0].name == "test_dir_" + str(version.VERSION)
expected_result = "file_" + str(version.VERSION)
assert results3.children[0].children[0].name == expected_result
expected_result = "test_dir_{version}.file_{version}.{version}".format(
version=version.VERSION
)
assert results3.children[0].children[0].content == expected_result
|
BrandonHoffman/python-fossil
|
tests/test_virtual_dirs.py
|
Python
|
apache-2.0
| 5,631
|
# Copyright 2013 OpenStack Foundation
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
import time
from oslo_config import cfg
from manila.common import constants
from manila import context
from manila.db import api as db_api
from manila import exception
from manila.network.neutron import api as neutron_api
from manila.network.neutron import constants as neutron_constants
from manila.network.neutron import neutron_network_plugin as plugin
from manila import test
from manila.tests import utils as test_utils
CONF = cfg.CONF
fake_neutron_port = {
"status": "ACTIVE",
"allowed_address_pairs": [],
"admin_state_up": True,
"network_id": "test_net_id",
"tenant_id": "fake_tenant_id",
"extra_dhcp_opts": [],
"device_owner": "test",
"binding:capabilities": {"port_filter": True},
"mac_address": "test_mac",
"fixed_ips": [
{"subnet_id": "test_subnet_id", "ip_address": "203.0.113.100"},
],
"id": "test_port_id",
"security_groups": ["fake_sec_group_id"],
"device_id": "fake_device_id",
}
fake_neutron_network = {
'admin_state_up': True,
'availability_zone_hints': [],
'availability_zones': ['nova'],
'description': '',
'id': 'fake net id',
'ipv4_address_scope': None,
'ipv6_address_scope': None,
'name': 'test_neutron_network',
'port_security_enabled': True,
'provider:network_type': 'vxlan',
'provider:physical_network': None,
'provider:segmentation_id': 1234,
'router:external': False,
'shared': False,
'status': 'ACTIVE',
'subnets': ['fake subnet id',
'fake subnet id 2'],
}
fake_share_network = {
'id': 'fake nw info id',
'neutron_subnet_id': fake_neutron_network['subnets'][0],
'neutron_net_id': fake_neutron_network['id'],
'project_id': 'fake project id',
'status': 'test_subnet_status',
'name': 'fake name',
'description': 'fake description',
'security_services': [],
'network_type': 'fake_network_type',
'segmentation_id': 1234,
'ip_version': 4,
'cidr': 'fake_cidr',
'gateway': 'fake_gateway',
'mtu': 1509,
}
fake_share_server = {
'id': 'fake nw info id',
'status': 'test_server_status',
'host': 'fake@host',
'network_allocations': [],
'shares': [],
}
fake_network_allocation = {
'id': fake_neutron_port['id'],
'share_server_id': fake_share_server['id'],
'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'],
'mac_address': fake_neutron_port['mac_address'],
'status': constants.STATUS_ACTIVE,
'label': 'user',
'network_type': fake_share_network['network_type'],
'segmentation_id': fake_share_network['segmentation_id'],
'ip_version': fake_share_network['ip_version'],
'cidr': fake_share_network['cidr'],
'gateway': fake_share_network['gateway'],
'mtu': 1509,
}
fake_nw_info = {
'segments': [
{
'provider:network_type': 'vlan',
'provider:physical_network': 'net1',
'provider:segmentation_id': 3926,
},
{
'provider:network_type': 'vxlan',
'provider:physical_network': None,
'provider:segmentation_id': 2000,
},
],
'mtu': 1509,
}
fake_neutron_network_multi = {
'admin_state_up': True,
'availability_zone_hints': [],
'availability_zones': ['nova'],
'description': '',
'id': 'fake net id',
'ipv4_address_scope': None,
'ipv6_address_scope': None,
'name': 'test_neutron_network',
'port_security_enabled': True,
'router:external': False,
'shared': False,
'status': 'ACTIVE',
'subnets': ['fake subnet id',
'fake subnet id 2'],
'segments': fake_nw_info['segments'],
'mtu': fake_nw_info['mtu'],
}
fake_share_network_multi = {
'id': 'fake nw info id',
'neutron_subnet_id': fake_neutron_network_multi['subnets'][0],
'neutron_net_id': fake_neutron_network_multi['id'],
'project_id': 'fake project id',
'status': 'test_subnet_status',
'name': 'fake name',
'description': 'fake description',
'security_services': [],
'ip_version': 4,
'cidr': 'fake_cidr',
'gateway': 'fake_gateway',
'mtu': fake_neutron_network_multi['mtu'],
}
fake_network_allocation_multi = {
'id': fake_neutron_port['id'],
'share_server_id': fake_share_server['id'],
'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'],
'mac_address': fake_neutron_port['mac_address'],
'status': constants.STATUS_ACTIVE,
'label': 'user',
'network_type': None,
'segmentation_id': None,
'ip_version': fake_share_network_multi['ip_version'],
'cidr': fake_share_network_multi['cidr'],
'gateway': 'fake_gateway',
'mtu': fake_share_network_multi['mtu'],
}
fake_binding_profile = {
'neutron_switch_id': 'fake switch id',
'neutron_port_id': 'fake port id',
'neutron_switch_info': 'fake switch info'
}
class NeutronNetworkPluginTest(test.TestCase):
def setUp(self):
super(NeutronNetworkPluginTest, self).setUp()
self.plugin = self._get_neutron_network_plugin_instance()
self.plugin.db = db_api
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
def _get_neutron_network_plugin_instance(self, config_data=None):
if config_data is None:
return plugin.NeutronNetworkPlugin()
with test_utils.create_temp_config_with_opts(config_data):
return plugin.NeutronNetworkPlugin()
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_one_allocation(self):
has_provider_nw_ext = mock.patch.object(
self.plugin, '_has_provider_network_extension').start()
has_provider_nw_ext.return_value = True
save_nw_data = mock.patch.object(self.plugin,
'_save_neutron_network_data').start()
save_subnet_data = mock.patch.object(
self.plugin,
'_save_neutron_subnet_data').start()
with mock.patch.object(self.plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
allocation_info={'count': 1})
has_provider_nw_ext.assert_any_call()
save_nw_data.assert_called_once_with(self.fake_context,
fake_share_network)
save_subnet_data.assert_called_once_with(self.fake_context,
fake_share_network)
self.plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network['project_id'],
network_id=fake_share_network['neutron_net_id'],
subnet_id=fake_share_network['neutron_subnet_id'],
device_owner='manila:share',
device_id=fake_share_network['id'])
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation)
has_provider_nw_ext.stop()
save_nw_data.stop()
save_subnet_data.stop()
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_two_allocation(self):
has_provider_nw_ext = mock.patch.object(
self.plugin, '_has_provider_network_extension').start()
has_provider_nw_ext.return_value = True
save_nw_data = mock.patch.object(self.plugin,
'_save_neutron_network_data').start()
save_subnet_data = mock.patch.object(
self.plugin,
'_save_neutron_subnet_data').start()
with mock.patch.object(self.plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
count=2)
neutron_api_calls = [
mock.call(fake_share_network['project_id'],
network_id=fake_share_network['neutron_net_id'],
subnet_id=fake_share_network['neutron_subnet_id'],
device_owner='manila:share',
device_id=fake_share_network['id']),
mock.call(fake_share_network['project_id'],
network_id=fake_share_network['neutron_net_id'],
subnet_id=fake_share_network['neutron_subnet_id'],
device_owner='manila:share',
device_id=fake_share_network['id']),
]
db_api_calls = [
mock.call(self.fake_context, fake_network_allocation),
mock.call(self.fake_context, fake_network_allocation)
]
self.plugin.neutron_api.create_port.assert_has_calls(
neutron_api_calls)
db_api.network_allocation_create.assert_has_calls(db_api_calls)
has_provider_nw_ext.stop()
save_nw_data.stop()
save_subnet_data.stop()
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_allocate_network_create_port_exception(self):
has_provider_nw_ext = mock.patch.object(
self.plugin, '_has_provider_network_extension').start()
has_provider_nw_ext.return_value = True
save_nw_data = mock.patch.object(self.plugin,
'_save_neutron_network_data').start()
save_subnet_data = mock.patch.object(
self.plugin,
'_save_neutron_subnet_data').start()
create_port = mock.patch.object(self.plugin.neutron_api,
'create_port').start()
create_port.side_effect = exception.NetworkException
self.assertRaises(exception.NetworkException,
self.plugin.allocate_network,
self.fake_context,
fake_share_server,
fake_share_network)
has_provider_nw_ext.stop()
save_nw_data.stop()
save_subnet_data.stop()
create_port.stop()
@mock.patch.object(db_api, 'network_allocation_delete', mock.Mock())
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
@mock.patch.object(db_api, 'network_allocations_get_for_share_server',
mock.Mock(return_value=[fake_network_allocation]))
def test_deallocate_network_nominal(self):
share_srv = {'id': fake_share_server['id']}
share_srv['network_allocations'] = [fake_network_allocation]
with mock.patch.object(self.plugin.neutron_api, 'delete_port',
mock.Mock()):
self.plugin.deallocate_network(self.fake_context, share_srv)
self.plugin.neutron_api.delete_port.assert_called_once_with(
fake_network_allocation['id'])
db_api.network_allocation_delete.assert_called_once_with(
self.fake_context,
fake_network_allocation['id'])
@mock.patch.object(db_api, 'share_network_update',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'network_allocation_update', mock.Mock())
@mock.patch.object(db_api, 'network_allocations_get_for_share_server',
mock.Mock(return_value=[fake_network_allocation]))
def test_deallocate_network_neutron_api_exception(self):
share_srv = {'id': fake_share_server['id']}
share_srv['network_allocations'] = [fake_network_allocation]
delete_port = mock.patch.object(self.plugin.neutron_api,
'delete_port').start()
delete_port.side_effect = exception.NetworkException
self.assertRaises(exception.NetworkException,
self.plugin.deallocate_network,
self.fake_context,
share_srv)
db_api.network_allocation_update.assert_called_once_with(
self.fake_context,
fake_network_allocation['id'],
{'status': constants.STATUS_ERROR})
delete_port.stop()
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_save_neutron_network_data(self):
neutron_nw_info = {
'provider:network_type': 'vlan',
'provider:segmentation_id': 1000,
'mtu': 1509,
}
share_nw_update_dict = {
'network_type': 'vlan',
'segmentation_id': 1000,
'mtu': 1509,
}
with mock.patch.object(self.plugin.neutron_api,
'get_network',
mock.Mock(return_value=neutron_nw_info)):
self.plugin._save_neutron_network_data(self.fake_context,
fake_share_network)
self.plugin.neutron_api.get_network.assert_called_once_with(
fake_share_network['neutron_net_id'])
self.plugin.db.share_network_update.assert_called_once_with(
self.fake_context,
fake_share_network['id'],
share_nw_update_dict)
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_save_neutron_network_data_multi_segment(self):
share_nw_update_dict = {
'network_type': 'vlan',
'segmentation_id': 3926,
'mtu': 1509
}
config_data = {
'DEFAULT': {
'neutron_physical_net_name': 'net1',
}
}
self.mock_object(self.plugin.neutron_api, 'get_network')
self.plugin.neutron_api.get_network.return_value = fake_nw_info
with test_utils.create_temp_config_with_opts(config_data):
self.plugin._save_neutron_network_data(self.fake_context,
fake_share_network)
self.plugin.neutron_api.get_network.assert_called_once_with(
fake_share_network['neutron_net_id'])
self.plugin.db.share_network_update.assert_called_once_with(
self.fake_context,
fake_share_network['id'],
share_nw_update_dict)
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_save_neutron_network_data_multi_segment_without_ident(self):
config_data = {
'DEFAULT': {
'neutron_physical_net_name': 'net100',
}
}
self.mock_object(self.plugin.neutron_api, 'get_network')
self.plugin.neutron_api.get_network.return_value = fake_nw_info
with test_utils.create_temp_config_with_opts(config_data):
self.assertRaises(exception.NetworkBadConfigurationException,
self.plugin._save_neutron_network_data,
self.fake_context, fake_share_network)
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_save_neutron_network_data_multi_segment_without_cfg(self):
self.mock_object(self.plugin.neutron_api, 'get_network')
self.plugin.neutron_api.get_network.return_value = fake_nw_info
self.assertRaises(exception.NetworkBadConfigurationException,
self.plugin._save_neutron_network_data,
self.fake_context, fake_share_network)
@mock.patch.object(db_api, 'share_network_update', mock.Mock())
def test_save_neutron_subnet_data(self):
neutron_subnet_info = {
'cidr': '10.0.0.0/24',
'ip_version': 4,
'gateway_ip': '10.0.0.1',
}
subnet_value = {
'cidr': '10.0.0.0/24',
'ip_version': 4,
'gateway': '10.0.0.1',
}
with mock.patch.object(self.plugin.neutron_api,
'get_subnet',
mock.Mock(return_value=neutron_subnet_info)):
self.plugin._save_neutron_subnet_data(self.fake_context,
fake_share_network)
self.plugin.neutron_api.get_subnet.assert_called_once_with(
fake_share_network['neutron_subnet_id'])
self.plugin.db.share_network_update.assert_called_once_with(
self.fake_context,
fake_share_network['id'],
subnet_value)
def test_has_network_provider_extension_true(self):
extensions = {neutron_constants.PROVIDER_NW_EXT: {}}
with mock.patch.object(self.plugin.neutron_api,
'list_extensions',
mock.Mock(return_value=extensions)):
result = self.plugin._has_provider_network_extension()
self.plugin.neutron_api.list_extensions.assert_any_call()
self.assertTrue(result)
def test_has_network_provider_extension_false(self):
with mock.patch.object(self.plugin.neutron_api,
'list_extensions',
mock.Mock(return_value={})):
result = self.plugin._has_provider_network_extension()
self.plugin.neutron_api.list_extensions.assert_any_call()
self.assertFalse(result)
@ddt.ddt
class NeutronSingleNetworkPluginTest(test.TestCase):
def setUp(self):
super(NeutronSingleNetworkPluginTest, self).setUp()
self.context = 'fake_context'
def test_init_valid(self):
fake_net_id = 'fake_net_id'
fake_subnet_id = 'fake_subnet_id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': fake_subnet_id,
}
}
fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
instance = plugin.NeutronSingleNetworkPlugin()
self.assertEqual(fake_net_id, instance.net)
self.assertEqual(fake_subnet_id, instance.subnet)
neutron_api.API.get_network.assert_called_once_with(fake_net_id)
@ddt.data(
{'net': None, 'subnet': None},
{'net': 'fake_net_id', 'subnet': None},
{'net': None, 'subnet': 'fake_subnet_id'})
@ddt.unpack
def test_init_invalid(self, net, subnet):
config_data = dict()
# Simulate absence of set values
if net:
config_data['neutron_net_id'] = net
if subnet:
config_data['neutron_subnet_id'] = subnet
config_data = dict(DEFAULT=config_data)
with test_utils.create_temp_config_with_opts(config_data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.NeutronSingleNetworkPlugin)
@ddt.data({}, {'subnets': []}, {'subnets': ['different_foo_subnet']})
def test_init_subnet_does_not_belong_to_net(self, fake_net):
fake_net_id = 'fake_net_id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': 'fake_subnet_id',
}
}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.NeutronSingleNetworkPlugin)
neutron_api.API.get_network.assert_called_once_with(fake_net_id)
def _get_neutron_network_plugin_instance(self, config_data=None):
if not config_data:
fake_subnet_id = 'fake_subnet_id'
config_data = {
'DEFAULT': {
'neutron_net_id': 'fake_net_id',
'neutron_subnet_id': fake_subnet_id,
}
}
fake_net = {'subnets': [fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network',
mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
instance = plugin.NeutronSingleNetworkPlugin()
return instance
def test___update_share_network_net_data_same_values(self):
instance = self._get_neutron_network_plugin_instance()
share_network = {
'neutron_net_id': instance.net,
'neutron_subnet_id': instance.subnet,
}
result = instance._update_share_network_net_data(
self.context, share_network)
self.assertEqual(share_network, result)
def test___update_share_network_net_data_different_values_empty(self):
instance = self._get_neutron_network_plugin_instance()
share_network_input = {
'id': 'fake_share_network_id',
}
share_network_result = {
'neutron_net_id': instance.net,
'neutron_subnet_id': instance.subnet,
}
self.mock_object(
instance.db, 'share_network_update',
mock.Mock(return_value='foo'))
instance._update_share_network_net_data(
self.context, share_network_input)
instance.db.share_network_update.assert_called_once_with(
self.context, share_network_input['id'], share_network_result)
@ddt.data(
{'n': 'fake_net_id', 's': 'bar'},
{'n': 'foo', 's': 'fake_subnet_id'})
@ddt.unpack
def test___update_share_network_net_data_different_values(self, n, s):
instance = self._get_neutron_network_plugin_instance()
share_network = {
'id': 'fake_share_network_id',
'neutron_net_id': n,
'neutron_subnet_id': s,
}
self.mock_object(
instance.db, 'share_network_update',
mock.Mock(return_value=share_network))
self.assertRaises(
exception.NetworkBadConfigurationException,
instance._update_share_network_net_data,
self.context, share_network)
self.assertFalse(instance.db.share_network_update.called)
def test_allocate_network(self):
self.mock_object(plugin.NeutronNetworkPlugin, 'allocate_network')
plugin.NeutronNetworkPlugin.allocate_network.return_value = [
fake_neutron_port, fake_neutron_port]
instance = self._get_neutron_network_plugin_instance()
share_server = 'fake_share_server'
share_network = 'fake_share_network'
share_network_upd = 'updated_fake_share_network'
count = 2
device_owner = 'fake_device_owner'
self.mock_object(
instance, '_update_share_network_net_data',
mock.Mock(return_value=share_network_upd))
instance.allocate_network(
self.context, share_server, share_network, count=count,
device_owner=device_owner)
instance._update_share_network_net_data.assert_called_once_with(
self.context, share_network)
plugin.NeutronNetworkPlugin.allocate_network.assert_called_once_with(
self.context, share_server, share_network_upd, count=count,
device_owner=device_owner)
@ddt.ddt
class NeutronBindNetworkPluginTest(test.TestCase):
def setUp(self):
super(NeutronBindNetworkPluginTest, self).setUp()
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
self.has_binding_ext_mock = self.mock_object(
neutron_api.API, '_has_port_binding_extension')
self.has_binding_ext_mock.return_value = True
self.bind_plugin = self._get_neutron_network_plugin_instance()
self.bind_plugin.db = db_api
self.sleep_mock = self.mock_object(time, 'sleep')
self.fake_share_network_multi = dict(fake_share_network_multi)
def _get_neutron_network_plugin_instance(self, config_data=None):
if config_data is None:
return plugin.NeutronBindNetworkPlugin()
with test_utils.create_temp_config_with_opts(config_data):
return plugin.NeutronBindNetworkPlugin()
def test_wait_for_bind(self):
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.return_value = fake_neutron_port
self.bind_plugin._wait_for_ports_bind([fake_neutron_port],
fake_share_server)
self.bind_plugin.neutron_api.show_port.assert_called_once_with(
fake_neutron_port['id'])
self.sleep_mock.assert_not_called()
def test_wait_for_bind_error(self):
fake_neut_port = copy.copy(fake_neutron_port)
fake_neut_port['status'] = 'ERROR'
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.return_value = fake_neut_port
self.assertRaises(exception.NetworkException,
self.bind_plugin._wait_for_ports_bind,
[fake_neut_port, fake_neut_port],
fake_share_server)
self.bind_plugin.neutron_api.show_port.assert_called_once_with(
fake_neutron_port['id'])
self.sleep_mock.assert_not_called()
@ddt.data(('DOWN', 'ACTIVE'), ('DOWN', 'DOWN'), ('ACTIVE', 'DOWN'))
def test_wait_for_bind_two_ports_no_bind(self, state):
fake_neut_port1 = copy.copy(fake_neutron_port)
fake_neut_port1['status'] = state[0]
fake_neut_port2 = copy.copy(fake_neutron_port)
fake_neut_port2['status'] = state[1]
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.side_effect = (
[fake_neut_port1, fake_neut_port2] * 20)
self.assertRaises(exception.NetworkBindException,
self.bind_plugin._wait_for_ports_bind,
[fake_neut_port1, fake_neut_port2],
fake_share_server)
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_one_allocation(self):
self.mock_object(self.bind_plugin, '_has_provider_network_extension')
self.bind_plugin._has_provider_network_extension.return_value = True
save_nw_data = self.mock_object(self.bind_plugin,
'_save_neutron_network_data')
save_subnet_data = self.mock_object(self.bind_plugin,
'_save_neutron_subnet_data')
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = 'foohost1'
self.mock_object(db_api, 'network_allocation_create')
db_api.network_allocation_create.return_value = fake_network_allocation
self.mock_object(self.bind_plugin.neutron_api, 'get_network')
self.bind_plugin.neutron_api.get_network.return_value = (
fake_neutron_network)
with mock.patch.object(self.bind_plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.bind_plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
allocation_info={'count': 1})
self.bind_plugin._has_provider_network_extension.assert_any_call()
save_nw_data.assert_called_once_with(self.fake_context,
fake_share_network)
save_subnet_data.assert_called_once_with(self.fake_context,
fake_share_network)
expected_kwargs = {
'binding:vnic_type': 'baremetal',
'host_id': 'foohost1',
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:share',
'device_id': fake_share_network['id'],
}
self.bind_plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network['project_id'], **expected_kwargs)
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation)
self.bind_plugin._wait_for_ports_bind.assert_called_once_with(
[db_api.network_allocation_create()], fake_share_server)
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation_multi))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network_multi))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_multi_segment(self):
network_allocation_update_data = {
'network_type':
fake_nw_info['segments'][0]['provider:network_type'],
'segmentation_id':
fake_nw_info['segments'][0]['provider:segmentation_id'],
}
network_update_data = dict(network_allocation_update_data)
network_update_data['mtu'] = fake_nw_info['mtu']
fake_network_allocation_multi_updated = dict(
fake_network_allocation_multi)
fake_network_allocation_multi_updated.update(
network_allocation_update_data)
fake_share_network_multi_updated = dict(fake_share_network_multi)
fake_share_network_multi_updated.update(network_update_data)
config_data = {
'DEFAULT': {
'neutron_net_id': 'fake net id',
'neutron_subnet_id': 'fake subnet id',
'neutron_physical_net_name': 'net1',
}
}
self.bind_plugin = self._get_neutron_network_plugin_instance(
config_data)
self.bind_plugin.db = db_api
self.mock_object(self.bind_plugin, '_has_provider_network_extension')
self.bind_plugin._has_provider_network_extension.return_value = True
save_subnet_data = self.mock_object(self.bind_plugin,
'_save_neutron_subnet_data')
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = 'foohost1'
self.mock_object(db_api, 'network_allocation_create')
db_api.network_allocation_create.return_value = (
fake_network_allocation_multi)
self.mock_object(db_api, 'network_allocation_update')
db_api.network_allocation_update.return_value = (
fake_network_allocation_multi_updated)
self.mock_object(self.bind_plugin.neutron_api, 'get_network')
self.bind_plugin.neutron_api.get_network.return_value = (
fake_neutron_network_multi)
self.mock_object(db_api, 'share_network_update')
with mock.patch.object(self.bind_plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.bind_plugin.allocate_network(
self.fake_context,
fake_share_server,
self.fake_share_network_multi,
allocation_info={'count': 1})
self.bind_plugin._has_provider_network_extension.assert_any_call()
save_subnet_data.assert_called_once_with(
self.fake_context,
fake_share_network_multi_updated)
expected_kwargs = {
'binding:vnic_type': 'baremetal',
'host_id': 'foohost1',
'network_id': fake_share_network_multi['neutron_net_id'],
'subnet_id': fake_share_network_multi['neutron_subnet_id'],
'device_owner': 'manila:share',
'device_id': fake_share_network_multi['id']
}
self.bind_plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network_multi['project_id'], **expected_kwargs)
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation_multi)
db_api.share_network_update.assert_called_once_with(
self.fake_context,
fake_share_network_multi['id'],
network_update_data)
network_allocation_update_data['cidr'] = (
fake_share_network_multi['cidr'])
network_allocation_update_data['ip_version'] = (
fake_share_network_multi['ip_version'])
db_api.network_allocation_update.assert_called_once_with(
self.fake_context,
fake_neutron_port['id'],
network_allocation_update_data)
@ddt.data({
'neutron_binding_profiles': None,
'binding_profiles': {}
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': {}
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': None
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': {
'fake_profile': {
'neutron_switch_id': 'fake switch id',
'neutron_port_id': 'fake port id',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
}
}
}, {
'neutron_binding_profiles': None,
'binding_profiles': {
'fake_profile': {
'neutron_switch_id': 'fake switch id',
'neutron_port_id': 'fake port id',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
}
}
}, {
'neutron_binding_profiles': 'fake_profile_one,fake_profile_two',
'binding_profiles': {
'fake_profile_one': {
'neutron_switch_id': 'fake switch id 1',
'neutron_port_id': 'fake port id 1',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
},
'fake_profile_two': {
'neutron_switch_id': 'fake switch id 2',
'neutron_port_id': 'fake port id 2',
'neutron_switch_info': 'switch_ip: 127.0.0.2'
}
}
}, {
'neutron_binding_profiles': 'fake_profile_two',
'binding_profiles': {
'fake_profile_one': {
'neutron_switch_id': 'fake switch id 1',
'neutron_port_id': 'fake port id 1',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
},
'fake_profile_two': {
'neutron_switch_id': 'fake switch id 2',
'neutron_port_id': 'fake port id 2',
'neutron_switch_info': 'switch_ip: 127.0.0.2'
}
}
})
@ddt.unpack
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test__get_port_create_args(self, neutron_binding_profiles,
binding_profiles):
fake_device_owner = 'share'
fake_host_id = 'fake host'
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = fake_host_id
config_data = {
'DEFAULT': {
'neutron_net_id': fake_neutron_network['id'],
'neutron_subnet_id': fake_neutron_network['subnets'][0]
}
}
# Simulate absence of set values
if neutron_binding_profiles:
config_data['DEFAULT'][
'neutron_binding_profiles'] = neutron_binding_profiles
if binding_profiles:
for name, binding_profile in binding_profiles.items():
config_data[name] = binding_profile
instance = self._get_neutron_network_plugin_instance(config_data)
create_args = instance._get_port_create_args(fake_share_server,
fake_share_network,
fake_device_owner)
expected_create_args = {
'binding:vnic_type': 'baremetal',
'host_id': fake_host_id,
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:' + fake_device_owner,
'device_id': fake_share_server['id']
}
if neutron_binding_profiles:
expected_create_args['binding:profile'] = {
'local_link_information': []
}
local_links = expected_create_args[
'binding:profile']['local_link_information']
for profile in neutron_binding_profiles.split(','):
if binding_profiles is None:
binding_profile = {}
else:
binding_profile = binding_profiles.get(profile, {})
local_links.append({
'port_id': binding_profile.get('neutron_port_id', None),
'switch_id': binding_profile.get('neutron_switch_id', None)
})
switch_info = binding_profile.get('neutron_switch_info', None)
if switch_info is None:
local_links[-1]['switch_info'] = None
else:
local_links[-1]['switch_info'] = cfg.types.Dict()(
switch_info)
self.assertEqual(expected_create_args, create_args)
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test__get_port_create_args_host_id(self):
fake_device_owner = 'share'
fake_host_id = 'fake host'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_neutron_network['id'],
'neutron_subnet_id': fake_neutron_network['subnets'][0],
'neutron_host_id': fake_host_id
}
}
instance = self._get_neutron_network_plugin_instance(config_data)
create_args = instance._get_port_create_args(fake_share_server,
fake_share_network,
fake_device_owner)
expected_create_args = {
'binding:vnic_type': 'baremetal',
'host_id': fake_host_id,
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:' + fake_device_owner,
'device_id': fake_share_server['id']
}
self.assertEqual(expected_create_args, create_args)
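# --- Editor's hedged illustration (not part of the manila source) ---
# The ddt cases in test__get_port_create_args above imply this mapping from
# the comma-separated `neutron_binding_profiles` option to the port's
# binding:profile local_link_information; this standalone sketch merely
# restates the expectation the assertions encode.
def _sketch_local_link_information(neutron_binding_profiles, binding_profiles):
    links = []
    for name in neutron_binding_profiles.split(','):
        profile = (binding_profiles or {}).get(name, {})
        switch_info = profile.get('neutron_switch_info')
        links.append({
            'port_id': profile.get('neutron_port_id'),
            'switch_id': profile.get('neutron_switch_id'),
            # The tests parse switch_info strings with oslo.config's Dict type.
            'switch_info': (None if switch_info is None
                            else cfg.types.Dict()(switch_info)),
        })
    return links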
@ddt.ddt
class NeutronBindSingleNetworkPluginTest(test.TestCase):
def setUp(self):
super(NeutronBindSingleNetworkPluginTest, self).setUp()
self.context = 'fake_context'
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
self.has_binding_ext_mock = self.mock_object(
neutron_api.API, '_has_port_binding_extension')
self.has_binding_ext_mock.return_value = True
        self.sleep_mock = self.mock_object(time, 'sleep')
        self.bind_plugin = self._get_neutron_network_plugin_instance()
        self.bind_plugin.db = db_api
def _get_neutron_network_plugin_instance(self, config_data=None):
if not config_data:
fake_net_id = 'fake net id'
fake_subnet_id = 'fake subnet id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': fake_subnet_id,
'neutron_physical_net_name': 'net1',
}
}
fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network',
mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
return plugin.NeutronBindSingleNetworkPlugin()
def test_allocate_network(self):
self.mock_object(plugin.NeutronNetworkPlugin, 'allocate_network')
plugin.NeutronNetworkPlugin.allocate_network.return_value = [
'port1', 'port2']
instance = self._get_neutron_network_plugin_instance()
share_server = 'fake_share_server'
share_network = {'neutron_net_id': {}}
share_network_upd = {'neutron_net_id': {'upd': True}}
count = 2
device_owner = 'fake_device_owner'
self.mock_object(
instance, '_update_share_network_net_data',
mock.Mock(return_value=share_network_upd))
self.mock_object(instance, '_wait_for_ports_bind', mock.Mock())
instance.allocate_network(
self.context, share_server, share_network, count=count,
device_owner=device_owner)
instance._update_share_network_net_data.assert_called_once_with(
self.context, share_network)
plugin.NeutronNetworkPlugin.allocate_network.assert_called_once_with(
self.context, share_server, share_network_upd, count=count,
device_owner=device_owner)
instance._wait_for_ports_bind.assert_called_once_with(
['port1', 'port2'], share_server)
def test_init_valid(self):
fake_net_id = 'fake_net_id'
fake_subnet_id = 'fake_subnet_id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': fake_subnet_id,
}
}
fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
instance = plugin.NeutronSingleNetworkPlugin()
self.assertEqual(fake_net_id, instance.net)
self.assertEqual(fake_subnet_id, instance.subnet)
neutron_api.API.get_network.assert_called_once_with(fake_net_id)
@ddt.data(
{'net': None, 'subnet': None},
{'net': 'fake_net_id', 'subnet': None},
{'net': None, 'subnet': 'fake_subnet_id'})
@ddt.unpack
def test_init_invalid(self, net, subnet):
config_data = dict()
# Simulate absence of set values
if net:
config_data['neutron_net_id'] = net
if subnet:
config_data['neutron_subnet_id'] = subnet
config_data = dict(DEFAULT=config_data)
with test_utils.create_temp_config_with_opts(config_data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.NeutronSingleNetworkPlugin)
@ddt.data({}, {'subnets': []}, {'subnets': ['different_foo_subnet']})
def test_init_subnet_does_not_belong_to_net(self, fake_net):
fake_net_id = 'fake_net_id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': 'fake_subnet_id',
}
}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.NeutronSingleNetworkPlugin)
neutron_api.API.get_network.assert_called_once_with(fake_net_id)
def _get_neutron_single_network_plugin_instance(self):
fake_subnet_id = 'fake_subnet_id'
config_data = {
'DEFAULT': {
'neutron_net_id': 'fake_net_id',
'neutron_subnet_id': fake_subnet_id,
}
}
fake_net = {'subnets': [fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
with test_utils.create_temp_config_with_opts(config_data):
instance = plugin.NeutronSingleNetworkPlugin()
return instance
def test___update_share_network_net_data_same_values(self):
instance = self._get_neutron_single_network_plugin_instance()
share_network = {
'neutron_net_id': instance.net,
'neutron_subnet_id': instance.subnet,
}
result = instance._update_share_network_net_data(
self.context, share_network)
self.assertEqual(share_network, result)
def test___update_share_network_net_data_different_values_empty(self):
instance = self._get_neutron_single_network_plugin_instance()
share_network_input = {
'id': 'fake_share_network_id',
}
share_network_result = {
'neutron_net_id': instance.net,
'neutron_subnet_id': instance.subnet,
}
self.mock_object(
instance.db, 'share_network_update',
mock.Mock(return_value='foo'))
instance._update_share_network_net_data(
self.context, share_network_input)
instance.db.share_network_update.assert_called_once_with(
self.context, share_network_input['id'], share_network_result)
@ddt.data(
{'n': 'fake_net_id', 's': 'bar'},
{'n': 'foo', 's': 'fake_subnet_id'})
@ddt.unpack
def test___update_share_network_net_data_different_values(self, n, s):
instance = self._get_neutron_single_network_plugin_instance()
share_network = {
'id': 'fake_share_network_id',
'neutron_net_id': n,
'neutron_subnet_id': s,
}
self.mock_object(
instance.db, 'share_network_update',
mock.Mock(return_value=share_network))
self.assertRaises(
exception.NetworkBadConfigurationException,
instance._update_share_network_net_data,
self.context, share_network)
self.assertFalse(instance.db.share_network_update.called)
def test_wait_for_bind(self):
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.return_value = fake_neutron_port
self.bind_plugin._wait_for_ports_bind([fake_neutron_port],
fake_share_server)
self.bind_plugin.neutron_api.show_port.assert_called_once_with(
fake_neutron_port['id'])
self.sleep_mock.assert_not_called()
def test_wait_for_bind_error(self):
fake_neut_port = copy.copy(fake_neutron_port)
fake_neut_port['status'] = 'ERROR'
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.return_value = fake_neut_port
self.assertRaises(exception.NetworkException,
self.bind_plugin._wait_for_ports_bind,
[fake_neut_port, fake_neut_port],
fake_share_server)
self.bind_plugin.neutron_api.show_port.assert_called_once_with(
fake_neutron_port['id'])
self.sleep_mock.assert_not_called()
@ddt.data(('DOWN', 'ACTIVE'), ('DOWN', 'DOWN'), ('ACTIVE', 'DOWN'))
def test_wait_for_bind_two_ports_no_bind(self, state):
fake_neut_port1 = copy.copy(fake_neutron_port)
fake_neut_port1['status'] = state[0]
fake_neut_port2 = copy.copy(fake_neutron_port)
fake_neut_port2['status'] = state[1]
self.mock_object(self.bind_plugin.neutron_api, 'show_port')
self.bind_plugin.neutron_api.show_port.side_effect = (
[fake_neut_port1, fake_neut_port2] * 20)
self.assertRaises(exception.NetworkBindException,
self.bind_plugin._wait_for_ports_bind,
[fake_neut_port1, fake_neut_port2],
fake_share_server)
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_one_allocation(self):
self.mock_object(self.bind_plugin, '_has_provider_network_extension')
self.bind_plugin._has_provider_network_extension.return_value = True
save_nw_data = self.mock_object(self.bind_plugin,
'_save_neutron_network_data')
save_subnet_data = self.mock_object(self.bind_plugin,
'_save_neutron_subnet_data')
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = 'foohost1'
self.mock_object(db_api, 'network_allocation_create')
with mock.patch.object(self.bind_plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.bind_plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
allocation_info={'count': 1})
self.bind_plugin._has_provider_network_extension.assert_any_call()
save_nw_data.assert_called_once_with(self.fake_context,
fake_share_network)
save_subnet_data.assert_called_once_with(self.fake_context,
fake_share_network)
expected_kwargs = {
'binding:vnic_type': 'baremetal',
'host_id': 'foohost1',
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:share',
'device_id': fake_share_network['id'],
}
self.bind_plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network['project_id'], **expected_kwargs)
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation)
self.bind_plugin._wait_for_ports_bind.assert_called_once_with(
[db_api.network_allocation_create()], fake_share_server)
@ddt.data({
'neutron_binding_profiles': None,
'binding_profiles': {}
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': {}
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': None
}, {
'neutron_binding_profiles': 'fake_profile',
'binding_profiles': {
'fake_profile': {
'neutron_switch_id': 'fake switch id',
'neutron_port_id': 'fake port id',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
}
}
}, {
'neutron_binding_profiles': None,
'binding_profiles': {
'fake_profile': {
'neutron_switch_id': 'fake switch id',
'neutron_port_id': 'fake port id',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
}
}
}, {
'neutron_binding_profiles': 'fake_profile_one,fake_profile_two',
'binding_profiles': {
'fake_profile_one': {
'neutron_switch_id': 'fake switch id 1',
'neutron_port_id': 'fake port id 1',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
},
'fake_profile_two': {
'neutron_switch_id': 'fake switch id 2',
'neutron_port_id': 'fake port id 2',
'neutron_switch_info': 'switch_ip: 127.0.0.2'
}
}
}, {
'neutron_binding_profiles': 'fake_profile_two',
'binding_profiles': {
'fake_profile_one': {
'neutron_switch_id': 'fake switch id 1',
'neutron_port_id': 'fake port id 1',
'neutron_switch_info': 'switch_ip: 127.0.0.1'
},
'fake_profile_two': {
'neutron_switch_id': 'fake switch id 2',
'neutron_port_id': 'fake port id 2',
'neutron_switch_info': 'switch_ip: 127.0.0.2'
}
}
})
@ddt.unpack
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test__get_port_create_args(self, neutron_binding_profiles,
binding_profiles):
fake_device_owner = 'share'
fake_host_id = 'fake host'
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = fake_host_id
config_data = {
'DEFAULT': {
'neutron_net_id': fake_neutron_network['id'],
'neutron_subnet_id': fake_neutron_network['subnets'][0]
}
}
# Simulate absence of set values
if neutron_binding_profiles:
config_data['DEFAULT'][
'neutron_binding_profiles'] = neutron_binding_profiles
if binding_profiles:
for name, binding_profile in binding_profiles.items():
config_data[name] = binding_profile
instance = self._get_neutron_network_plugin_instance(config_data)
create_args = instance._get_port_create_args(fake_share_server,
fake_share_network,
fake_device_owner)
expected_create_args = {
'binding:vnic_type': 'baremetal',
'host_id': fake_host_id,
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:' + fake_device_owner,
'device_id': fake_share_server['id']
}
if neutron_binding_profiles:
expected_create_args['binding:profile'] = {
'local_link_information': []
}
local_links = expected_create_args[
'binding:profile']['local_link_information']
for profile in neutron_binding_profiles.split(','):
if binding_profiles is None:
binding_profile = {}
else:
binding_profile = binding_profiles.get(profile, {})
local_links.append({
'port_id': binding_profile.get('neutron_port_id', None),
'switch_id': binding_profile.get('neutron_switch_id', None)
})
switch_info = binding_profile.get('neutron_switch_info', None)
if switch_info is None:
local_links[-1]['switch_info'] = None
else:
local_links[-1]['switch_info'] = cfg.types.Dict()(
switch_info)
self.assertEqual(expected_create_args, create_args)
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test__get_port_create_args_host_id(self):
fake_device_owner = 'share'
fake_host_id = 'fake host'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_neutron_network['id'],
'neutron_subnet_id': fake_neutron_network['subnets'][0],
'neutron_host_id': fake_host_id
}
}
instance = self._get_neutron_network_plugin_instance(config_data)
create_args = instance._get_port_create_args(fake_share_server,
fake_share_network,
fake_device_owner)
expected_create_args = {
'binding:vnic_type': 'baremetal',
'host_id': fake_host_id,
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:' + fake_device_owner,
'device_id': fake_share_server['id']
}
self.assertEqual(expected_create_args, create_args)
class NeutronBindNetworkPluginWithNormalTypeTest(test.TestCase):
def setUp(self):
super(NeutronBindNetworkPluginWithNormalTypeTest, self).setUp()
config_data = {
'DEFAULT': {
'neutron_vnic_type': 'normal',
}
}
self.plugin = plugin.NeutronNetworkPlugin()
self.plugin.db = db_api
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
with test_utils.create_temp_config_with_opts(config_data):
self.bind_plugin = plugin.NeutronBindNetworkPlugin()
self.bind_plugin.db = db_api
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_one_allocation(self):
self.mock_object(self.bind_plugin, '_has_provider_network_extension')
self.bind_plugin._has_provider_network_extension.return_value = True
save_nw_data = self.mock_object(self.bind_plugin,
'_save_neutron_network_data')
save_subnet_data = self.mock_object(self.bind_plugin,
'_save_neutron_subnet_data')
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = 'foohost1'
self.mock_object(db_api, 'network_allocation_create')
multi_seg = self.mock_object(
self.bind_plugin, '_is_neutron_multi_segment')
multi_seg.return_value = False
with mock.patch.object(self.bind_plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.bind_plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
allocation_info={'count': 1})
self.bind_plugin._has_provider_network_extension.assert_any_call()
save_nw_data.assert_called_once_with(self.fake_context,
fake_share_network)
save_subnet_data.assert_called_once_with(self.fake_context,
fake_share_network)
expected_kwargs = {
'binding:vnic_type': 'normal',
'host_id': 'foohost1',
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:share',
'device_id': fake_share_network['id'],
}
self.bind_plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network['project_id'], **expected_kwargs)
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation)
self.bind_plugin._wait_for_ports_bind.assert_not_called()
def test_update_network_allocation(self):
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
self.mock_object(db_api, 'network_allocations_get_for_share_server')
db_api.network_allocations_get_for_share_server.return_value = [
fake_neutron_port]
self.bind_plugin.update_network_allocation(self.fake_context,
fake_share_server)
self.bind_plugin._wait_for_ports_bind.assert_called_once_with(
[fake_neutron_port], fake_share_server)
@ddt.ddt
class NeutronBindSingleNetworkPluginWithNormalTypeTest(test.TestCase):
def setUp(self):
super(NeutronBindSingleNetworkPluginWithNormalTypeTest, self).setUp()
fake_net_id = 'fake net id'
fake_subnet_id = 'fake subnet id'
config_data = {
'DEFAULT': {
'neutron_net_id': fake_net_id,
'neutron_subnet_id': fake_subnet_id,
'neutron_vnic_type': 'normal',
}
}
fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]}
self.mock_object(
neutron_api.API, 'get_network', mock.Mock(return_value=fake_net))
self.plugin = plugin.NeutronNetworkPlugin()
self.plugin.db = db_api
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
with test_utils.create_temp_config_with_opts(config_data):
self.bind_plugin = plugin.NeutronBindSingleNetworkPlugin()
self.bind_plugin.db = db_api
@mock.patch.object(db_api, 'network_allocation_create',
                       mock.Mock(return_value=fake_network_allocation))
@mock.patch.object(db_api, 'share_network_get',
mock.Mock(return_value=fake_share_network))
@mock.patch.object(db_api, 'share_server_get',
mock.Mock(return_value=fake_share_server))
def test_allocate_network_one_allocation(self):
self.mock_object(self.bind_plugin, '_has_provider_network_extension')
self.bind_plugin._has_provider_network_extension.return_value = True
save_nw_data = self.mock_object(self.bind_plugin,
'_save_neutron_network_data')
save_subnet_data = self.mock_object(self.bind_plugin,
'_save_neutron_subnet_data')
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1]
self.mock_object(neutron_host_id_opts, 'default')
neutron_host_id_opts.default = 'foohost1'
self.mock_object(db_api, 'network_allocation_create')
with mock.patch.object(self.bind_plugin.neutron_api, 'create_port',
mock.Mock(return_value=fake_neutron_port)):
self.bind_plugin.allocate_network(
self.fake_context,
fake_share_server,
fake_share_network,
allocation_info={'count': 1})
self.bind_plugin._has_provider_network_extension.assert_any_call()
save_nw_data.assert_called_once_with(self.fake_context,
fake_share_network)
save_subnet_data.assert_called_once_with(self.fake_context,
fake_share_network)
expected_kwargs = {
'binding:vnic_type': 'normal',
'host_id': 'foohost1',
'network_id': fake_share_network['neutron_net_id'],
'subnet_id': fake_share_network['neutron_subnet_id'],
'device_owner': 'manila:share',
'device_id': fake_share_network['id'],
}
self.bind_plugin.neutron_api.create_port.assert_called_once_with(
fake_share_network['project_id'], **expected_kwargs)
db_api.network_allocation_create.assert_called_once_with(
self.fake_context,
fake_network_allocation)
self.bind_plugin._wait_for_ports_bind.assert_not_called()
def test_update_network_allocation(self):
self.mock_object(self.bind_plugin, '_wait_for_ports_bind')
self.mock_object(db_api, 'network_allocations_get_for_share_server')
db_api.network_allocations_get_for_share_server.return_value = [
fake_neutron_port]
self.bind_plugin.update_network_allocation(self.fake_context,
fake_share_server)
self.bind_plugin._wait_for_ports_bind.assert_called_once_with(
[fake_neutron_port], fake_share_server)
@ddt.data({'fix_ips': [{'ip_address': 'test_ip'},
{'ip_address': '10.78.223.129'}],
'ip_version': 4},
{'fix_ips': [{'ip_address': 'test_ip'},
{'ip_address': 'ad80::abaa:0:c2:2'}],
'ip_version': 6},
{'fix_ips': [{'ip_address': '10.78.223.129'},
{'ip_address': 'ad80::abaa:0:c2:2'}],
'ip_version': 6},
)
@ddt.unpack
def test__get_matched_ip_address(self, fix_ips, ip_version):
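        # In each ddt data set the second address is the first entry that
        # matches the requested IP version; earlier entries are either not
        # valid IP literals or of the wrong version, so the helper must
        # skip them.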
result = self.bind_plugin._get_matched_ip_address(fix_ips, ip_version)
self.assertEqual(fix_ips[1]['ip_address'], result)
@ddt.data({'fix_ips': [{'ip_address': 'test_ip_1'},
{'ip_address': 'test_ip_2'}],
'ip_version': (4, 6)},
{'fix_ips': [{'ip_address': 'ad80::abaa:0:c2:1'},
{'ip_address': 'ad80::abaa:0:c2:2'}],
'ip_version': (4, )},
{'fix_ips': [{'ip_address': '192.0.0.2'},
{'ip_address': '192.0.0.3'}],
'ip_version': (6, )},
{'fix_ips': [{'ip_address': '192.0.0.2/12'},
{'ip_address': '192.0.0.330'},
{'ip_address': 'ad80::001::ad80'},
{'ip_address': 'ad80::abaa:0:c2:2/64'}],
'ip_version': (4, 6)},
)
@ddt.unpack
def test__get_matched_ip_address_illegal(self, fix_ips, ip_version):
for version in ip_version:
self.assertRaises(exception.NetworkBadConfigurationException,
self.bind_plugin._get_matched_ip_address,
fix_ips, version)
|
bswartz/manila
|
manila/tests/network/neutron/test_neutron_plugin.py
|
Python
|
apache-2.0
| 70,016
|
"""
Task
Students of District College subscribe to English and French newspapers. Some students have subscribed only to English, some only to French, and some to both newspapers.
You are given two sets of roll numbers of students who have subscribed to the English and French newspapers. Your task is to find the total number of students who have subscribed to both newspapers.
Input Format
The first line contains the number of students who have subscribed to the English newspaper.
The second line contains the space-separated roll numbers of the students who have subscribed to the English newspaper.
The third line contains the number of students who have subscribed to the French newspaper.
The fourth line contains the space-separated roll numbers of the students who have subscribed to the French newspaper.
Constraints
0 < Total number of students in the college < 1000
Output Format
Output the total number of students who have subscriptions to both the English and French newspapers.
Sample Input
9
1 2 3 4 5 6 7 8 9
9
10 1 2 3 11 21 55 6 8
Sample Output
5
Explanation
Roll numbers of students who have both subscriptions:
1, 2, 3, 6 and 8.
Hence, the total is 5 students.
"""
n1 = int(raw_input())
english = set(map(int, raw_input().split()))
n2 = int(raw_input())
french = set(map(int, raw_input().split()))
print len(english.intersection(french))
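
# A Python 3 equivalent of the solution above (a minimal sketch; the
# original targets Python 2, where raw_input() and the print statement
# exist). Reading from sys.stdin keeps it independent of the lines above,
# and the function name is illustrative, not part of the original file.
import sys

def count_both_subscribers(stream=sys.stdin):
    stream.readline()  # count of English subscribers (unused)
    english_rolls = set(map(int, stream.readline().split()))
    stream.readline()  # count of French subscribers (unused)
    french_rolls = set(map(int, stream.readline().split()))
    # The & operator is shorthand for set.intersection()
    return len(english_rolls & french_rolls)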
|
spradeepv/dive-into-python
|
hackerrank/domain/python/sets/intersection.py
|
Python
|
mit
| 1,336
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2020 Samuele Carcagno <sam.carcagno@gmail.com>
# This file is part of pychoacoustics
# pychoacoustics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pychoacoustics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pychoacoustics. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import *
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import QLocale
from PyQt4.QtGui import QDialog, QDialogButtonBox, QFont, QHBoxLayout, QLabel, QPushButton, QSizePolicy, QSpacerItem, QTextBrowser, QVBoxLayout
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtCore import QLocale
from PySide.QtGui import QDialog, QDialogButtonBox, QFont, QHBoxLayout, QLabel, QPushButton, QSizePolicy, QSpacerItem, QTextBrowser, QVBoxLayout
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import QLocale
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QHBoxLayout, QLabel, QPushButton, QSizePolicy, QSpacerItem, QTextBrowser, QVBoxLayout
from PyQt5.QtGui import QFont
import random
from .dialog_show_par_diff import *
class dialogMemoryFileParametersDiffer(QDialog):
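    """Dialog shown when the parameters in memory differ from those in a file.

    Presents the given text with Yes/No buttons (accept/reject), a
    "Show Differences" button that opens a dialogShowParDiff with
    ``diffText``, and a Cancel button that clears the parent's
    ``exitFlag`` before accepting.
    """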
def __init__(self, parent, text, diffText):
QDialog.__init__(self, parent)
self.prm = self.parent().prm
self.currLocale = self.parent().prm['currentLocale']
self.currLocale.setNumberOptions(self.currLocale.OmitGroupSeparator | self.currLocale.RejectGroupSeparator)
self.diffText = diffText
self.vBoxSizer = QVBoxLayout()
self.hBoxSizer = QHBoxLayout()
self.textTF = QLabel(text)
self.vBoxSizer.addWidget(self.textTF)
self.yesButt = QPushButton(self.tr("Yes"), self)
self.noButt = QPushButton(self.tr("No"), self)
self.showDiffButt = QPushButton(self.tr("Show Differences"), self)
self.cancelButt = QPushButton(self.tr("Cancel"), self)
self.hBoxSizer.addItem(QSpacerItem(10,10, QSizePolicy.Expanding))
self.hBoxSizer.addWidget(self.yesButt)
self.hBoxSizer.addWidget(self.noButt)
self.hBoxSizer.addWidget(self.showDiffButt)
self.hBoxSizer.addWidget(self.cancelButt)
self.vBoxSizer.addLayout(self.hBoxSizer)
self.showDiffButt.clicked.connect(self.onClickShowDiffButt)
self.cancelButt.clicked.connect(self.onClickCancelButt)
self.yesButt.clicked.connect(self.accept)
self.noButt.clicked.connect(self.reject)
self.setLayout(self.vBoxSizer)
self.setWindowTitle(self.tr("Warning"))
self.show()
def onClickShowDiffButt(self):
dia = dialogShowParDiff(self, self.diffText)
def onClickCancelButt(self):
self.parent().exitFlag = False
self.accept()
|
sam81/pychoacoustics
|
pychoacoustics/dialog_memory_file_parameters_differ.py
|
Python
|
gpl-3.0
| 3,413
|
"""
WSGI config for SSECTA project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SSECTA.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Xivid/Programming-Training-Helper
|
SSECTA/wsgi.py
|
Python
|
cc0-1.0
| 387