text stringlengths 0 1.05M | meta dict |
|---|---|
"""A little utility for manually patching in posts."""
import argparse
import codecs
import datetime
import errno
from glob import glob
import os
import pprint
import time
import subprocess
import sys
from rauth import OAuth1Session
import yaml
from models import Post
import tla
# Context.IO account id and OAuth credentials come from the environment;
# these must be set before import or a KeyError is raised here.
aid = os.environ['CIO_AID']
cio_requests = OAuth1Session(os.environ['CIO_KEY'], os.environ['CIO_SECRET'])
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: succeed silently if it exists.

    from: http://stackoverflow.com/a/600612/1231454"""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow the error only when the path already exists as a
        # directory; anything else (permissions, a file in the way)
        # propagates to the caller.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def git_checkout_branch(name):
    """Switch the working tree to git branch *name*.

    Refuses to run when there are uncommitted changes; raises an
    Exception if either the cleanliness check or the checkout fails."""
    dirty = subprocess.call(["git", "diff", "--quiet", "HEAD"])
    if dirty != 0:
        raise Exception("Dirty working tree; not checking out %s" % name)
    checked_out = subprocess.call(["git", "checkout", name])
    if checked_out != 0:
        raise Exception("Could not checkout %s" % name)
def _write_out(posts, yaml=True, supporting=False):
    """Write each post's files to disk, printing every path written.

    Parameters:
        posts: iterable of Post objects.
        yaml: if False, skip files under _posts/.
        supporting: if False, skip files *not* under _posts/.

    NOTE(review): the ``yaml`` parameter shadows the module-level
    ``yaml`` import inside this function; harmless here, but worth
    renaming.
    """
    for p in posts:
        for path, contents in tla.files_to_create(p):
            if path.startswith('_posts') and not yaml:
                continue
            if not path.startswith('_posts') and not supporting:
                continue
            mkdir_p(os.path.dirname(path))
            with codecs.open(path, 'w', 'utf-8') as f:
                f.write(contents)
            print path
def dl_after(args):
    """Download posts received after args.date and write them to _posts."""
    git_checkout_branch('gh-pages')
    # args.date is 'YYYY-MM-DD'; the 5-hour shift presumably converts an
    # EST-midnight cutoff into UTC -- TODO confirm the intended timezone.
    date = datetime.datetime(*[int(i) for i in args.date.split('-')])
    date -= datetime.timedelta(hours=5)
    tstamp = time.mktime(date.timetuple())
    #Download
    msgs = []
    req = cio_requests.get(
        'https://api.context.io/2.0/accounts/' + aid + '/messages',
        params={'folder': 'thelistserve',
                'include_body': 1,
                'body_type': 'text/plain',
                'date_after': tstamp,
                'sort_order': 'asc',
                }
    )
    # A successful response is a JSON list of messages; anything else
    # (empty body, error payload) aborts without writing.
    if req.json():
        msgs += req.json()
    else:
        print "did not receive json!"
        print "%r" % req.content
        print "terminating without writing out"
        return
    posts = [Post.from_cio_message(m) for m in msgs]
    _write_out(posts)
def rebuild_from_yaml(args):
    """Write out all files using yaml representations in ``_posts/*.html``."""
    git_checkout_branch('gh-pages')
    posts = []
    for fname in glob('_posts/*.html'):
        with codecs.open(fname, 'r', 'utf-8') as f:
            c = f.read()
        # we only want the yaml frontmatter
        # (the text between the first '---' and the last '---')
        start = c.index('---') + 3
        end = c.rindex('---')
        frontmatter = yaml.safe_load(c[start:end])
        posts.append(Post(**frontmatter['api_data']['post']))
    # Only the supporting files are rewritten; the _posts/*.html sources
    # we just read are left alone (yaml=False).
    _write_out(posts, yaml=False, supporting=True)
def add_manually(args):
    """Interactively prompt for one or more posts, then write them out.

    Python 2: uses raw_input for the continue prompt."""
    entering = True
    posts = []
    while entering:
        posts.append(get_post_from_user())
        entering = raw_input('again? (y/n): ') == 'y'
    _write_out(posts)
def get_post_from_user():
    """Prompt on stdin for a post's subject, author, body and date and
    return a Post built from them.

    Each field is multi-line input terminated by a line containing only
    the sentinel '<stop>'.  The whole entry is repeated until the user
    confirms it.
    """
    ok = False
    sentinel = '<stop>'
    post_kwargs = {
        'subject': None,
        'author': None,
        'body': None,
        'date': None,
    }
    print "enter %r on a line by itself to end input" % sentinel
    while not ok:
        for param in post_kwargs.keys():
            print
            print "%s:" % param
            # iter(callable, sentinel) keeps reading lines until one
            # equals the sentinel.
            entered = '\n'.join(iter(raw_input, sentinel))
            post_kwargs[param] = entered.decode(sys.stdin.encoding)
        # date is typed as 'YYYY-MM-DD'; Post takes a list of ints.
        date_list = [int(i) for i in post_kwargs['date'].split('-')]
        post_kwargs['date'] = date_list
        print
        pprint.pprint(post_kwargs)
        ok = raw_input('confirm (y/n) :') == 'y'
    return Post(**post_kwargs)
def main():
    """Parse the command line and dispatch to the chosen sub-command."""
    arg_parser = argparse.ArgumentParser(
        description='A tool to manually patch in posts.')
    subcommands = arg_parser.add_subparsers(help='commands')

    dl_parser = subcommands.add_parser(
        'dl_after',
        help='Download posts through cIO.')
    dl_parser.add_argument('date', help='date in YYYY-MM-DD form')
    dl_parser.set_defaults(func=dl_after)

    yaml_parser = subcommands.add_parser(
        'rebuild_from_yaml',
        help='Rebuild all files from from _posts/*.html.')
    yaml_parser.set_defaults(func=rebuild_from_yaml)

    manual_parser = subcommands.add_parser(
        'add_manually',
        help='Create post files by manually entering post content.')
    manual_parser.set_defaults(func=add_manually)

    parsed = arg_parser.parse_args()
    parsed.func(parsed)


if __name__ == '__main__':
    main()
| {
"repo_name": "simon-weber/the-listserve-archive",
"path": "bootstrap.py",
"copies": "1",
"size": "4775",
"license": "mit",
"hash": 6065262873768246000,
"line_mean": 24.5347593583,
"line_max": 78,
"alpha_frac": 0.5832460733,
"autogenerated": false,
"ratio": 3.7072981366459627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4790544209945963,
"avg_score": null,
"num_lines": null
} |
"""aliveim URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from landing import views as landing_views
from registration.views import UserRegistrationView, UserRegistrationThanksView
urlpatterns = [
    # Admin is intentionally disabled; re-enable by uncommenting.
    #url(r'^admin/', include(admin.site.urls)),
    url(r'^$', landing_views.index, name='index'),
    url(r'^register/$', UserRegistrationView.as_view(), name='register'),
    url(r'^thanks/$', UserRegistrationThanksView.as_view(), name='thanks'),
]
| {
"repo_name": "aliveim/aliveim",
"path": "aliveim/aliveim/urls.py",
"copies": "1",
"size": "1083",
"license": "mit",
"hash": 1181806000568783000,
"line_mean": 39.1111111111,
"line_max": 79,
"alpha_frac": 0.7109879963,
"autogenerated": false,
"ratio": 3.5742574257425743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781387397351216,
"avg_score": 0.0007716049382716049,
"num_lines": 27
} |
"""A Live Parsed JSON Object"""
import os
import json
import pickle
import collections
from datetime import datetime
class LiveJSON(collections.MutableMapping):
    """A dict-like mapping persisted to a JSON (or pickle) file.

    Reads reload the backing file when it changed on disk since the last
    sync; writes merge any newer on-disk content, then save.

    NOTE(review): ``collections.MutableMapping`` moved to
    ``collections.abc`` and the alias was removed in Python 3.10 --
    confirm the supported Python versions.
    """
    def __init__(self, filepath, use_pickle=False, *args, **kwargs):
        # filepath: backing file for the mapping.
        # use_pickle: serialize with pickle (binary mode) instead of JSON.
        self.filepath = filepath
        self.use_pickle = use_pickle
        self.binary = 'b' if use_pickle else ''
        self.last_update = self.__getmtime()
        self._store = {}
        self.update(dict(*args, **kwargs))
    def __getmtime(self):
        """Return the backing file's mtime, or now() if it can't be read."""
        try:
            time = datetime.fromtimestamp(os.path.getmtime(self.filepath))
        except IOError:
            # NOTE(review): os.path.getmtime raises OSError; on Python 2
            # OSError is not an IOError, so a missing file would escape
            # this handler there -- confirm target version.
            time = datetime.now()
        return time
    def __load_json(self):
        """Load properties from file"""
        try:
            # If the file changed after our last sync, take its contents
            # wholesale; otherwise merge them over the in-memory store.
            load_type = self.__getmtime() > self.last_update
            with open(self.filepath, 'r' + self.binary) as file:
                if self.use_pickle:
                    temp = pickle.load(file)
                else:
                    temp = json.load(file)
            if load_type:
                self._store = temp
            else:
                self._store.update(temp)
        except IOError:
            # Missing/unreadable file: keep the in-memory store as-is.
            pass
    def __save_json(self):
        """Sync the store to disk, folding in newer on-disk changes first."""
        if self.last_update < self.__getmtime():
            self.__load_json()
        with open(self.filepath, 'w' + self.binary) as file:
            if self.use_pickle:
                pickle.dump(self._store, file)
            else:
                json.dump(self._store, file, indent=2, sort_keys=True)
        self.last_update = self.__getmtime()
    def __getitem__(self, key):
        self.__load_json()
        return self._store[self.__keytransform__(key)]
    def __setitem__(self, key, value):
        self._store[self.__keytransform__(key)] = value
        self.__save_json()
    def __delitem__(self, key):
        # Deleting a missing key is a no-op rather than a KeyError.
        if key in self._store:
            del self._store[self.__keytransform__(key)]
        self.__save_json()
    def __iter__(self):
        self.__load_json()
        return iter(self._store)
    def __len__(self):
        self.__load_json()
        return len(self._store)
    def __keytransform__(self, key):
        # Identity hook; subclasses may normalize keys here.
        return key
| {
"repo_name": "bayangan1991/pinger",
"path": "livejson/__init__.py",
"copies": "1",
"size": "2210",
"license": "mit",
"hash": 7963780343008490000,
"line_mean": 28.0789473684,
"line_max": 74,
"alpha_frac": 0.5316742081,
"autogenerated": false,
"ratio": 4.138576779026217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
"""A live poll for Twitch chat.
Votes are cast with hashtags, i.e. messages starting with the
pound-sign (#) and containing no spaces.
Example:
#thisisavote
"""
from __future__ import division
from pubsub import pub
import wx
import plugins
class Poll(object):
    """Tracks one vote per voter; a revote moves the voter's vote."""

    def __init__(self):
        self.votes = {}        # voter -> Choice the voter currently backs
        self.totalvotes = 0    # number of distinct voters
        self._choices = {}     # choice name -> Choice

    def vote(self, voter, choice):
        """Cast (or move) *voter*'s vote to *choice*."""
        previous = self.votes.get(voter)
        if previous is None:
            # First vote from this voter.
            self.totalvotes += 1
        else:
            previous.votes -= 1
            # Drop a choice nobody backs any more, unless the voter is
            # re-casting the same one (it gets the vote right back).
            if previous.votes == 0 and previous.name != choice:
                del self._choices[previous.name]
        selected = self._choices.get(choice)
        if selected is None:
            selected = Choice(choice)
            self._choices[choice] = selected
        selected.votes += 1
        self.votes[voter] = selected

    def choices(self):
        """Return Choice objects sorted by votes (desc), percents updated."""
        ranked = sorted(
            self._choices.values(), key=lambda c: c.votes, reverse=True)
        for entry in ranked:
            entry.percent = entry.votes / self.totalvotes * 100
        return ranked
class Choice(object):
    """One poll option: its hashtag text, vote count and vote share."""

    def __init__(self, name):
        self.name = name       # the hashtag text
        self.votes = 0         # current number of backers
        self.percent = 0.0     # share of total votes, set by Poll.choices()
class View(plugins.View):
    """wx view rendering up to 10 poll choices in a three-column grid
    (name, votes, percent)."""
    def oninit(self):
        # Build a 10x3 grid of white StaticTexts on the plugin panel.
        self.sizer = wx.BoxSizer()
        gridsizer = wx.GridSizer(rows=10, cols=3, hgap=1, vgap=1)
        self.sizer.Add(gridsizer, 1, wx.EXPAND)
        self.choicetexts = []
        for _ in range(10):
            name = wx.StaticText(self.panel)
            name.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
            name.SetForegroundColour((255, 255, 255))
            votes = wx.StaticText(self.panel)
            votes.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
            votes.SetForegroundColour((255, 255, 255))
            percent = wx.StaticText(self.panel)
            percent.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
            percent.SetForegroundColour((255, 255, 255))
            gridsizer.Add(name, 1, wx.ALIGN_LEFT)
            gridsizer.Add(votes, 1, wx.ALIGN_RIGHT)
            gridsizer.Add(percent, 1, wx.ALIGN_RIGHT)
            self.choicetexts.append((name, votes, percent))
        self.panel.SetSizer(self.sizer)
    def setchoices(self, choices):
        # Fill the grid with the first (top-ranked) 10 choices; no-op on
        # empty input.
        if not choices:
            return
        for i, choice in enumerate(choices):
            if i == 10:
                break
            self.choicetexts[i][0].SetLabel(choice.name)
            self.choicetexts[i][1].SetLabel(str(choice.votes))
            self.choicetexts[i][2].SetLabel(
                '%' + str(round(choice.percent, 1)))
        self.sizer.Layout()
class Plugin(plugins.Plugin):
    """Poll plugin: counts hashtag votes from chat messages."""
    name = 'Poll'
    viewtype = View
    def onopen(self):
        # Fresh poll per session; listen on the chat message topic.
        self.poll = Poll()
        pub.subscribe(self.onchatmessage, "service.chat.message")
    def onclose(self):
        pub.unsubscribe(self.onchatmessage, "service.chat.message")
        self.poll = None
    def onchatmessage(self, user, message):
        # A vote is a single-word message starting with '#'.
        if message and message.startswith("#") and not ' ' in message:
            self.poll.vote(user, message)
            self.view.setchoices(self.poll.choices())
| {
"repo_name": "ronald-d-rogers/twitchy",
"path": "plugins/poll.py",
"copies": "1",
"size": "3269",
"license": "bsd-2-clause",
"hash": -8789658703851810000,
"line_mean": 26.7033898305,
"line_max": 72,
"alpha_frac": 0.5747935148,
"autogenerated": false,
"ratio": 3.736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9800662815349057,
"avg_score": 0.0020261398901885892,
"num_lines": 118
} |
"""A live word cloud for Twitch chat.
"""
from __future__ import division
from collections import deque
from pubsub import pub
import wx
import gui
import plugins
class Tail(object):
    """Sliding window over the last *length* chat lines, tracking each
    word's occurrence count and a recency-weighted 'frecency' score."""
    def __init__(self, length=100):
        self._words = {}                    # word text -> Word
        self._tail = deque(maxlen=length)   # recent lines (lists of Word)
        self._length = length
    def add(self, line):
        # line: list of unique word strings; empty lines are ignored.
        if not line:
            return
        # Decay: every tracked word loses one point per occurrence on
        # each new line.
        for word in self._words.values():
            word.frecency -= word.occurences
        newline = []
        for key in line:
            if key in self._words:
                word = self._words[key]
            else:
                word = Word(key)
                self._words[key] = word
            word.occurences += 1
            word.frecency += self._length
            newline.append(word)
        # When the deque is full the append below evicts the oldest line;
        # un-count its words first and forget ones no longer present.
        if len(self._tail) == self._length:
            for word in self._tail[0]:
                word.occurences -= 1
                if word.occurences == 0:
                    del self._words[word.word]
        self._tail.append(newline)
    def words(self):
        # All tracked words, highest frecency first.
        return sorted(self._words.values(), key=lambda w: w.frecency, reverse=True)
class Word(object):
    """A tracked word with its occurrence count and frecency score."""

    def __init__(self, word):
        self.word = word       # the word text itself
        self.occurences = 0    # appearances in the current tail window
        self.frecency = 0      # recency-weighted score, managed by Tail
class View(plugins.View):
    """wx view rendering up to 30 words, font size scaled by frecency."""
    def oninit(self):
        self.limit = 30
        self.sizer = gui.FlowSizer()
        self.sizer.SetLineSpacing(4)
        self.iswindows = 'wxMSW' in wx.PlatformInfo
        self.wordtexts = []
        for _ in range(self.limit):
            ctrl = wx.StaticText(self.panel, -1, '', style=wx.ALIGN_RIGHT)
            ctrl.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
            ctrl.SetForegroundColour((255, 255, 255))
            self.sizer.Add(ctrl, 0, flag=wx.LEFT | wx.RIGHT, border=3)
            self.wordtexts.append(ctrl)
        self.panel.SetSizer(self.sizer)
    def setwords(self, words):
        # words: Word objects sorted by frecency (highest first).
        if not words:
            return
        words = words[:self.limit]
        min_fontsize = 10
        max_fontsize = 30
        # Both ranges below are negative (min - max), so the ratio in the
        # interpolation is positive: highest frecency -> max_fontsize.
        fontsize_range = min_fontsize - max_fontsize
        default_fontsize = min_fontsize + fontsize_range / 2
        min_frecency = words[-1].frecency
        max_frecency = words[0].frecency
        frecency_range = min_frecency - max_frecency
        for i, word in enumerate(words):
            if frecency_range:
                fontsize = min_fontsize + \
                    (((word.frecency - min_frecency) / frecency_range) * fontsize_range)
            else:
                # All words tied: use the midpoint size for everything.
                fontsize = default_fontsize
            # Windows wxPython hack
            if self.iswindows:
                self.wordtexts[i].SetFont(
                    wx.Font(fontsize, wx.ROMAN, wx.NORMAL, wx.NORMAL))
            self.wordtexts[i].SetFont(
                wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL))
            self.wordtexts[i].SetLabel(word.word)
        self.sizer.Layout()
# Common English stop words and chat contractions that never make the cloud.
IGNORE_DEFAULT = [
    "a", "about", "all", "am", "an", "and", "any", "anything", "are", "as",
    "at", "be", "because", "been", "but", "can", "can't", "cant", "come",
    "could", "couldn't", "couldnt", "did", "didn't", "didnt", "do", "don't",
    "dont", "for", "from", "get", "go", "going", "good", "got", "had", "has",
    "have", "he", "her", "here", "he's", "hes", "hey", "him", "his", "how",
    "i", "i'd", "if", "i'll", "ill", "i'm", "im", "in", "is", "it", "it's",
    "its", "ive", "i've", "just", "like", "look", "me", "my", "no", "not",
    "now", "of", "oh", "ok", "okay", "on", "one", "or", "out", "see", "she",
    "she's", "shes", "so", "some", "than", "that", "that's", "thats", "the",
    "them", "then", "there", "theres", "there's", "they", "they're", "theyre",
    "think", "this", "time", "to", "u", "up", "want", "was", "we", "well",
    "were", "what", "when", "where", "who", "why", "will", "with", "would",
    "yeah", "yes", "you", "your", "you're", "youre"]
class Plugin(plugins.Plugin):
    """Word Cloud plugin: feeds chat messages into a Tail and renders the
    top words."""
    name = 'Word Cloud'
    viewtype = View
    def oninit(self):
        # Drop whole messages containing links; ignore stop words and
        # common Twitch emotes; strip punctuation from word edges.
        self.filters = [lambda x: not "http://" in x and not "https://" in x]
        self.ignore = IGNORE_DEFAULT + ["kappa", "keepo", "kreygasm"]
        self.strip = " .;\"',:?!(){}/*-+=<>"
    def onopen(self):
        self.tail = Tail()
        pub.subscribe(self.onchatmessage, "service.chat.message")
    def onclose(self):
        pub.unsubscribe(self.onchatmessage, "service.chat.message")
        self.tail = None
    def onchatmessage(self, user, message):
        if not message:
            return
        for filter_ in self.filters:
            if not filter_(message):
                return
        line = []
        message = message.strip().split()
        for word in message:
            try:
                # NOTE(review): str.decode is Python 2 only -- this drops
                # non-ascii words; confirm the target Python version.
                word.decode('ascii')
            except UnicodeDecodeError:
                pass
            else:
                word = word.strip(self.strip).lower()
                if word and not word in line and not word in self.ignore:
                    line.append(word)
        if len(line):
            self.tail.add(line)
            self.view.setwords(self.tail.words())
| {
"repo_name": "ronald-d-rogers/twitchy",
"path": "plugins/wordcloud.py",
"copies": "1",
"size": "5135",
"license": "bsd-2-clause",
"hash": 6952330343516529000,
"line_mean": 30.1212121212,
"line_max": 88,
"alpha_frac": 0.5199610516,
"autogenerated": false,
"ratio": 3.4256170780520345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9444078895006733,
"avg_score": 0.0002998469290604122,
"num_lines": 165
} |
# -- ==alkane== --
import mbuild as mb
from mbuild.lib.moieties import CH2
from mbuild.lib.moieties import CH3
class Alkane(mb.Compound):
    """An alkane which may optionally end with a hydrogen or a Port."""
    def __init__(self, n=3, cap_front=True, cap_end=True):
        """Initialize an Alkane Compound.

        Args:
            n: Number of carbon atoms.
            cap_front: Add methyl group to beginning of chain ('down' port).
            cap_end: Add methyl group to end of chain ('up' port).

        Raises:
            ValueError: if n is less than 2.
        """
        if n < 2:
            # Bug fix: the message used to read 'n must be 1 or more',
            # contradicting the n < 2 check above.
            raise ValueError('n must be 2 or more')
        super(Alkane, self).__init__()
        # Adjust length of Polymer for absence of methyl terminations.
        if not cap_front:
            n += 1
        if not cap_end:
            n += 1
        chain = mb.Polymer(CH2(), n=n-2, port_labels=('up', 'down'))
        self.add(chain, 'chain')
        if cap_front:
            self.add(CH3(), "methyl_front")
            mb.force_overlap(move_this=self['chain'],
                             from_positions=self['chain']['up'],
                             to_positions=self['methyl_front']['up'])
        else:
            # Hoist port label to Alkane level.
            self.add(chain['up'], 'up', containment=False)
        if cap_end:
            self.add(CH3(), 'methyl_end')
            mb.force_overlap(self['methyl_end'], self['methyl_end']['up'], self['chain']['down'])
        else:
            # Hoist port label to Alkane level.
            self.add(chain['down'], 'down', containment=False)
# -- ==alkane== -- | {
"repo_name": "summeraz/mbuild",
"path": "mbuild/examples/alkane/alkane.py",
"copies": "4",
"size": "1577",
"license": "mit",
"hash": 4848906609465500000,
"line_mean": 33.3043478261,
"line_max": 97,
"alpha_frac": 0.5320228282,
"autogenerated": false,
"ratio": 3.5358744394618835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6067897267661884,
"avg_score": null,
"num_lines": null
} |
__all__ = ['abbrevs', 'states', 'mod_acttype_map', 'get_binding_site_name',
'get_mod_site_name']
from indra.statements import *
from indra.ontology.bio import bio_ontology
from .common import _n
# Short strings used when building site names for each modification type.
abbrevs = {
    'phosphorylation': 'phospho',
    'ubiquitination': 'ub',
    'farnesylation': 'farnesyl',
    'hydroxylation': 'hydroxyl',
    'acetylation': 'acetyl',
    'sumoylation': 'sumo',
    'glycosylation': 'glycosyl',
    'methylation': 'methyl',
    'ribosylation': 'ribosyl',
    'geranylgeranylation': 'geranylgeranyl',
    'palmitoylation': 'palmitoyl',
    'myristoylation': 'myryl',
    'modification': 'mod',
}
# Two-state site values per modification type: phosphorylation uses
# (u)nmodified/(p)hosphorylated, all others a plain no/yes pair.
states = {
    'phosphorylation': ['u', 'p'],
    'ubiquitination': ['n', 'y'],
    'farnesylation': ['n', 'y'],
    'hydroxylation': ['n', 'y'],
    'acetylation': ['n', 'y'],
    'sumoylation': ['n', 'y'],
    'glycosylation': ['n', 'y'],
    'methylation': ['n', 'y'],
    'geranylgeranylation': ['n', 'y'],
    'palmitoylation': ['n', 'y'],
    'myristoylation': ['n', 'y'],
    'ribosylation': ['n', 'y'],
    'modification': ['n', 'y'],
}
# Activity type of the modifying enzyme for each modification Statement
# class; only (de)phosphorylation maps to a dedicated activity.
mod_acttype_map = {
    Phosphorylation: 'kinase',
    Dephosphorylation: 'phosphatase',
    Hydroxylation: 'catalytic',
    Dehydroxylation: 'catalytic',
    Sumoylation: 'catalytic',
    Desumoylation: 'catalytic',
    Acetylation: 'catalytic',
    Deacetylation: 'catalytic',
    Glycosylation: 'catalytic',
    Deglycosylation: 'catalytic',
    Ribosylation: 'catalytic',
    Deribosylation: 'catalytic',
    Ubiquitination: 'catalytic',
    Deubiquitination: 'catalytic',
    Farnesylation: 'catalytic',
    Defarnesylation: 'catalytic',
    Palmitoylation: 'catalytic',
    Depalmitoylation: 'catalytic',
    Myristoylation: 'catalytic',
    Demyristoylation: 'catalytic',
    Geranylgeranylation: 'catalytic',
    Degeranylgeranylation: 'catalytic',
    Methylation: 'catalytic',
    Demethylation: 'catalytic',
}
def get_binding_site_name(agent):
    """Return a binding site name from a given agent.

    Uses the name of the agent's top-level ontology parent when the agent
    is grounded to HGNC or FPLX, otherwise falls back to the agent's own
    name, normalized via _n and lower-cased.
    """
    # Try to construct a binding site name based on parent
    grounding = agent.get_grounding()
    # We don't want to accidentally deal with very deep ontological
    # cases here such as CHEBI (e.g., GTP) which requires thousands
    # of lookups to resolve
    if grounding != (None, None) and grounding[0] in {'HGNC', 'FPLX'}:
        # Bug fix: removed a leftover debug print() that wrote to stdout
        # on every call from library code.
        top_parents = bio_ontology.get_top_level_parents(*grounding)
        if top_parents:
            parent_name = bio_ontology.get_name(*top_parents[0])
            if parent_name:
                return _n(parent_name).lower()
    # Fall back to the agent's own name.
    return _n(agent.name).lower()
def get_mod_site_name(mod_condition):
    """Return site names for a modification."""
    # Use the residue (e.g. 'T') when known, otherwise an abbreviation
    # of the modification type (e.g. 'phospho').
    if mod_condition.residue is None:
        site = abbrevs[mod_condition.mod_type]
    else:
        site = mod_condition.residue
    position = mod_condition.position
    if position is None:
        position = ''
    return '%s%s' % (site, position)
| {
"repo_name": "johnbachman/belpy",
"path": "indra/assemblers/pysb/sites.py",
"copies": "1",
"size": "3029",
"license": "mit",
"hash": 4298471577382894000,
"line_mean": 31.5698924731,
"line_max": 75,
"alpha_frac": 0.6266094421,
"autogenerated": false,
"ratio": 2.940776699029126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9067386141129126,
"avg_score": 0,
"num_lines": 93
} |
__all__ = ['abbrevs', 'states', 'mod_acttype_map', 'get_binding_site_name',
'get_mod_site_name']
from indra.statements import *
from indra.tools.expand_families import _agent_from_uri
from indra.preassembler.hierarchy_manager import hierarchies
from .common import _n
# Short strings used when building site names for each modification type.
abbrevs = {
    'phosphorylation': 'phospho',
    'ubiquitination': 'ub',
    'farnesylation': 'farnesyl',
    'hydroxylation': 'hydroxyl',
    'acetylation': 'acetyl',
    'sumoylation': 'sumo',
    'glycosylation': 'glycosyl',
    'methylation': 'methyl',
    'ribosylation': 'ribosyl',
    'geranylgeranylation': 'geranylgeranyl',
    'palmitoylation': 'palmitoyl',
    'myristoylation': 'myryl',
    'modification': 'mod',
}
# Two-state site values per modification type: phosphorylation uses
# (u)nmodified/(p)hosphorylated, all others a plain no/yes pair.
states = {
    'phosphorylation': ['u', 'p'],
    'ubiquitination': ['n', 'y'],
    'farnesylation': ['n', 'y'],
    'hydroxylation': ['n', 'y'],
    'acetylation': ['n', 'y'],
    'sumoylation': ['n', 'y'],
    'glycosylation': ['n', 'y'],
    'methylation': ['n', 'y'],
    'geranylgeranylation': ['n', 'y'],
    'palmitoylation': ['n', 'y'],
    'myristoylation': ['n', 'y'],
    'ribosylation': ['n', 'y'],
    'modification': ['n', 'y'],
}
# Activity type of the modifying enzyme for each modification Statement
# class; only (de)phosphorylation maps to a dedicated activity.
mod_acttype_map = {
    Phosphorylation: 'kinase',
    Dephosphorylation: 'phosphatase',
    Hydroxylation: 'catalytic',
    Dehydroxylation: 'catalytic',
    Sumoylation: 'catalytic',
    Desumoylation: 'catalytic',
    Acetylation: 'catalytic',
    Deacetylation: 'catalytic',
    Glycosylation: 'catalytic',
    Deglycosylation: 'catalytic',
    Ribosylation: 'catalytic',
    Deribosylation: 'catalytic',
    Ubiquitination: 'catalytic',
    Deubiquitination: 'catalytic',
    Farnesylation: 'catalytic',
    Defarnesylation: 'catalytic',
    Palmitoylation: 'catalytic',
    Depalmitoylation: 'catalytic',
    Myristoylation: 'catalytic',
    Demyristoylation: 'catalytic',
    Geranylgeranylation: 'catalytic',
    Degeranylgeranylation: 'catalytic',
    Methylation: 'catalytic',
    Demethylation: 'catalytic',
}
def get_binding_site_name(agent):
    """Return a binding site name from a given agent.

    Uses the name of the agent's highest-level parent in the entity
    hierarchy when one exists, otherwise the agent's own name; either
    way the name is normalized via _n and lower-cased.
    """
    # Try to construct a binding site name based on parent
    grounding = agent.get_grounding()
    if grounding != (None, None):
        uri = hierarchies['entity'].get_uri(grounding[0], grounding[1])
        # Get highest level parents in hierarchy
        parents = hierarchies['entity'].get_parents(uri, 'top')
        if parents:
            # Choose the first parent if there are more than one
            parent_uri = sorted(parents)[0]
            parent_agent = _agent_from_uri(parent_uri)
            binding_site = _n(parent_agent.name).lower()
            return binding_site
    # Fall back to Agent's own name if one from parent can't be constructed
    binding_site = _n(agent.name).lower()
    return binding_site
def get_mod_site_name(mod_condition):
    """Return site names for a modification."""
    # Known residue (e.g. 'T') wins; otherwise abbreviate the mod type.
    mod_str = (mod_condition.residue
               if mod_condition.residue is not None
               else abbrevs[mod_condition.mod_type])
    mod_pos = '' if mod_condition.position is None else mod_condition.position
    return '%s%s' % (mod_str, mod_pos)
| {
"repo_name": "pvtodorov/indra",
"path": "indra/assemblers/pysb/sites.py",
"copies": "2",
"size": "3177",
"license": "bsd-2-clause",
"hash": 8254361075936285000,
"line_mean": 32.09375,
"line_max": 75,
"alpha_frac": 0.6307837583,
"autogenerated": false,
"ratio": 3.022835394862036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9653619153162036,
"avg_score": 0,
"num_lines": 96
} |
# All abilities available
from ..champion.ability import AbilityRawDamage, \
AbilityOverTime, \
AbilityHeal
# Abilities for Ahri
ahri_q = AbilityRawDamage('Orb of Deception',
                          cooldown=0,
                          damage=10)
ahri_w = AbilityOverTime('Foxfire bite',
                         cooldown=3,
                         turns=3,
                         damage=25)
ahri_e = AbilityHeal('Charm',
                     cooldown=7,
                     health=20)
ahri_r = AbilityRawDamage('Dash',
                          cooldown=12,
                          damage=40)
# Abilities for Kata
kata_q = AbilityRawDamage('Bounding Blades',
                          cooldown=0,
                          damage=8)
kata_w = AbilityOverTime('Sinister Steel',
                         cooldown=3,
                         turns=2,
                         damage=35)
kata_e = AbilityHeal('Shunpo',
                     cooldown=6,
                     health=15)
kata_r = AbilityRawDamage('Death Lotus',
                          cooldown=15,
                          damage=70)
# Abilities for Veigar
veigar_q = AbilityRawDamage('Baleful Strike',
                            cooldown=0,
                            damage=10)
veigar_w = AbilityOverTime('Dark Matter',
                           cooldown=3,
                           turns=3,
                           damage=25)
# Bug fix: ability name was misspelled 'Eevnt Horizon'.
veigar_e = AbilityHeal('Event Horizon',
                       cooldown=7,
                       health=20)
veigar_r = AbilityRawDamage('Primordial Burst',
                            cooldown=12,
                            damage=40)
# Abilities for Kassadin
kassadin_q = AbilityRawDamage('Null Sphere',
                              cooldown=0,
                              damage=10)
kassadin_w = AbilityOverTime('Nether Blade',
                             cooldown=3,
                             turns=3,
                             damage=25)
kassadin_e = AbilityHeal('Force Pulse',
                         cooldown=7,
                         health=20)
kassadin_r = AbilityRawDamage('Riftwalk',
                              cooldown=12,
                              damage=40)
"repo_name": "JakeCowton/Pok-e-Lol",
"path": "environment/list_of_abilities.py",
"copies": "1",
"size": "1555",
"license": "mit",
"hash": 3593394670927041500,
"line_mean": 18.9487179487,
"line_max": 50,
"alpha_frac": 0.6167202572,
"autogenerated": false,
"ratio": 2.5617792421746293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8328574027436054,
"avg_score": 0.06998509438771507,
"num_lines": 78
} |
""" All about Artificial star tests """
from __future__ import print_function
import argparse
import logging
import os
from astropy.io import fits
import re
import sys
import matplotlib.pylab as plt
import numpy as np
from scipy.interpolate import interp1d
from .. import astronomy_utils
logger = logging.getLogger(__name__)
__all__ = ['ast_correct_starpop', 'ASTs', 'parse_pipeline']
plt.style.use('ggplot')
def parse_pipeline(filename):
    '''Find the target name and filter names encoded in *filename*.

    Parameters
    ----------
    filename : str
        pipeline-style file name, e.g. ``PROP_TARGET_F814W_F555W.dat``;
        filters are introduced by '_F' or '-F' and look like F814W.

    Returns
    -------
    target : str or None
        last token before the first filter that is neither numeric
        (proposal ID) nor 'IR'; None if no such token exists.
    filters : list of str
        filter names in the order they appear in the file name.
    '''
    name = os.path.split(filename)[1].upper()
    # filters are assumed to be like F814W
    starts = [m.start() + 1 for m in re.finditer('_F', name)]
    starts.extend([m.start() + 1 for m in re.finditer('-F', name)])
    # Bug fix: sort the match positions so the prefix below stops at the
    # *first* filter and filters come out in filename order (the old
    # code concatenated '_F' and '-F' matches unsorted).
    starts = np.array(sorted(starts))
    filters = [name[s: s + 5] for s in starts]
    # Bug fix: keep only real filter names (second character a digit).
    # The old loop popped from the list while enumerating it, which
    # skipped the element after each removal (e.g. consecutive FIELD
    # tokens could slip through).
    filters = [f for f in filters if f[1:2].isdigit()]
    # the target name is assumed to be before the filters in the filename
    pref = name[:starts[0] - 1]
    target = None
    for t in pref.split('_'):
        if t == 'IR':
            continue
        try:
            # this could be the proposal ID
            int(t)
        except ValueError:
            # a mix of str and int should be the target
            target = t
    return target, filters
def ast_correct_starpop(sgal, fake_file=None, outfile=None, overwrite=False,
                        asts_obj=None, correct_kw={}, diag_plot=False,
                        plt_kw={}, hdf5=True, correct='both'):
    '''
    correct mags with artificial star tests, finds filters by fake_file name

    Parameters
    ----------
    sgal : galaxies.SimGalaxy or StarPop instance
        must have apparent mags (corrected for dmod and Av)
    fake_file : string
        matchfake file
    outfile : string
        if sgal, a place to write the table with ast_corrections
    overwrite : bool
        if sgal and outfile, overwrite if outfile exists
    asts_obj : AST instance
        if not loading from fake_file
    correct_kw : dict
        passed to ASTs.correct important to consider, dxy, xrange, yrange
        see AST.correct.__doc__
    diag_plot : bool
        make a mag vs mag diff plot
    plt_kw :
        kwargs to pass to pylab.plot
    correct : 'both' 'filter1' 'filter2'
        specify which filters get corrections

    Returns
    -------
    adds corrected mag1 and mag2
    If sgal, adds columns to sgal.data
    '''
    fmt = '{}_cor'
    if asts_obj is None:
        sgal.fake_file = fake_file
        # NOTE(review): parse_pipeline returns a (target, filters)
        # 2-tuple, so this 3-name unpack looks like it would raise
        # ValueError -- confirm against parse_pipeline's contract.
        _, filter1, filter2 = parse_pipeline(fake_file)
        # Short-circuit when corrections are already present.
        if fmt.format(filter1) in sgal.data.keys() or fmt.format(filter2) in sgal.data.keys():
            errfmt = '{}, {} ast corrections already in file.'
            logger.warning(errfmt.format(filter1, filter2))
            return sgal.data[fmt.format(filter1)], sgal.data[fmt.format(filter2)]
        ast = ASTs(fake_file)
    else:
        ast = asts_obj
    mag1 = sgal.data[ast.filter1]
    mag2 = sgal.data[ast.filter2]
    # NOTE(review): dict(a.items() + b.items()) works only on Python 2;
    # dict_items does not support '+' on Python 3.
    correct_kw = dict({'dxy': (0.2, 0.15)}.items() + correct_kw.items())
    # NOTE(review): ASTs defines ast_correction(); confirm a 'correct'
    # method actually exists.
    cor_mag1, cor_mag2 = ast.correct(mag1, mag2, **correct_kw)
    if correct == 'filter2':
        logger.info('adding corrections for {}'.format(ast.filter2))
        names = [fmt.format(ast.filter2)]
        data = [cor_mag2]
    elif correct == 'filter1':
        logger.info('adding corrections for {}'.format(ast.filter1))
        names = [fmt.format(ast.filter1)]
        data = [cor_mag1]
    else:
        logger.info('adding corrections for {}, {}'.format(ast.filter1, ast.filter2))
        names = [fmt.format(ast.filter1), fmt.format(ast.filter2)]
        data = [cor_mag1, cor_mag2]
    sgal.add_data(names, data)
    if outfile is not None:
        sgal.write_data(outfile, overwrite=overwrite, hdf5=hdf5)
    if diag_plot:
        from ..fileio.fileIO import replace_ext
        plt_kw = dict({'color': 'navy', 'alpha': 0.3, 'label': 'sim'}.items() \
                      + plt_kw.items())
        axs = ast.magdiff_plot()
        mag1diff = cor_mag1 - mag1
        mag2diff = cor_mag2 - mag2
        # Only plot stars whose corrections are sane (within 10 mag).
        rec, = np.nonzero((np.abs(mag1diff) < 10) & (np.abs(mag2diff) < 10))
        axs[0].plot(mag1[rec], mag1diff[rec], '.', **plt_kw)
        axs[1].plot(mag2[rec], mag2diff[rec], '.', **plt_kw)
        if 'label' in plt_kw.keys():
            [ax.legend(loc=0, frameon=False) for ax in axs]
        plt.savefig(replace_ext(outfile, '_ast_correction.png'))
    return cor_mag1, cor_mag2
class ASTs(object):
'''class for reading and using artificial stars'''
    def __init__(self, filename, filter1=None, filter2=None, filt_extra=''):
        '''
        if filename has 'match' in it will assume this is a matchfake file.
        if filename has .fits extention will assume it's a binary fits table.
        '''
        self.base, self.name = os.path.split(filename)
        self.filter1 = filter1
        self.filter2 = filter2
        self.filt_extra = filt_extra
        # parse_pipeline returns (target, [filters]); two- and
        # three-filter file names are both supported.
        self.target, filters = parse_pipeline(filename)
        try:
            self.filter1, self.filter2 = filters
        # NOTE(review): bare except; a ValueError here means three
        # filters were parsed.
        except:
            self.filter1, self.filter2, self.filter3 = filters
        self.read_file(filename)
def recovered(self, threshold=9.99):
'''
find indicies of stars with magdiff < threshold
Parameters
----------
threshold: float
[9.99] magin - magout threshold for recovery
Returns
-------
self.rec: list
recovered stars in both filters
rec1, rec2: list, list
recovered stars in filter1, filter2
'''
rec1, = np.nonzero(np.abs(self.mag1diff) < threshold)
rec2, = np.nonzero(np.abs(self.mag2diff) < threshold)
self.rec = list(set(rec1) & set(rec2))
if len(self.rec) == len(self.mag1diff):
logger.warning('all stars recovered')
return rec1, rec2
    def make_hess(self, binsize=0.1, yattr='mag2diff', hess_kw={}):
        '''make hess grid of mag1diff - mag2diff color against *yattr*'''
        self.colordiff = self.mag1diff - self.mag2diff
        # yattr names the attribute used on the y axis (default mag2diff).
        mag = self.__getattribute__(yattr)
        self.hess = astronomy_utils.hess(self.colordiff, mag, binsize,
                                         **hess_kw)
    def read_file(self, filename):
        '''
        read MATCH fake file into attributes

        format is mag1in mag1diff mag2in mag2diff
        mag1 is assumed to be mag1in
        mag2 is assumed to be mag2in
        mag1diff is assumed to be mag1in-mag1out
        mag2diff is assumed to be mag2in-mag2out
        '''
        if not filename.endswith('.fits'):
            # Plain-text matchfake: four whitespace-separated columns.
            names = ['mag1', 'mag2', 'mag1diff', 'mag2diff']
            self.data = np.genfromtxt(filename, names=names)
            # unpack into attribues
            for name in names:
                self.__setattr__(name, self.data[name])
        else:
            # Binary FITS table: column names are derived from the filter
            # strings, so both must have been supplied.
            assert not None in [self.filter1, self.filter2], \
                'Must specify filter strings'
            self.data = fits.getdata(filename)
            self.mag1 = self.data['{}_IN'.format(self.filter1)]
            self.mag2 = self.data['{}_IN'.format(self.filter2)]
            mag1out = self.data['{}{}'.format(self.filter1, self.filt_extra)]
            mag2out = self.data['{}{}'.format(self.filter2, self.filt_extra)]
            self.mag1diff = self.mag1 - mag1out
            self.mag2diff = self.mag2 - mag2out
def write_matchfake(self, newfile):
'''write matchfake file'''
dat = np.array([self.mag1, self.mag2, self.mag1diff, self.mag2diff]).T
np.savetxt(newfile, dat, fmt='%.3f')
def bin_asts(self, binsize=0.2, bins=None):
'''
bin the artificial star tests
Parameters
----------
bins: bins for the asts
binsize: width of bins for the asts
Returns
-------
self.am1_inds, self.am2_inds: the indices of the bins to
which each value in mag1 and mag2 belong (see np.digitize).
self.ast_bins: bins used for the asts.
'''
if bins is None:
ast_max = np.max(np.concatenate((self.mag1, self.mag2)))
ast_min = np.min(np.concatenate((self.mag1, self.mag2)))
self.ast_bins = np.arange(ast_min, ast_max, binsize)
else:
self.ast_bins = bins
self.am1_inds = np.digitize(self.mag1, self.ast_bins)
self.am2_inds = np.digitize(self.mag2, self.ast_bins)
def _random_select(self, arr, nselections):
'''
randomly sample arr nselections times
Parameters
----------
arr : array or list
input to sample
nselections : int
number of times to sample
Returns
-------
rands : array
len(nselections) of randomly selected from arr (duplicates included)
'''
rands = np.array([np.random.choice(arr) for i in range(nselections)])
return rands
    def ast_correction(self, obs_mag1, obs_mag2, binsize=0.2, bins=None,
                       not_rec_val=np.nan, missing_data1=0., missing_data2=0.):
        '''
        Apply ast correction to input mags.
        Corrections are made by going through obs_mag1 in bins of
        bin_asts and randomly selecting magdiff values in that ast_bin.
        obs_mag2 simply follows along since it is tied to obs_mag1.
        Random selection was chosen because of the spatial nature of
        artificial star tests. If there are 400 asts in one mag bin,
        and 30 are not recovered, random selection should match the
        distribution (if there are many obs stars).
        If there are obs stars in a mag bin where there are no asts,
        will throw the star out unless the completeness in that mag bin
        is more than 50%.
        Parameters
        ----------
        obs_mag1, obs_mag2 : N, 1 arrays
            input observed mags
        binsize, bins : sent to bin_asts
        not_rec_val : float
            value assigned to stars that are not recovered
        missing_data1, missing_data2 : float, float
            value for data outside ast limits per filter (include=0)
        Returns
        -------
        cor_mag1, cor_mag2: array, array
            ast corrected magnitudes
        Raises:
            returns -1 if obs_mag1 and obs_mag2 are different sizes
        To do:
            Maybe not asssume combined_filters=True or completeness.
            A minor issue unless the depth of the individual filters are
            vastly different.
        '''
        # needed for self.fcomp2 in the nast == 0 branch below
        self.completeness(combined_filters=True, interpolate=True)
        nstars = obs_mag1.size
        if obs_mag1.size != obs_mag2.size:
            logger.error('mag arrays of different lengths')
            return -1
        # corrected mags start filled with not_rec_val (NaN by default);
        # any star never assigned below stays "not recovered".
        cor_mag1 = np.empty(nstars)
        cor_mag1.fill(not_rec_val)
        cor_mag2 = np.empty(nstars)
        cor_mag2.fill(not_rec_val)
        # need asts to be binned for this method.
        if not hasattr(self, 'ast_bins'):
            self.bin_asts(binsize=binsize, bins=bins)
        om1_inds = np.digitize(obs_mag1, self.ast_bins)
        for i in range(len(self.ast_bins)):
            # the obs and artificial stars in each bin
            obsbin, = np.nonzero(om1_inds == i)
            astbin, = np.nonzero(self.am1_inds == i)
            nobs = len(obsbin)
            nast = len(astbin)
            if nobs == 0:
                # no stars in this mag bin to correct
                continue
            if nast == 0:
                # no asts in this bin, probably means the simulation
                # is too deep
                if self.fcomp2(self.ast_bins[i]) < 0.5:
                    # < 50% complete here: leave stars as not_rec_val
                    continue
                else:
                    # model is producing stars where there was no data.
                    # assign correction for missing data
                    cor1 = missing_data1
                    cor2 = missing_data2
            else:
                # randomly select the appropriate ast correction for obs stars
                # in this bin (cor1/cor2 are arrays of length nobs)
                cor1 = self._random_select(self.mag1diff[astbin], nobs)
                cor2 = self._random_select(self.mag2diff[astbin], nobs)
            # apply corrections
            cor_mag1[obsbin] = obs_mag1[obsbin] + cor1
            cor_mag2[obsbin] = obs_mag2[obsbin] + cor2
        # finite values only: not implemented because trilegal array should
        # maintain the same size.
        #fin1, = np.nonzero(np.isfinite(cor_mag1))
        #fin2, = np.nonzero(np.isfinite(cor_mag2))
        #fin = list(set(fin1) & set(fin2))
        return cor_mag1, cor_mag2
def correct(self, obs_mag1, obs_mag2, bins=[100,200], xrange=[-0.5, 5.],
yrange=[15., 27.], not_rec_val=0., dxy=None):
"""
apply AST correction to obs_mag1 and obs_mag2
Parameters
----------
obs_mag1, obs_mag2 : arrays
input mags to correct
bins : [int, int]
bins to pass to graphics.plotting.crazy_histogram2d
xrange, yrange : shape 2, arrays
limits of cmd space send to graphics.plotting.crazy_histogram2d
since graphics.plotting.crazy_histogram2d is called twice it is
important to have same bin sizes
not_rec_val : float or nan
value to fill output arrays where obs cmd does not overlap with
ast cmd.
dxy : array shape 2,
color and mag step size to make graphics.plotting.crazy_histogram2d
Returns
-------
cor_mag1, cor_mag2 : arrays len obs_mag1, obs_mag2
corrections to obs_mag1 and obs_mag2
"""
from ..graphics.plotting import crazy_histogram2d as chist
nstars = obs_mag1.size
if obs_mag1.size != obs_mag2.size:
logger.error('mag arrays of different lengths')
return -1, -1
# corrected mags are filled with nan.
cor_mag1 = np.empty(nstars)
cor_mag1.fill(not_rec_val)
cor_mag2 = np.empty(nstars)
cor_mag2.fill(not_rec_val)
obs_color = obs_mag1 - obs_mag2
ast_color = self.mag1 - self.mag2
if dxy is not None:
# approx number of bins.
bins[0] = len(np.arange(*xrange, step=dxy[0]))
bins[1] = len(np.arange(*yrange, step=dxy[1]))
ckw = {'bins': bins, 'reverse_indices': True, 'xrange': xrange,
'yrange': yrange}
SH, _, _, sixy, sinds = chist(ast_color, self.mag2, **ckw)
H, _, _, ixy, inds = chist(obs_color, obs_mag2, **ckw)
x, y = np.nonzero(SH * H > 0)
# there is a way to do this with masking ...
for i, j in zip(x, y):
sind, = np.nonzero((sixy[:, 0] == i) & (sixy[:, 1] == j))
hind, = np.nonzero((ixy[:, 0] == i) & (ixy[:, 1] == j))
nobs = int(H[i, j])
xinds = self._random_select(sinds[sind], nobs)
cor_mag1[inds[hind]] = self.mag1diff[xinds]
cor_mag2[inds[hind]] = self.mag2diff[xinds]
return obs_mag1 + cor_mag1, obs_mag2 + cor_mag2
def completeness(self, combined_filters=False, interpolate=False,
binsize=0.2):
'''
calculate the completeness of the data in each filter
Parameters
----------
combined_filters : bool
Use individual or combined ast recovery
interpolate : bool
add a 1d spline the completeness function to self
Returns
-------
self.comp1, self.comp2 : array, array
the completeness per filter binned with self.ast_bins
'''
# calculate stars recovered, could pass theshold here.
rec1, rec2 = self.recovered()
# make sure ast_bins are good to go
if not hasattr(self, 'ast_bins'):
self.bin_asts(binsize=binsize)
# gst uses both filters for recovery.
if combined_filters is True:
rec1 = rec2 = self.rec
# historgram of all artificial stars
qhist1 = np.array(np.histogram(self.mag1, bins=self.ast_bins)[0],
dtype=float)
# histogram of recovered artificial stars
rhist1 = np.array(np.histogram(self.mag1[rec1], bins=self.ast_bins)[0],
dtype=float)
# completeness histogram
self.comp1 = rhist1 / qhist1
qhist2 = np.array(np.histogram(self.mag2, bins=self.ast_bins)[0],
dtype=float)
rhist2 = np.array(np.histogram(self.mag2[rec2], bins=self.ast_bins)[0],
dtype=float)
self.comp2 = rhist2 / qhist2
if interpolate is True:
# sometimes the histogram isn't as useful as the a spline
# function... add the interp1d function to self.
self.fcomp1 = interp1d(self.ast_bins[1:], self.comp1,
bounds_error=False)
self.fcomp2 = interp1d(self.ast_bins[1:], self.comp2,
bounds_error=False)
return
def get_completeness_fraction(self, frac, dmag=0.001, bright_lim=18):
"""Find the completeness magnitude at a given fraction"""
assert hasattr(self, 'fcomp1'), \
'need to run completeness with interpolate=True'
# set up array to evaluate interpolation
# sometimes with few asts at bright mags the curve starts with low
# completeness, reaches toward 1, and then declines as expected.
# To get around taking a value too bright, I search for values beginning
# at the faint end
search_arr = np.arange(bright_lim, 31, dmag)[::-1]
# completeness in each filter, and the finite vals
# (frac - nan = frac)
cfrac1 = self.fcomp1(search_arr)
ifin1 = np.isfinite(cfrac1)
cfrac2 = self.fcomp2(search_arr)
ifin2 = np.isfinite(cfrac2)
# closest completeness fraction to passed fraction
icomp1 = np.argmin(np.abs(frac - cfrac1[ifin1]))
icomp2 = np.argmin(np.abs(frac - cfrac2[ifin2]))
# mag associated with completeness
comp1 = search_arr[ifin1][icomp1]
comp2 = search_arr[ifin2][icomp2]
if comp1 == bright_lim or comp2 == bright_lim:
logger.warning('Completeness fraction is at mag search limit and probably wrong. '
'Try adjusting bright_lim')
return comp1, comp2
def magdiff_plot(self, axs=None):
"""Make a plot of input mag - output mag vs input mag"""
if not hasattr(self, 'rec'):
self.completeness(combined_filters=True)
if axs is None:
fig, axs = plt.subplots(ncols=2, figsize=(12, 6))
axs[0].plot(self.mag1[self.rec], self.mag1diff[self.rec], '.',
color='k', alpha=0.5)
axs[1].plot(self.mag2[self.rec], self.mag2diff[self.rec], '.',
color='k', alpha=0.5)
xlab = r'${{\rm Input}}\ {}$'
axs[0].set_xlabel(xlab.format(self.filter1), fontsize=20)
axs[1].set_xlabel(xlab.format(self.filter2), fontsize=20)
axs[0].set_ylabel(r'${{\rm Input}} - {{\rm Ouput}}$', fontsize=20)
return axs
def completeness_plot(self, ax=None, comp_fracs=None):
"""Make a plot of completeness vs mag"""
assert hasattr(self, 'fcomp1'), \
'need to run completeness with interpolate=True'
if ax is None:
fig, ax = plt.subplots()
ax.plot(self.ast_bins, self.fcomp1(self.ast_bins),
label=r'${}$'.format(self.filter1))
ax.plot(self.ast_bins, self.fcomp2(self.ast_bins),
label=r'${}$'.format(self.filter2))
if comp_fracs is not None:
self.add_complines(ax, *comp_fracs)
ax.set_xlabel(r'${{\rm mag}}$', fontsize=20)
ax.set_ylabel(r'${{\rm Completeness\ Fraction}}$', fontsize=20)
plt.legend(loc='lower left', frameon=False)
return ax
def add_complines(self, ax, *fracs, **get_comp_frac_kw):
"""add verticle lines to a plot at given completeness fractions"""
lblfmt = r'${frac}\ {filt}:\ {comp: .2f}$'
for frac in fracs:
ax.axhline(frac, alpha=0.5)
comp1, comp2 = self.get_completeness_fraction(frac,
**get_comp_frac_kw)
for comp, filt in zip((comp1, comp2), (self.filter1, self.filter2)):
lab = lblfmt.format(frac=frac, filt=filt, comp=comp)
ax.axvline(comp, label=lab,
color=next(ax._get_lines.color_cycle))
plt.legend(loc='lower left', frameon=False)
return ax
def main(argv):
    """
    Command-line entry point: compute completeness fractions for MATCH
    AST files and optionally make completeness / magdiff plots.

    Parameters
    ----------
    argv : list of str
        command line arguments (excluding the program name)
    """
    parser = argparse.ArgumentParser(description="Calculate completeness fraction, make AST plots")
    parser.add_argument('-c', '--comp_frac', type=float, default=0.9,
                        help='completeness fraction to calculate')
    parser.add_argument('-p', '--makeplots', action='store_true',
                        help='make AST plots')
    # BUGFIX: help-text typo "brighest" -> "brightest"
    parser.add_argument('-m', '--bright_mag', type=float, default=20.,
                        help='brightest mag to consider for completeness frac')
    parser.add_argument('-f', '--plot_fracs', type=str, default=None,
                        help='comma separated completeness fractions to overplot')
    parser.add_argument('fake', type=str, nargs='*', help='match AST file(s)')
    args = parser.parse_args(argv)

    for fake in args.fake:
        ast = ASTs(fake)
        ast.completeness(combined_filters=True, interpolate=True,
                         binsize=0.15)
        comp1, comp2 = ast.get_completeness_fraction(args.comp_frac,
                                                     bright_lim=args.bright_mag)
        print('{} {} completeness fraction:'.format(fake, args.comp_frac))
        print('{0:20s} {1:.4f} {2:.4f}'.format(ast.target, comp1, comp2))
        if args.makeplots:
            comp_name = os.path.join(ast.base, ast.name + '_comp.png')
            ast_name = os.path.join(ast.base, ast.name + '_ast.png')
            ax = ast.completeness_plot()
            if args.plot_fracs is not None:
                # map() is lazy; add_complines unpacks it, so this is fine
                fracs = map(float, args.plot_fracs.split(','))
                ast.add_complines(ax, *fracs, **{'bright_lim': args.bright_mag})
            plt.savefig(comp_name)
            plt.close()
            ast.magdiff_plot()
            plt.savefig(ast_name)
            plt.close()
if __name__ == "__main__":
    # script entry point: forward the command-line arguments to the CLI
    main(sys.argv[1:])
| {
"repo_name": "philrosenfield/ResolvedStellarPops",
"path": "galaxies/asts.py",
"copies": "1",
"size": "22715",
"license": "bsd-3-clause",
"hash": 3702728485513035000,
"line_mean": 35.4606741573,
"line_max": 99,
"alpha_frac": 0.5682588598,
"autogenerated": false,
"ratio": 3.646652753250923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710977534018019,
"avg_score": 0.0007868158065805823,
"num_lines": 623
} |
# All about Python strings: creation, indexing, slicing, and common methods.
# create strings with literals
s1 = "" #empty string
s2 = "hello" #string hello
# create strings with the str() constructor
s1 = str() #same as ""
s2 = str("hello") #same as "hello"
'''
note
strings are immutable
two or more strings with the same value may share the same object
(CPython may intern them -- an implementation detail, not a guarantee)
'''
#assign value to s1 string object
s1 = "hello"
#length of the string
print(len(s1))
#largest character in string (by Unicode code point)
print(max(s1))
#smallest character in string (by Unicode code point)
print(min(s1))
#string used for the rest of the examples
s = "python"
#index operator
print(s[0]) #0th element i.e., p
print(s[-1]) #last element i.e., n
#print characters at even index
for i in range(0, len(s), 2):
    print(s[i], end="")
#slice
print(s[0:2]) #print element at index 0,1
print(s[2:]) #print element from index 2 to last
print(s[:]) #print all
print(s[2:-1]) #print element from index 2 to second last
#concatenate with +
print("hello " + s)
#repeat with *
print(s*3) #print 'python' thrice
#membership test for substrings
print("come" in "welcome") #is 'come' in the word 'welcome'
print('hello' not in 'python')
#compare (lexicographic, by Unicode code point)
print("hello" == "hello")
print("green" != "blue")
print("a" < "b")
print("c" > "C") #comparing character code values
print("abc" >= "ABC")
print("XYZ" <= "xyz")
#print string s character by character
for ch in s:
    print(ch)
#testing string contents
print("hello".isalnum()) #True
print("132".isalnum()) #True
print("<?>".isalnum()) #False
print("abc".isalpha()) #True
print("123".isdigit()) #True
print(s.isidentifier()) #True
print("abc".islower()) #True
print("ABC".isupper()) #True
print(" ".isspace()) #True
#substring search
#s = "python"
print(s.endswith("thon")) #True
print(s.startswith("py")) #True
print(s.find("th")) #smallest index where 'th' occurs
print(s.rfind("th")) #largest index where 'th' occurs
print(s.count('th')) #count number of substring occurrences
#convert string
#s = "python"
#as strings are immutable, the following methods return a modified copy
print(s.capitalize()) #first letter capitalized
print(s.lower()) #convert to lower case
print(s.upper()) #convert to upper case
print(s.swapcase()) #swap the case of each letter
print(s.replace('p','j')) #replace all occurrences of 'p' with 'j'
#remove white space = ' ', '\t', '\f', '\r' and '\n'
print(" hello".lstrip()) #remove leading white space
print("hello ".rstrip()) #remove trailing white space
print(" hello ".strip()) #remove leading and trailing white space
| {
"repo_name": "yusufshakeel/Python-Project",
"path": "python3/string.py",
"copies": "1",
"size": "2513",
"license": "mit",
"hash": -8267142044450239000,
"line_mean": 24.13,
"line_max": 92,
"alpha_frac": 0.6358933546,
"autogenerated": false,
"ratio": 3.075887392900857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4211780747500857,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = ['AbstractCrudConf']
class AbstractCrudConf(object):
    """
    Abstract crudconf your concrete CrudConf should inherit from.

    A crudconf provides a common interface for the Crud views in order to
    provide appropriate model instance url resolving in the template.
    """
    # NOTE: these class-level containers are shared by subclasses that do
    # not override them; subclasses should assign their own lists.
    model = None
    list_display = []
    detail_properties = []
    list_display_detail_link_index = 0
    # Default column-name for table-sort. To be overwritten in subclass
    data_sort_name = ''
    # Default sort-ordering
    data_sort_order = 'asc'

    def __init__(self, instance):
        self.instance = instance

    def get_model_verbose_name(self):
        """
        Returns the model verbose name of the instance.
        """
        return self.instance._meta.verbose_name

    def get_model_verbose_name_plural(self):
        """
        Returns the model verbose name plural.
        """
        return self.instance._meta.verbose_name_plural

    def get_origin_url(self):
        """
        Return the URL that represents the page, this model's dialogs
        can be reached from. E.g. consider Angebote, where Angebote can
        be reached from fg_base_dashboard, this function should return the
        reverse of that URL. It is used in order to provide a back link
        to the originating page. Returns None by default.
        """
        return

    def get_origin_title(self):
        """
        Return the Title of the originating page.
        """
        return 'Dashboard'

    def get_display_name(self):
        """
        Returns the string representation of the object bound to this
        viewinfo object. E.g. get_display_name of an Angebot instance
        returns the human-readable string representation of this angebot.
        The default implementation returns str(self.instance).
        """
        return str(self.instance)

    def get_list_url(self):
        """
        Returns the reversed URL for the list view (None by default).
        """
        return

    def get_create_url(self):
        """Returns the URL for the create view (None by default)."""
        return

    def get_detail_url(self):
        """Returns the URL for the detail view (None by default)."""
        return

    def get_change_url(self):
        """Returns the URL for the change view (None by default)."""
        return

    def get_delete_url(self):
        """Returns the URL for the delete view (None by default)."""
        return

    # --------------------------------------------------------------------------

    def _get_field_strings(self, field_list):
        """
        Takes a list of field name strings.
        Returns that original list, or -- in case the field_list's first
        entry equals '*' -- the names of all fields of the associated
        model (self.model, not self.instance as the old docstring said).
        """
        # truthiness check instead of len(); behavior is identical
        if field_list and field_list[0] == '*':
            return [field.name for field in self.model._meta.get_fields()]
        return field_list

    def get_list_display(self):
        """
        Returns a list of strings representing the names of the fields that
        should be displayed in the list view table.
        """
        return self._get_field_strings(self.list_display)

    def get_detail_properties(self):
        """
        Returns a list of strings representing the names of the fields that
        should be displayed in the detail view.
        """
        return self._get_field_strings(self.detail_properties)
| {
"repo_name": "tonimichel/django-modelcrud",
"path": "modelcrud/abstract_crudconf.py",
"copies": "1",
"size": "3166",
"license": "mit",
"hash": 235525680066808450,
"line_mean": 29.4423076923,
"line_max": 81,
"alpha_frac": 0.5991787745,
"autogenerated": false,
"ratio": 4.465444287729196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5564623062229196,
"avg_score": null,
"num_lines": null
} |
# Public pen classes exported by this module.
__all__ = ["AbstractPointPen", "BasePointToSegmentPen", "PrintingPointPen",
           "PrintingSegmentPen", "SegmentPrintingPointPen"]
class AbstractPointPen:
    """Baseclass for all point pens.

    Defines the PointPen protocol: beginPath(), one addPoint() per point,
    then endPath() for each contour, plus addComponent() for sub glyphs.
    Subclasses must override all four methods.
    """

    def beginPath(self):
        """Start a new sub path."""
        raise NotImplementedError

    def endPath(self):
        """End the current sub path."""
        raise NotImplementedError

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        """Add a point to the current sub path.

        pt is an (x, y) pair. segmentType is None for off-curve points,
        or an on-curve segment type such as "move", "line", "curve" or
        "qcurve" (see BasePointToSegmentPen._flushContour).
        """
        raise NotImplementedError

    def addComponent(self, baseGlyphName, transformation):
        """Add a sub glyph: a reference to baseGlyphName plus a transformation."""
        raise NotImplementedError
class BasePointToSegmentPen(AbstractPointPen):
    """Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    as points, do use this base implementation as it properly takes
    care of all the edge cases.
    """

    def __init__(self):
        # list of (pt, segmentType, smooth, name, kwargs) tuples for the
        # contour currently being built; None when no contour is open
        self.currentPath = None

    def beginPath(self):
        assert self.currentPath is None
        self.currentPath = []

    def _flushContour(self, segments):
        """Override this method.
        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.
        The segments list contains tuples of length 2:
            (segmentType, points)
        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.
        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
            (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.
        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.
        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self):
        assert self.currentPath is not None
        points = self.currentPath
        self.currentPath = None
        if not points:
            # empty contour: nothing to flush
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                # index 1 of a point tuple is its segmentType
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                points = points[firstOnCurve+1:] + points[:firstOnCurve+1]
        # Group the points into segments: each on-curve point closes the
        # segment formed by the off-curve points collected before it.
        currentSegment = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []
        self._flushContour(segments)

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        # just record the point; all processing happens in endPath()
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PrintingPointPen(AbstractPointPen):
    """Point pen that prints every callback it receives; handy for debugging.

    NOTE: this module uses Python 2 print statements.
    """
    def __init__(self):
        # True while we are between beginPath() and endPath()
        self.havePath = False

    def beginPath(self):
        self.havePath = True
        print "pen.beginPath()"

    def endPath(self):
        self.havePath = False
        print "pen.endPath()"

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        assert self.havePath
        # build the argument list exactly as it would appear in source code
        args = ["(%s, %s)" % (pt[0], pt[1])]
        if segmentType is not None:
            args.append("segmentType=%r" % segmentType)
        if smooth:
            args.append("smooth=True")
        if name is not None:
            args.append("name=%r" % name)
        if kwargs:
            args.append("**%s" % kwargs)
        print "pen.addPoint(%s)" % ", ".join(args)

    def addComponent(self, baseGlyphName, transformation):
        # components may only appear outside a contour
        assert not self.havePath
        print "pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation))
from fontTools.pens.basePen import AbstractPen
class PrintingSegmentPen(AbstractPen):
    """Segment pen that prints every callback it receives; handy for debugging.

    NOTE: this module uses Python 2 print statements.
    """
    def moveTo(self, pt):
        print "pen.moveTo(%s)" % (pt,)

    def lineTo(self, pt):
        print "pen.lineTo(%s)" % (pt,)

    def curveTo(self, *pts):
        print "pen.curveTo%s" % (pts,)

    def qCurveTo(self, *pts):
        print "pen.qCurveTo%s" % (pts,)

    def closePath(self):
        print "pen.closePath()"

    def endPath(self):
        print "pen.endPath()"

    def addComponent(self, baseGlyphName, transformation):
        print "pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation))
class SegmentPrintingPointPen(BasePointToSegmentPen):
    """Point pen that converts points to segments and pretty-prints each
    finished contour's segment list; handy for debugging."""
    def _flushContour(self, segments):
        from pprint import pprint
        pprint(segments)
if __name__ == "__main__":
    # Smoke test: print the segments of a quadratic contour that has no
    # on-curve points (exercises the special case in endPath()).
    p = SegmentPrintingPointPen()
    from robofab.test.test_pens import TestShapes
    TestShapes.onCurveLessQuadShape(p)
| {
"repo_name": "anthrotype/robofab",
"path": "Lib/robofab/pens/pointPen.py",
"copies": "8",
"size": "5314",
"license": "bsd-3-clause",
"hash": 4199964024104252400,
"line_mean": 29.7167630058,
"line_max": 77,
"alpha_frac": 0.6989085435,
"autogenerated": false,
"ratio": 3.2461820403176542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.031339734010377575,
"num_lines": 173
} |
# Public API of this module.
__all__ = ['AbundanceFunction', 'add_scatter', 'rematch', 'LF_SCATTER_MULT']
import numpy as np
from scipy.optimize import curve_fit
# Optional compiled extension (Peter Behroozi's fiducial deconvolution);
# record availability instead of failing at import time.
try:
    from fiducial_deconv_wrapper import fiducial_deconvolute
except (OSError, ImportError):
    _has_fiducial_deconvolute = False
else:
    _has_fiducial_deconvolute = True
# presumably converts luminosity-function scatter in dex to magnitudes
# (mag = -2.5 * log10 L) -- TODO confirm
LF_SCATTER_MULT = 2.5
def _diff(a):
return a[1:]-a[:-1]
def _bright_end_func(x, a, b, c, d):
return -np.exp(a*x+b) + c*x + d
def _convolve_gaussian(y, sigma, truncate=4):
sd = float(sigma)
size = int(np.ceil(truncate * sd))
weights = np.zeros(size*2+1)
i = np.arange(size+1)
weights[size:] = np.exp(-(i*i)/(2.0*sd*sd))
weights[:size] = weights[:size:-1]
weights /= weights.sum()
y_full = np.concatenate((np.zeros(size), y, np.ones(size)*y[-1]))
return np.convolve(y_full, weights, 'valid')
def add_scatter(x, scatter, in_place=False):
    """
    Add Gaussian scatter to x.

    Parameters
    ----------
    x : array_like
        Values to perturb.
    scatter : float
        Standard deviation (sigma) of the Gaussian.
    in_place : bool, optional
        If True, modify x in place (x must then be a numpy array);
        otherwise return a perturbed copy.

    Returns
    -------
    x : array_like
        x with the added scatter.
    """
    sigma = float(scatter)
    if in_place:
        x += np.random.randn(*x.shape) * sigma
        return x
    x = np.asarray(x)
    return x + np.random.randn(*x.shape) * sigma
def rematch(catalog1, catalog2, greatest_first=True, catalog2_sorted=False):
    """
    Replace the finite values of catalog1 with the finite values of
    catalog2, matched rank by rank. NaN and INF entries of catalog1 are
    left untouched.

    Parameters
    ----------
    catalog1 : array_like
        1-d array whose finite values are substituted.
    catalog2 : array_like
        1-d array providing the substitute values.
    greatest_first : bool, optional
        If True (default), ranks are assigned starting from the greatest
        values.
    catalog2_sorted : bool, optional
        If True, use catalog2 as-is without filtering or re-sorting.

    Returns
    -------
    catalog : ndarray
        Same size as catalog1, with finite entries replaced by
        rank-matched values from catalog2. If catalog2 has fewer values
        than catalog1 has finite entries, the leftover (lowest-rank)
        entries are set to NaN.
    """
    source = np.asarray(catalog2)
    if not catalog2_sorted:
        source = np.sort(source[np.isfinite(source)])
        if greatest_first:
            source = source[::-1]
    result = np.array(catalog1)
    finite_idx = np.where(np.isfinite(result))[0]
    rank = np.argsort(result[finite_idx])
    if greatest_first:
        rank = rank[::-1]
    navail = len(source)
    # rank-matched substitution; any finite entries beyond the supply of
    # catalog2 values become NaN
    result[finite_idx[rank[:navail]]] = source[:len(rank)]
    result[finite_idx[rank[navail:]]] = np.nan
    return result
def _to_float(x, default=np.nan):
try:
xf = float(x)
except (ValueError, TypeError):
return default
return xf
class AbundanceFunction:
    def __init__(self, x, phi, ext_range=(None, None), nbin=1000, \
            faint_end_first=False, faint_end_slope='fit', \
            faint_end_fit_points=3, bright_end_fit_points=-1):
        """
        This class can interpolate and extrapolate an abundance function,
        and also provides fiducial deconvolution and abundance matching.

        Parameters
        ----------
        x : array_like
            The abundance proxy, usually is magnitude or log(stellar mass).
            `log(phi)` should roughly be linear in `x`.
        phi : array_like
            The abundance value, in the unit of x^{-1} vol^{-1}.
            The integrate phi over x should result in number density.
            `x` and `phi` must have the same size.
        ext_range : tuple, optional
            The minimal and maximal value in x to extrapolate abundance
            function.
        nbin : int, optional
            Number of points to interpolate the abundance function.
        faint_end_first : bool, optional
            Whether `x` and `phi` are listed from faint end to bright end.
            If False (default), assumes bright end listed first.
        faint_end_slope : str or float, optional
            If 'fit', fit the faint-end slope from data.
            If a float number, use it as the faint-end slope.
        faint_end_fit_points : int, optional
            Number of points to fit the faint-end slope.
            Only used if `faint_end_slope` is 'fit'.
        bright_end_fit_points : int, optional
            Number of points to fit the bright end.
            If -1 (default), use all data to fit.

        Notes
        -----
        To do abundance matching, see member functions `deconvolute`
        and `match`.
        """
        # work in log(phi); the table is kept bright-end first internally
        x = np.ravel(x)
        phi_log = np.log(phi).flatten()
        if len(x) != len(phi_log):
            raise ValueError('`x` and `phi` must have the same size!')
        # -1 means "use all points"; the 4-parameter bright-end model
        # needs at least 4 points to fit
        bright_end_fit_points = min(int(bright_end_fit_points), len(x))
        if bright_end_fit_points < 0:
            bright_end_fit_points = len(x)
        elif bright_end_fit_points < 4:
            raise ValueError('`bright_end_fit_points` must be -1 or larger than 3')
        # resolve the faint-end slope mode: fit from data, or fixed value
        if faint_end_slope == 'fit':
            faint_end_fit_points = min(int(faint_end_fit_points), len(x))
            if faint_end_fit_points < 2:
                # not enough points to fit a line; fall back to slope 0
                faint_end_fit_points = 0
                faint_end_slope = 0
        else:
            faint_end_slope = float(faint_end_slope)
            faint_end_fit_points = 0
        # extrapolation limits default to the data limits
        ext_min, ext_max = ext_range
        ext_min = _to_float(ext_min, x[0])
        ext_max = _to_float(ext_max, x[-1])
        if faint_end_first:
            # normalize the input to bright-end-first ordering
            x = x[::-1]
            phi_log = phi_log[::-1]
            ext_min, ext_max = ext_max, ext_min
        # fine grid on which the function is tabulated
        x_new = np.linspace(ext_min, ext_max, num=int(nbin)+1)
        dx = _diff(x)
        if all(dx > 0): #like luminosity
            self._x_flipped = False
            bright_end_flag = (x_new < x[0])
            faint_end_flag = (x_new > x[-1])
        elif all(dx < 0): #like stellar mass
            self._x_flipped = True
            bright_end_flag = (x_new > x[0])
            faint_end_flag = (x_new < x[-1])
        else:
            raise ValueError('x must be a strictly monotonic array.')
        # slice that re-orients arrays so x is increasing (for np.interp)
        self._s = slice(None, None, -1 if self._x_flipped else None)
        phi_log_new = np.empty_like(x_new)
        # interior points: plain interpolation of the input table
        flag = ~(bright_end_flag | faint_end_flag)
        phi_log_new[flag] = np.interp(x_new[flag], x[self._s], phi_log[self._s])
        #fit bright end
        a0 = 1.0 if self._x_flipped else -1.0
        s = slice(bright_end_fit_points)
        popt = curve_fit(_bright_end_func, x[s], phi_log[s], [a0, 0, 0, 0], \
                maxfev=100000)[0]
        phi_log_new[bright_end_flag] = \
                _bright_end_func(x_new[bright_end_flag], *popt)
        #fit faint end
        if faint_end_fit_points:
            # linear fit in log(phi) over the last (faintest) points
            s = slice(-faint_end_fit_points, None)
            popt = curve_fit(lambda x, a, b: a*x+b, x[s], phi_log[s], [0, 0], \
                    maxfev=100000)[0]
            faint_end_slope = popt[0]
        else:
            # convert a user slope in dex to natural log; sign follows
            # the orientation of x
            faint_end_slope *= (np.log(10.0) if self._x_flipped else -np.log(10.0))
        b = phi_log[-1]-faint_end_slope*x[-1]
        phi_log_new[faint_end_flag] = x_new[faint_end_flag]*faint_end_slope + b
        # integrate phi over x to build the cumulative number density table
        dx = np.fabs((x_new[-1]-x_new[0])/int(nbin))
        phi_new = np.exp(phi_log_new)
        flag = np.isfinite(phi_new)
        # NOTE(review): phi_log_new is NOT filtered by `flag` here, so if
        # any phi values were non-finite, _diff(phi_log_new) below would
        # have a different length than dphi -- confirm this can't happen.
        x_new = x_new[flag]
        phi_new = phi_new[flag]
        dphi = _diff(phi_new)
        phi_center = (phi_new[1:]+phi_new[:-1])*0.5
        # per-bin integral assuming phi is exponential within each bin
        phi_int = dphi/_diff(phi_log_new)*dx
        # where phi is locally flat the exponential formula is 0/0;
        # fall back to the midpoint rule there
        flag = (np.fabs(dphi)/phi_center < 1.0e-7)
        if any(flag):
            phi_int[flag] = phi_center[flag]*dx
        # extrapolate one extra bin at the bright end (geometric ratio)
        phi_int_0 = phi_int[0]*phi_int[0]/phi_int[1]
        phi_int = np.cumsum(np.insert(phi_int, 0, phi_int_0))
        self._x = x_new
        self._phi_log = phi_log_new
        self._nd_log = np.log(phi_int)
        self.nd_bounds = phi_int[0], phi_int[-1]
        # cache of deconvoluted x grids, keyed by scatter value
        self._x_deconv = {}
def __call__(self, x):
"""
Return the abundnace values at x, i.e. phi(x).
Parameters
----------
x : array_like
The abundance proxy, usually is magnitude or log(stellar mass).
Returns
-------
phi : array_like
The abundnace values at x.
"""
return np.exp(np.interp(x, self._x[self._s], self._phi_log[self._s], \
np.nan, np.nan))
def number_density_at(self, x, scatter=0):
"""
The number density at x, i.e. return nd(x).
Parameters
----------
x : array_like
The abundance proxy, usually is magnitude or log(stellar mass).
scatter : float, optional
If not zero, it uses an abundance function that has been
deconvoluted with this amount of scatter.
Must run `deconvolute` before calling this function.
Returns
-------
nd : array_like
Number densities.
"""
scatter = float(scatter)
if scatter > 0:
try:
xp = self._x_deconv[scatter]
except (KeyError):
raise ValueError('Please run deconvolute first!')
else:
xp = self._x
return np.exp(np.interp(x, xp[self._s], self._nd_log[self._s], \
np.nan, np.nan))
def match(self, nd, scatter=0, do_add_scatter=True, do_rematch=True):
"""
Abundance matching: match number density to x, i.e. return x(nd).
Parameters
----------
nd : array_like
Number densities.
scatter : float, optional
If not zero, it uses an abundance function that has been
deconvoluted with this amount of scatter.
Must run `deconvolute` before calling this function.
do_add_scatter : bool, optional
Add scatter to the final catalog.
do_rematch : bool, optional
Rematch the final catalog to the abundance function.
Returns
-------
catalog : array_like
The abundance proxies (e.g. magnitude or log(stellar mass))
at the given number densities.
"""
scatter = float(scatter)
if scatter > 0:
try:
xp = self._x_deconv[scatter]
except (KeyError):
raise ValueError('Please run deconvolute first!')
else:
xp = self._x
x = np.interp(np.log(nd), self._nd_log, xp, np.nan, np.nan)
if scatter > 0:
if do_add_scatter:
x = add_scatter(x, scatter, True)
if do_rematch:
x2 = np.interp(np.log(nd), self._nd_log, self._x, np.nan, np.nan)
x = rematch(x, x2, self._x_flipped)
return x
def deconvolute(self, scatter, repeat=10, sm_step=0.005, return_remainder=True):
"""
Deconvolute the abundance function with a given scatter (assuming Gaussian)
This function uses Peter Behroozi's 'fiducial_deconvolute' in c code.
You must first compile fiducial_deconvolute to use this function.
Parameters
----------
scatter : float
Standard deviation (sigma) of the Gaussian, in the unit of x.
repeat : int, optional
Number of times to repeat fiducial deconvolute process.
This value can change the result significantly.
*Always* check a reasonable value is used.
sm_step : float, optional
Some parameter used in fiducial_deconvolute.
Using 0.01 or 0.005 is fine.
return_remainder : bool, optional
If True, calculate the remainder of this deconvolution.
*Always* check the reminder is reasonable before
doing abundance matching.
Returns
-------
remainder : array_like
Returned only if `return_remainder` is True.
"""
if not _has_fiducial_deconvolute:
raise NotImplementedError('Make sure you compliled fiducial_deconvolute.')
af_key = np.empty(len(self._x), float)
af_val = np.empty_like(af_key)
af_key[::-1] = self._x
if not self._x_flipped:
af_key *= -1.0
af_val[::-1] = self._phi_log
af_val /= np.log(10.0)
smm = np.empty_like(af_key)
mf = np.empty_like(af_key)
smm[::-1] = self._x
mf[::-1] = np.gradient(np.exp(self._nd_log))
if not self._x_flipped:
smm *= -1.0
smm = fiducial_deconvolute(af_key, af_val, smm, mf, scatter, repeat, sm_step)
if not self._x_flipped:
smm *= -1.0
smm = smm[::-1]
self._x_deconv[float(scatter)] = smm
if return_remainder:
nd = np.exp(np.interp(self._x, smm[self._s], self._nd_log[self._s]))
dx = np.fabs((self._x[-1] - self._x[0])/float(len(self._x)-1))
nd_conv = _convolve_gaussian(nd, float(scatter)/dx)
return nd_conv - np.exp(self._nd_log)
def get_abundance_table(self):
"""
Return the inter/extrapolated abundance table.
Returns
-------
x : array_like
Abundance proxy.
phi : array_like
Abundance value.
"""
return self._x, np.exp(self._phi_log)
def get_number_density_table(self):
"""
Return the inter/extrapolated number density table.
Returns
-------
x : array_like
Abundance proxy.
nd : array_like
Number density, i.e. int phi(x) dx.
"""
return self._x, np.exp(self._nd_log)
| {
"repo_name": "manodeep/yymao-abundancematching",
"path": "AbundanceMatching/AbundanceFunction.py",
"copies": "1",
"size": "13873",
"license": "mit",
"hash": -7428556703561822000,
"line_mean": 33.2543209877,
"line_max": 86,
"alpha_frac": 0.548331291,
"autogenerated": false,
"ratio": 3.487430869783811,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45357621607838106,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Account']
class Account(object):
    """The GoDaddyPy Account.

    An account supplies the authentication headers consumed by
    `godaddypy.Client`.
    """

    _api_key = None
    _api_secret = None

    _SSO_KEY_TEMPLATE = 'sso-key {api_key}:{api_secret}'

    def __init__(self, api_key, api_secret, delegate=None):
        """Create a new `godaddypy.Account` object.

        :type api_key: str or unicode
        :param api_key: The API_KEY provided by GoDaddy

        :type api_secret: str or unicode
        :param api_secret: The API_SECRET provided by GoDaddy

        :param delegate: optional shopper id to act on behalf of
        """
        self._api_key = api_key
        self._api_secret = api_secret
        self._delegate = delegate

    def get_headers(self):
        """Return the HTTP headers carrying this account's credentials."""
        auth = self._SSO_KEY_TEMPLATE.format(api_key=self._api_key,
                                             api_secret=self._api_secret)
        headers = {'Authorization': auth}
        if self._delegate is not None:
            headers['X-Shopper-Id'] = self._delegate
        return headers
| {
"repo_name": "eXamadeus/godaddypy",
"path": "godaddypy/account.py",
"copies": "1",
"size": "1044",
"license": "bsd-3-clause",
"hash": -5470316910277671000,
"line_mean": 27.2162162162,
"line_max": 87,
"alpha_frac": 0.5632183908,
"autogenerated": false,
"ratio": 3.7689530685920576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48321714593920573,
"avg_score": null,
"num_lines": null
} |
__all__ = ["acor", "function"]
import numpy as np
from . import _acor
def acor(data, maxlag=10):
    """
    Estimate the autocorrelation time of a time series.

    Parameters
    ----------
    data : numpy.ndarray (N,) or (M, N)
        The time series.
    maxlag : int, optional
        N must be greater than maxlag times the estimated autocorrelation
        time.

    Returns
    -------
    tau : float
        An estimate of the autocorrelation time.
    mean : float
        The sample mean of data.
    sigma : float
        An estimate of the standard deviation of the sample mean.
    """
    # The heavy lifting happens in the compiled _acor extension.
    series = np.array(data)
    return _acor.acor(series, maxlag)
def function(data, maxt=None):
    """
    Calculate the autocorrelation function for a 1D time series.

    Parameters
    ----------
    data : numpy.ndarray (N,)
        The time series.
    maxt : int, optional
        Number of lags to compute; defaults to the series length.

    Returns
    -------
    rho : numpy.ndarray (N,)
        An autocorrelation function, normalized so rho[0] == 1.
    """
    series = np.atleast_1d(data)
    assert len(np.shape(series)) == 1, (
        "The autocorrelation function can only by computed "
        "on a 1D time series.")
    if maxt is None:
        maxt = len(series)
    # The compiled extension fills `acf` in place.
    acf = np.zeros(maxt, dtype=float)
    _acor.function(np.array(series, dtype=float), acf)
    return acf / acf[0]
| {
"repo_name": "dfm/acor",
"path": "acor/acor.py",
"copies": "1",
"size": "1288",
"license": "mit",
"hash": 986648358606522200,
"line_mean": 20.8305084746,
"line_max": 73,
"alpha_frac": 0.5900621118,
"autogenerated": false,
"ratio": 3.799410029498525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9889472141298525,
"avg_score": 0,
"num_lines": 59
} |
__all__ = ('Actions', 'LoggerInterface', 'RunCallback', 'FileCallback', 'Logger', 'DummyLoggerInterface')
#
# Interface: group of loggers which can attach multiple loggers.
#
class Actions(object):
    """String constants naming the kinds of actions that can be logged."""
    Run = 'run'
    Open = 'open'
    Fork = 'fork'
class LoggerInterface(object):
    """
    Base class for logging interaction on hosts; fans every event out to
    all attached loggers.

    The inner entry classes deliberately name their first argument
    `context`/`entry` instead of `self`, so the enclosing interface's
    `self` stays visible through the closure.

    (Not threadsafe)
    """
    def __init__(self):
        # Loggers currently receiving events.
        self.loggers = []
    def attach(self, logger):
        """
        Attach logger to logging interface.
        """
        self.loggers.append(logger)
    def detach(self, logger):
        """
        Remove logger from logging interface.
        """
        self.loggers.remove(logger)
    def attach_in_block(self, logger):
        # Context manager: logger is attached only inside the `with` block.
        class LoggerAttachment(object):
            def __enter__(context):
                self.attach(logger)
            def __exit__(context, *a):
                self.detach(logger)
        return LoggerAttachment()
    def group(self, func_name, *args, **kwargs):
        # Context manager bracketing a group of actions with
        # enter_group/leave_group on every logger.
        class LogGroup(object):
            def __enter__(context):
                # Snapshot the logger list so loggers attached mid-group
                # do not receive an unbalanced leave_group.
                context.loggers = self.loggers[:]
                for l in context.loggers:
                    l.enter_group(func_name, *args, **kwargs)
            def __exit__(context, *a):
                for l in context.loggers:
                    l.leave_group()
        return LogGroup()
    def log_fork(self, fork_name):
        # Build a fork entry; each attached logger gets a callback for it.
        class Fork(object):
            entry_type = Actions.Fork
            def __init__(entry):
                entry.fork_name = fork_name
                entry._callbacks = [ l.log_fork(entry) for l in self.loggers ]
                entry.succeeded = None
                entry.exception = None
            def set_succeeded(entry):
                # Mark success and notify every callback.
                entry.succeeded = True
                for c in entry._callbacks:
                    c.completed()
            def set_failed(entry, exception):
                # Record the failure cause and notify every callback.
                entry.succeeded = False
                entry.exception = exception
                for c in entry._callbacks:
                    c.completed()
            def get_logger_interface(entry):
                """
                Return a new logger interface object, which will be used
                in this fork (thread).
                """
                interface = LoggerInterface()
                for c in entry._callbacks:
                    interface.attach(c.get_fork_logger())
                return interface
        return Fork()
    def log_run(self, *a, **kwargs):
        """
        Log SSH commands.
        """
        # The returned Run entry is itself a context manager: callbacks are
        # created on __enter__ and completed on __exit__.
        class Run(object):
            entry_type = Actions.Run
            def __init__(entry, host=None, command=None, use_sudo=False, sandboxing=False, interactive=False, shell=False):
                entry.host = host
                entry.command = command
                entry.use_sudo = use_sudo
                entry.sandboxing = sandboxing
                entry.interactive = interactive
                entry.shell = shell
                entry.status_code = 'unknown'
                entry._callbacks = []
                entry._io = []
            def set_status_code(entry, status_code):
                entry.status_code = status_code
            @property
            def succeeded(entry):
                # Exit status 0 is success; 'unknown' compares unequal.
                return entry.status_code == 0
            @property
            def io(entry):
                # Accumulated command I/O as a single string.
                return ''.join(entry._io)
            def __enter__(entry):
                entry._callbacks = [ l.log_run(entry) for l in self.loggers ]
                return entry
            def __exit__(entry, *a):
                for c in entry._callbacks:
                    c.completed()
        return Run(*a, **kwargs)
    def log_file(self, host, **kwargs):
        """
        Log a get/put/open actions on remote files.
        """
        # The returned File entry is a context manager mirroring Run above,
        # but notifies loggers through log_file_opened/file_closed.
        class File(object):
            entry_type = Actions.Open
            def __init__(entry, host, mode=None, remote_path=None, local_path=None, use_sudo=False, sandboxing=False):
                entry.host = host
                entry.remote_path = remote_path
                entry.local_path = local_path
                entry.mode = mode # Required for 'open()' action.
                entry.use_sudo = use_sudo
                entry.sandboxing = sandboxing
                entry.succeeded = None # Unknown yet
            def complete(entry, succeeded=True):
                entry.succeeded = succeeded
            def __enter__(entry):
                entry._callbacks = [ l.log_file_opened(entry) for l in self.loggers ]
                return entry
            def __exit__(entry, *a):
                for c in entry._callbacks:
                    c.file_closed()
        return File(host, **kwargs)
    def log_exception(self, e):
        # Broadcast an exception to every attached logger.
        for l in self.loggers:
            l.log_exception(e)
    def log_msg(self, msg):
        # Broadcast a free-form message to every attached logger.
        for l in self.loggers:
            l.log_msg(msg)
class DummyLoggerInterface(LoggerInterface):
    """
    Logger interface with no loggers attached: every logging call is a no-op.
    """
    pass
#
# Base logger
#
class Logger(object):
    """
    Base class for concrete loggers. All hooks are no-ops; subclasses
    override the ones they care about.
    """
    #
    # Following methods are to be overriden by specific loggers.
    #
    def enter_group(self, func_name, *args, **kwargs):
        # Called when a logical group of actions starts.
        pass
    def leave_group(self):
        # Called when the current group ends.
        pass
    def log_fork(self, fork_name):
        # Return a callback object tracking a fork action.
        return ForkCallback()
    def log_run(self, run_entry):
        # Return a callback object tracking a 'run' (command) action.
        return RunCallback()
    def log_file_opened(self, file_entry):
        # Return a callback object tracking a file get/put/open action.
        return FileCallback()
    def log_exception(self, e):
        # Called when an exception is reported to the interface.
        pass
    def log_msg(self, msg):
        # Called for free-form log messages.
        pass
#
# Callbacks
#
class RunCallback(object):
    """Callback handed out by `Logger.log_run`; `completed` fires when the
    run entry finishes."""
    def __init__(self, completed=None):
        # A supplied hook shadows the class-level no-op on this instance.
        if completed:
            self.completed = completed

    def completed(self):
        """Default no-op completion hook."""
        pass
class FileCallback(object):
    """Callback handed out by `Logger.log_file_opened`; `file_closed` fires
    when the file entry's context exits."""
    def __init__(self, file_closed=None):
        # A supplied hook shadows the class-level no-op on this instance.
        if file_closed:
            self.file_closed = file_closed

    def file_closed(self):
        """Default no-op close hook."""
        pass
class ForkCallback(object):
    """Callback handed out by `Logger.log_fork`."""
    def completed(self):
        """No-op completion hook."""
        pass

    def get_fork_logger(self):
        """Return the logger to use inside the fork (a no-op `Logger`)."""
        return Logger()
| {
"repo_name": "frjaraur/python-deployer",
"path": "deployer/loggers/__init__.py",
"copies": "2",
"size": "6064",
"license": "bsd-2-clause",
"hash": -7934930428111036,
"line_mean": 25.025751073,
"line_max": 123,
"alpha_frac": 0.517974934,
"autogenerated": false,
"ratio": 4.458823529411765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0040266531081009915,
"num_lines": 233
} |
__all__ = ["AdbImpl"]
import subprocess
import re
import time
from adb import Adb
from status import *
from base.thread_wrap import ExecuteGetResponse
from base.log import VLOG
class AdbImpl(Adb):
    """Adb implementation that drives devices through the `adb` command-line tool."""

    def GetDevices(self, devices):
        """Fill `devices` in place with the serials of attached devices.

        Returns a Status; `devices` is cleared before being populated.
        """
        devices[:] = []
        p = subprocess.Popen(['adb', 'devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (res, errorMsg) = p.communicate()
        if errorMsg:
            # Fixed typo in the error message ("Falied" -> "Failed").
            return Status(kUnknownError, "Failed to get devices ")
        res = res.split('\n')
        # `adb devices` prints a header line, one line per device, then a
        # trailing blank line; one attached device yields 4 split entries.
        if len(res) == 4:
            devices.append(res[1].split('\t')[0])
            return Status(kOk)
        # maybe more than one devices
        elif len(res) > 4:
            for item in res[1:-2]:
                devices.append(item.split('\t')[0])
            return Status(kOk)
        else:
            return Status(kUnknownError, "Failed to get devices ")

    def ForwardPort(self, device_serial, local_port, remote_abstract):
        """Forward local tcp:`local_port` to the app's devtools abstract socket."""
        remote_abstract += "_devtools_remote"
        VLOG(1, "ForwardPort(local_port: %s, remote_abstract: %s)" % (local_port, remote_abstract))
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " forward tcp:" + local_port + \
            " localabstract:" + remote_abstract)
        if status.IsError():
            return Status(kUnknownError, "Failed to forward ports to device " + device_serial + ": " + res)
        return status

    def SetCommandLineFile(self, device_serial, command_line_file, exec_name, args):
        # Not needed for this backend; reports success unconditionally.
        return Status(kOk)

    def CheckAppInstalled(self, device_serial, package):
        """Verify `package` is installed by querying its path via `pm path`."""
        VLOG(1, "CheckAppInstalled(package: %s)" % package)
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell pm path " + package)
        if status.IsError():
            return status
        if res.find("package") == -1:
            return Status(kUnknownError, package + " is not installed on device " + device_serial)
        return Status(kOk)

    def ClearAppData(self, device_serial, package):
        """Clear the app's data with `pm clear`."""
        VLOG(1, "ClearAppData(package: %s)" % package)
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell pm clear " + package)
        if status.IsError():
            return status
        if res.find("Success") == -1:
            return Status(kUnknownError, "Failed to clear data for " + package + ": " + res)
        return Status(kOk)

    def SetDebugApp(self, device_serial, package):
        """Mark `package` as the persistent debug app."""
        VLOG(1, "SetDebugApp(package: %s)" % package)
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell am set-debug-app --persistent " + package)
        if status.IsError():
            return status
        return Status(kOk)

    def Launch(self, device_serial, package, activity):
        """Start `package`/`activity` and wait for the launch to complete."""
        VLOG(1, "Launch(package: %s, activity: %s)" % (package, activity))
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell am start -W -n " + package + "/" + activity + " -d data:,")
        if status.IsError():
            return status
        if res.find("Complete") == -1:
            return Status(kUnknownError, "Failed to start " + package + " on device " + device_serial + ": " + res)
        return Status(kOk)

    def ForceStop(self, device_serial, package):
        """Force-stop `package` with `am force-stop`."""
        VLOG(1, "ForceStop(package: %s)" % package)
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell am force-stop " + package)
        if status.IsError():
            return status
        return Status(kOk)

    def GetPidByName(self, device_serial, process_name):
        """Return (status, pid) for `process_name`, where pid is a string."""
        (status, res) = self.ExecuteCommand("adb -s " + device_serial + " shell ps")
        if status.IsError():
            return (status, "")
        # The regex captures the second whitespace-separated column of the
        # `ps` row ending in `process_name` (the PID).
        patt = r'\w+\s*(\d+)\s*\d+\s*\d+\s*\d+\s*\w+\s*\w+\s*\w\s*' + process_name
        matchObj = re.search(patt, res)
        if not matchObj:
            return (Status(kUnknownError, "Failed to get PID for the following process: " + process_name), "")
        pid = matchObj.groups()[0]
        VLOG(1, "GetPidByName(process_name: %s, pid: %s)" % (process_name, pid))
        return (Status(kOk), pid)

    def ExecuteCommand(self, command=""):
        """Run `command` and return (status, response string)."""
        # default command execute timeout 30 seconds
        return ExecuteGetResponse(command, 30).GetResponse()
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/adb_impl.py",
"copies": "1",
"size": "4109",
"license": "bsd-3-clause",
"hash": 2553199780192740000,
"line_mean": 39.2843137255,
"line_max": 135,
"alpha_frac": 0.6383548309,
"autogenerated": false,
"ratio": 3.4471476510067114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9456502586947035,
"avg_score": 0.02579997899193512,
"num_lines": 102
} |
__all__ = ['add_computed_fields']
from dynd._pydynd import as_py, as_numpy, w_type, \
w_array as array, make_cstruct, \
elwise_map, extract_dtype, type_of, \
dtype_of, ndim_of
class FieldExpr:
    """Callable used as the mapping function for `elwise_map`.

    For each element, the named source fields are converted to Python/NumPy
    values and each destination expression string is evaluated against them.
    """
    def __init__(self, dst_field_expr, src_field_names, fnname):
        # `__name__` controls how the deferred expression's type is printed.
        if fnname is None:
            self.__name__ = 'computed_field_expr'
        else:
            self.__name__ = fnname
        self.dst_field_expr = dst_field_expr
        self.src_field_names = src_field_names
        # Create a globals dict containing datetime, numpy and scipy
        # for the expressions to use
        import datetime
        import numpy
        import scipy
        self.glbl = {}
        self.glbl.update(datetime.__dict__)
        self.glbl.update(numpy.__dict__)
        self.glbl.update(scipy.__dict__)
        self.glbl['as_py'] = as_py
        self.glbl['as_numpy'] = as_numpy
    def __call__(self, dst, src):
        """Evaluate every destination expression for each element of `src`,
        storing results in the matching element of `dst`.

        NOTE(review): expressions go through `eval` with numpy/scipy globals;
        only trusted expression strings should reach this class.
        """
        # Loop element by element
        for dst_itm, src_itm in zip(dst, src):
            # Put all the src fields in a locals dict
            lcl = {}
            for i, name in enumerate(self.src_field_names):
                s = getattr(src_itm, name).eval()
                if ndim_of(s) > 0 or dtype_of(s).kind == 'struct':
                    # For types which NumPy doesn't support, leave
                    # them as DyND arrays
                    try:
                        s = as_numpy(s, allow_copy=True)
                    except TypeError:
                        pass
                else:
                    s = as_py(s)
                lcl[str(name)] = s
            # Evaluate all the field exprs
            for i, expr in enumerate(self.dst_field_expr):
                v = eval(expr, self.glbl, lcl)
                dst_itm[i] = v
def add_computed_fields(n, fields, rm_fields=None, fnname=None):
    """
    Adds one or more new fields to a struct array,
    using nd.elwise_map to create the deferred object.

    Each field_expr should be a string or bit of code
    that can be evaluated with an 'eval' call. It is called
    with numpy/scipy in the globals, and the input
    fields in the locals.

    Parameters
    ----------
    n : dynd array
        This should have a struct data dtype. The
        result will be a view of this data.
    fields : list of (field_name, field_type, field_expr)
        These are the fields which are added to 'n'.
    rm_fields : list of string, optional
        For fields that are in the input, but have no expression,
        this removes them from the output struct instead of
        keeping the value. Defaults to removing nothing.
    fnname : string, optional
        The function name, which affects how the resulting
        deferred expression's dtype is printed.

    Examples
    --------
    >>> from dynd import nd, ndt
    >>> import numpy as np
    >>> x = np.array([(2, 0), (0, -2), (3, 5), (4, 4)],
    ...         dtype=[('x', np.float64), ('y', np.float64)])
    >>> y = nd.add_computed_fields(x,
    ...         fields=[('r', np.float64, 'sqrt(x*x + y*y)'),
    ...                 ('theta', np.float64, 'arctan2(y, x)')],
    ...         rm_fields=['x', 'y'],
    ...         fnname='topolar')
    >>> y.dtype
    ndt.type('strided_dim<expr<cstruct<float64 r, float64 theta>, op0=cstruct<float64 x, float64 y>, expr=topolar(op0)>>')
    >>> y.eval()
    nd.array([[2, 0], [2, -1.5708], [5.83095, 1.03038], [5.65685, 0.785398]], strided_dim<cstruct<float64 r, float64 theta>>)
    >>> x[0] = (-100, 0)
    >>> y[0].eval()
    nd.array([100, 3.14159], cstruct<float64 r, float64 theta>)
    """
    # Avoid a mutable default argument; None means "remove no fields".
    if rm_fields is None:
        rm_fields = ()
    n = array(n)
    udt = dtype_of(n).value_type
    if udt.kind != 'struct':
        raise ValueError("parameter 'n' must have kind 'struct'")
    # The field names and types of the input struct
    field_names = as_py(udt.field_names)
    field_types = as_py(udt.field_types)
    # Put the new field names in a dict as well
    new_field_dict = {}
    for fn, ft, fe in fields:
        new_field_dict[fn] = w_type(ft)
    # Create the output struct type and corresponding expressions
    new_field_names = []
    new_field_types = []
    new_field_expr = []
    # Keep input fields that are neither overridden nor removed; their
    # "expression" is just the field name itself (pass-through).
    for fn, ft in zip(field_names, field_types):
        if fn not in new_field_dict and fn not in rm_fields:
            new_field_names.append(fn)
            new_field_types.append(ft)
            new_field_expr.append(fn)
    for fn, ft, fe in fields:
        new_field_names.append(fn)
        new_field_types.append(ft)
        new_field_expr.append(fe)
    result_udt = make_cstruct(new_field_types, new_field_names)
    fieldexpr = FieldExpr(new_field_expr, field_names, fnname)
    return elwise_map([n], fieldexpr, result_udt)
def make_computed_fields(n, replace_ndim, fields, fnname=None):
    """
    Creates a new struct type, with fields computed based
    on the input fields. Leaves the requested number of
    array dimensions in place, so the result has fewer
    than the input if positive.

    Each field_expr should be a string or bit of code
    that can be evaluated with an 'eval' call. It is called
    with numpy/scipy in the globals, and the input
    fields in the locals.

    Parameters
    ----------
    n : dynd array
        This should have a struct data type. The
        result will be a view of this data.
    replace_ndim : integer
        The number of array dimensions to leave in the
        input going to the fields. For example if the
        input has shape (3,4,2) and replace_ndim is 1,
        the result will have shape (3,4), and each operand
        provided to the field expression will have shape (2).
    fields : list of (field_name, field_type, field_expr)
        These are the fields which are created in the output.
        No fields are retained from the input.
    fnname : string, optional
        The function name, which affects how the resulting
        deferred expression's type is printed.

    Examples
    --------
    >>> from dynd import nd, ndt
    >>> a = nd.array([
    ...         ('A', 1, 2), ('A', 3, 4),
    ...         ('B', 1.5, 2.5), ('A', 0.5, 9),
    ...         ('C', 1, 5), ('B', 2, 2)],
    ...         dtype='{cat: string; x: float32; y: float32}')
    >>> gb = nd.groupby(a, a.cat)
    >>> gb.groups
    nd.array(["A", "B", "C"], strided_dim<string>)
    >>> b = nd.make_computed_fields(gb.eval(), 1,
    ...         fields=[('sum_x', ndt.float32, 'sum(x)'),
    ...                 ('mean_y', ndt.float32, 'mean(y)'),
    ...                 ('max_x', ndt.float32, 'max(x)'),
    ...                 ('max_y', ndt.float32, 'max(y)'),
    ...                 ('min_y', ndt.float32, 'min(y)')])
    >>> from pprint import pprint
    >>> pprint(nd.as_py(b))
    [{u'max_x': 3.0, u'max_y': 9.0, u'mean_y': 5.0, u'min_y': 2.0, u'sum_x': 4.5},
     {u'max_x': 2.0, u'max_y': 2.5, u'mean_y': 2.25, u'min_y': 2.0, u'sum_x': 3.5},
     {u'max_x': 1.0, u'max_y': 5.0, u'mean_y': 5.0, u'min_y': 5.0, u'sum_x': 1.0}]
    """
    n = array(n)
    udt = dtype_of(n).value_type
    if udt.kind != 'struct':
        raise ValueError("parameter 'n' must have kind 'struct'")
    # The field names (and types) of the input struct.
    field_names = as_py(udt.field_names)
    field_types = as_py(udt.field_types)
    # Output struct layout plus the expression computing each member.
    new_field_names = [fname for fname, ftype, fexpr in fields]
    new_field_types = [ftype for fname, ftype, fexpr in fields]
    new_field_expr = [fexpr for fname, ftype, fexpr in fields]
    result_udt = make_cstruct(new_field_types, new_field_names)
    src_udt = extract_dtype(type_of(n), replace_ndim)
    fieldexpr = FieldExpr(new_field_expr, field_names, fnname)
    return elwise_map([n], fieldexpr, result_udt, [src_udt])
| {
"repo_name": "aterrel/dynd-python",
"path": "dynd/nd/computed_fields.py",
"copies": "1",
"size": "7791",
"license": "bsd-2-clause",
"hash": 6055825227964152000,
"line_mean": 37.1911764706,
"line_max": 125,
"alpha_frac": 0.5603901938,
"autogenerated": false,
"ratio": 3.3888647237929534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44492549175929536,
"avg_score": null,
"num_lines": null
} |
__all__ = ['add', 'filter2d']
import numpy as N
import os
import ctypes
# Load the compiled helper library from the directory containing this module.
# BUG FIX: os.path.dirname('__file__') operated on the literal string
# '__file__' (always yielding ''); the module variable __file__ is intended.
_path = os.path.dirname(__file__)
lib = N.ctypeslib.load_library('code', _path)
# Map each add-routine name to the scalar type it operates on.
_typedict = {'zadd' : complex,
             'sadd' : N.single,
             'cadd' : N.csingle,
             'dadd' : float}
# All add routines share the signature (in, in, out, length) -> void.
for name in _typedict.keys():
    val = getattr(lib, name)
    val.restype = None
    _type = _typedict[name]
    val.argtypes = [N.ctypeslib.ndpointer(_type, flags='aligned, contiguous'),
                    N.ctypeslib.ndpointer(_type, flags='aligned, contiguous'),
                    N.ctypeslib.ndpointer(_type, flags='aligned, contiguous,'\
                                          'writeable'),
                    N.ctypeslib.c_intp]
# dfilter2d receives explicit strides/shape pointers, so the input array
# only needs to be aligned (not contiguous).
lib.dfilter2d.restype=None
lib.dfilter2d.argtypes = [N.ctypeslib.ndpointer(float, ndim=2,
                                                flags='aligned'),
                          N.ctypeslib.ndpointer(float, ndim=2,
                                                flags='aligned, contiguous,'\
                                                'writeable'),
                          ctypes.POINTER(N.ctypeslib.c_intp),
                          ctypes.POINTER(N.ctypeslib.c_intp)]
def select(dtype):
    """Return (library add routine, scalar type) matching `dtype`.

    BUG FIX: the original tested ``dtype.char in ['?bBhHf']`` -- membership
    in a one-element list, true only for the impossible six-character code
    '?bBhHf' -- so every dtype fell through to the double routine.
    Character membership in the string itself is intended. The original's
    trailing unreachable ``return func, ntype`` (undefined names) was removed.
    """
    if dtype.char in '?bBhHf':
        return lib.sadd, N.single
    elif dtype.char in 'F':
        return lib.cadd, N.csingle
    elif dtype.char in 'DG':
        return lib.zadd, complex
    else:
        return lib.dadd, float
def add(a, b):
    """Elementwise a + b via the compiled library, with the working
    precision chosen from a's dtype."""
    flags = ['CONTIGUOUS', 'ALIGNED']
    arr = N.asanyarray(a)
    func, scalar_type = select(arr.dtype)
    # Coerce both operands to the chosen type and layout.
    lhs = N.require(arr, scalar_type, flags)
    rhs = N.require(b, scalar_type, flags)
    out = N.empty_like(lhs)
    func(lhs, rhs, out, lhs.size)
    return out
def filter2d(a):
    """Apply the library's 2-D filter to `a`, returning a new array."""
    src = N.require(a, float, ['ALIGNED'])
    dst = N.zeros_like(src)
    # Strides/shape are passed explicitly so non-contiguous input works.
    lib.dfilter2d(src, dst, src.ctypes.strides, src.ctypes.shape)
    return dst
| {
"repo_name": "dagss/numpy_svn",
"path": "doc/numpybook/comparison/ctypes/interface.py",
"copies": "20",
"size": "1893",
"license": "bsd-3-clause",
"hash": 928509765542361700,
"line_mean": 32.2105263158,
"line_max": 78,
"alpha_frac": 0.5229793978,
"autogenerated": false,
"ratio": 3.479779411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["add_noise_to_image"]
from .. import utils
from ..core import ants_image as iio
def add_noise_to_image(image,
                       noise_model,
                       noise_parameters
                       ):
    """
    Add noise to an image using additive Gaussian, salt-and-pepper,
    shot, or speckle noise.

    ANTsR function: `addNoiseToImage`

    Arguments
    ---------
    image : ANTsImage
        scalar image.
    noise_model : string
        'additivegaussian', 'saltandpepper', 'shot', or 'speckle'.
    noise_parameters : tuple or array or float
        'additivegaussian': (mean, standardDeviation)
        'saltandpepper': (probability, saltValue, pepperValue)
        'shot': scale
        'speckle': standardDeviation

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> noise_image = ants.add_noise_to_image(image, 'additivegaussian', (0.0, 1.0))
    >>> noise_image = ants.add_noise_to_image(image, 'saltandpepper', (0.1, 0.0, 100.0))
    >>> noise_image = ants.add_noise_to_image(image, 'shot', 1.0)
    >>> noise_image = ants.add_noise_to_image(image, 'speckle', 1.0)
    """
    image_dimension = image.dimension

    def _wrap(noise):
        # Every noise backend returns a raw pointer; wrap it in a float image.
        return iio.ANTsImage(pixeltype='float',
                             dimension=image_dimension, components=1,
                             pointer=noise).clone('float')

    if noise_model == 'additivegaussian':
        if len(noise_parameters) != 2:
            raise ValueError("Incorrect number of parameters.")
        libfn = utils.get_lib_fn("additiveGaussianNoiseF%i" % image_dimension)
        return _wrap(libfn(image.pointer, noise_parameters[0], noise_parameters[1]))
    elif noise_model == 'saltandpepper':
        if len(noise_parameters) != 3:
            raise ValueError("Incorrect number of parameters.")
        libfn = utils.get_lib_fn("saltAndPepperNoiseF%i" % image_dimension)
        return _wrap(libfn(image.pointer, noise_parameters[0], noise_parameters[1], noise_parameters[2]))
    elif noise_model == 'shot':
        if not isinstance(noise_parameters, (int, float)):
            raise ValueError("Incorrect parameter specification.")
        libfn = utils.get_lib_fn("shotNoiseF%i" % image_dimension)
        return _wrap(libfn(image.pointer, noise_parameters))
    elif noise_model == 'speckle':
        if not isinstance(noise_parameters, (int, float)):
            raise ValueError("Incorrect parameter specification.")
        libfn = utils.get_lib_fn("speckleNoiseF%i" % image_dimension)
        return _wrap(libfn(image.pointer, noise_parameters))
    else:
        raise ValueError("Unknown noise model.")
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/add_noise_to_image.py",
"copies": "1",
"size": "3211",
"license": "apache-2.0",
"hash": 7628047012011927000,
"line_mean": 35.908045977,
"line_max": 99,
"alpha_frac": 0.6206789162,
"autogenerated": false,
"ratio": 3.7555555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838336742785365,
"avg_score": 0.007579545794037877,
"num_lines": 87
} |
__all__ = ['add_pressure_diffusion', 'cross1', 'cross2']
#runas import numpy as np; x = np.empty((3,2,5,7)); y = np.arange(210.).reshape(3,2,5,7); z = np.arange(210.).reshape(3,2,5,7) + 3; cross1(x,y,z)
#runas import numpy as np; x = np.empty((3,2,5,7), dtype=complex); y = np.arange(210.).reshape(3,2,5,7); z = np.arange(210., dtype=complex).reshape(3,2,5,7) + 3; cross2(x,y,z)
#runas import numpy as np; x = np.empty((3,2,5,7), dtype=complex); y = np.arange(210, dtype=np.int64).reshape(3,2,5,7); z = np.arange(210., dtype=complex).reshape(3,2,5,7) + 3; cross2(x,y,z)
#runas import numpy as np; x = np.ones((3,2,5,7), dtype=complex); y = np.arange(210, dtype=complex).reshape(3,2,5,7); z = np.arange(70.).reshape(2,5,7); w = np.arange(210.).reshape(3,2,5,7) + 3; t = np.arange(70., dtype=complex).reshape(2,5, 7) + 1 ; u = np.arange(210.).reshape(3,2,5,7) + 8; add_pressure_diffusion(x,y,z,w,t,u,3.)
#pythran export cross1(float[:,:,:,:], float[:,:,:,:], float[:,:,:,:])
def cross1(c, a, b):
    """Store the cross product a x b (taken over axis 0) into c and return c.

    BUG FIX: the first component read a[0]*b[2] - a[2]*b[1]; the cross
    product's first component is a[1]*b[2] - a[2]*b[1], matching the
    cyclic pattern of the other two components.
    """
    c[0] = a[1] * b[2] - a[2] * b[1]
    c[1] = a[2] * b[0] - a[0] * b[2]
    c[2] = a[0] * b[1] - a[1] * b[0]
    return c
#pythran export cross2(complex[:,:,:,:], float[:,:,:,:], complex[:,:,:,:])
#pythran export cross2(complex[:,:,:,:], int64[:,:,:,:], complex[:,:,:,:])
def cross2(c, a, b):
    """Store 1j * (a x b) into the complex array c (cross1 fills c in place,
    then c is scaled by 1j) and return c."""
    cross1(c, a, b)
    c *= 1j
    return c
#pythran export add_pressure_diffusion(
#    complex[:,:,:,:], complex[:,:,:,:],
#    float[:,:,:], float[:,:,:,:], complex[:,:,:],
#    float[:,:,:,:], float)
def add_pressure_diffusion(dU, U_hat, K2, K, P_hat, K_over_K2, nu):
    """Apply pressure and diffusion terms to dU in place.

    P_hat is overwritten with the projection of dU onto K_over_K2, then
    dU becomes dU - P_hat*K - nu*K2*U_hat. Returns P_hat.
    """
    # Pressure term: component-wise dot product of dU with K/|K|^2.
    P_hat[:] = (dU[0] * K_over_K2[0]
                + dU[1] * K_over_K2[1]
                + dU[2] * K_over_K2[2])
    # Subtract pressure gradient and viscous diffusion, in place.
    dU[:] = dU - P_hat * K - nu * K2 * U_hat
    return P_hat
| {
"repo_name": "serge-sans-paille/pythran",
"path": "pythran/tests/cases/spectralDNS.py",
"copies": "1",
"size": "1762",
"license": "bsd-3-clause",
"hash": 7824285375542159000,
"line_mean": 55.8387096774,
"line_max": 331,
"alpha_frac": 0.5505107832,
"autogenerated": false,
"ratio": 2.224747474747475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8240217496919313,
"avg_score": 0.0070081522056323585,
"num_lines": 31
} |
__all__ = ['add_to']
import logging
import sys
from typing import Callable, Optional, Type, Union
from fastapi import APIRouter, FastAPI, __version__
from fastapi.routing import APIRoute
try:
from fastapi import Request, Response
except ImportError:
# Added in FastAPI v0.51.0
from starlette.requests import Request
from starlette.responses import Response
import rollbar
from .utils import fastapi_min_version, get_installed_middlewares, has_bare_routing
from rollbar.contrib.asgi.integration import integrate
from rollbar.contrib.starlette.requests import store_current_request
from rollbar.lib._async import RollbarAsyncError, try_report
log = logging.getLogger(__name__)
@fastapi_min_version('0.41.0')
@integrate(framework_name=f'fastapi {__version__}')
def add_to(app_or_router: Union[FastAPI, APIRouter]) -> Optional[Type[APIRoute]]:
    """
    Install RollbarLoggingRoute as the route class of an app or router.

    This is the recommended way for integration with FastAPI. Compared to
    middleware, the route handler can fill more data into the payload
    (e.g. the request body).

    app_or_router: FastAPI app or router

    Note: The route handler must be added before adding user routes
    Requirements: FastAPI v0.41.0+

    Example usage:

        from fastapi import FastAPI
        from rollbar.contrib.fastapi import add_to as rollbar_add_to

        app = FastAPI()
        rollbar_add_to(app)

    Returns the installed route class, or None on failure.
    """
    # Refuse to install once user routes exist; they would bypass the handler.
    if not has_bare_routing(app_or_router):
        log.error(
            'RollbarLoggingRoute must to be added to a bare router'
            ' (before adding routes). See docs for more details.'
        )
        return None

    middlewares = get_installed_middlewares(app_or_router)
    if middlewares:
        log.warning(
            f'Detected middleware installed {middlewares}'
            ' while loading Rollbar route handler.'
            ' This can cause in duplicate occurrences.'
        )

    if isinstance(app_or_router, FastAPI):
        _add_to_app(app_or_router)
    elif isinstance(app_or_router, APIRouter):
        _add_to_router(app_or_router)
    else:
        log.error('Error adding RollbarLoggingRoute to application.')
        return None

    return RollbarLoggingRoute
class RollbarLoggingRoute(APIRoute):
    """APIRoute that reports exceptions raised by route handlers to Rollbar,
    then re-raises them."""
    def get_route_handler(self) -> Callable:
        router_handler = super().get_route_handler()
        async def rollbar_route_handler(request: Request) -> Response:
            try:
                # Make the request available to the Rollbar payload builders.
                store_current_request(request)
                return await router_handler(request)
            except Exception:
                # FastAPI requires the `python-multipart` package to parse the content
                if not request._stream_consumed:
                    await request.body()
                    await request.form()
                exc_info = sys.exc_info()
                try:
                    await try_report(exc_info, request)
                except RollbarAsyncError:
                    log.warning(
                        'Failed to report asynchronously. Trying to report synchronously.'
                    )
                    rollbar.report_exc_info(exc_info, request)
                # Re-raise so FastAPI's normal error handling still runs.
                raise
        return rollbar_route_handler
def _add_to_app(app):
    # FastAPI apps expose their router; swap its route class for ours.
    app.router.route_class = RollbarLoggingRoute
def _add_to_router(router):
    # Routers take the route class directly.
    router.route_class = RollbarLoggingRoute
| {
"repo_name": "rollbar/pyrollbar",
"path": "rollbar/contrib/fastapi/routing.py",
"copies": "1",
"size": "3401",
"license": "mit",
"hash": 900334528420618600,
"line_mean": 29.6396396396,
"line_max": 90,
"alpha_frac": 0.6530432226,
"autogenerated": false,
"ratio": 4.073053892215569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226097114815569,
"avg_score": null,
"num_lines": null
} |
__all__ = ("AES",)
import os
import cryptography.hazmat.primitives.ciphers
import cryptography.hazmat.backends
# Short aliases for the `cryptography` primitives used by the AES class below.
_backend = cryptography.hazmat.backends.default_backend()
_aes = cryptography.hazmat.primitives.ciphers.algorithms.AES
_cipher = cryptography.hazmat.primitives.ciphers.Cipher
_ctrmode = cryptography.hazmat.primitives.ciphers.modes.CTR
_gcmmode = cryptography.hazmat.primitives.ciphers.modes.GCM
class AES(object):
    """Convenience wrapper around AES in CTR and GCM modes.

    All methods are static; keys and messages are raw byte strings.
    Ciphertext layouts:
      CTR: IV || ciphertext
      GCM: IV || ciphertext || tag
    """
    # Supported key lengths in bytes (derived from the algorithm's bit sizes).
    key_sizes = [k//8 for k in sorted(_aes.key_sizes)]
    # Cipher block length in bytes.
    block_size = _aes.block_size//8
    @staticmethod
    def KeyGen(size_bytes):
        """Return a fresh random key of *size_bytes* bytes."""
        assert size_bytes in AES.key_sizes
        return os.urandom(size_bytes)
    @staticmethod
    def CTREnc(key, plaintext):
        """Encrypt *plaintext* with CTR mode; the random IV is prepended."""
        nonce = os.urandom(AES.block_size)
        encryptor = _cipher(_aes(key), _ctrmode(nonce), backend=_backend).encryptor()
        body = encryptor.update(plaintext) + encryptor.finalize()
        return nonce + body
    @staticmethod
    def CTRDec(key, ciphertext):
        """Decrypt output produced by CTREnc and return the plaintext."""
        nonce = ciphertext[:AES.block_size]
        decryptor = _cipher(_aes(key), _ctrmode(nonce), backend=_backend).decryptor()
        body = decryptor.update(ciphertext[AES.block_size:])
        return body + decryptor.finalize()
    @staticmethod
    def GCMEnc(key, plaintext):
        """Encrypt *plaintext* with GCM; output is IV || ciphertext || tag."""
        nonce = os.urandom(AES.block_size)
        encryptor = _cipher(_aes(key), _gcmmode(nonce), backend=_backend).encryptor()
        body = encryptor.update(plaintext) + encryptor.finalize()
        # The authentication tag is only available after finalize().
        return nonce + body + encryptor.tag
    @staticmethod
    def GCMDec(key, ciphertext):
        """Decrypt GCMEnc output, verifying the authentication tag."""
        nonce = ciphertext[:AES.block_size]
        # NOTE(review): assumes the GCM tag length equals the block size
        # (16 bytes, the library default) — holds for GCMEnc's output.
        tag = ciphertext[-AES.block_size:]
        decryptor = _cipher(_aes(key), _gcmmode(nonce, tag), backend=_backend).decryptor()
        body = decryptor.update(ciphertext[AES.block_size:-AES.block_size])
        return body + decryptor.finalize()
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/crypto/aes.py",
"copies": "1",
"size": "1732",
"license": "mit",
"hash": 7777166569445729000,
"line_mean": 35.0833333333,
"line_max": 84,
"alpha_frac": 0.6662817552,
"autogenerated": false,
"ratio": 3.6083333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4774615088533333,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Agent', 'BoundCondition', 'MutCondition', 'ModCondition',
           'ActivityCondition', 'default_ns_order']
import logging
from collections import OrderedDict as _o
from indra.statements.statements import modtype_conditions, modtype_to_modclass
from .concept import Concept
from .resources import get_valid_residue, activity_types, amino_acids
logger = logging.getLogger(__name__)
# Namespaces searched, in priority order, when picking a preferred grounding.
default_ns_order = ['FPLX', 'UPPRO', 'HGNC', 'UP', 'CHEBI', 'GO', 'MESH',
                    'MIRBASE', 'DOID', 'HP', 'EFO']
class Agent(Concept):
    """A molecular entity, e.g., a protein.
    Parameters
    ----------
    name : str
        The name of the agent, preferably a canonicalized name such as an
        HGNC gene name.
    mods : list of :py:class:`ModCondition`
        Modification state of the agent.
    bound_conditions : list of :py:class:`BoundCondition`
        Other agents bound to the agent in this context.
    mutations : list of :py:class:`MutCondition`
        Amino acid mutations of the agent.
    activity : :py:class:`ActivityCondition`
        Activity of the agent.
    location : str
        Cellular location of the agent. Must be a valid name (e.g. "nucleus")
        or identifier (e.g. "GO:0005634") for a GO cellular compartment.
    db_refs : dict
        Dictionary of database identifiers associated with this agent.
    """
    def __init__(self, name, mods=None, activity=None,
                 bound_conditions=None, mutations=None,
                 location=None, db_refs=None):
        super(Agent, self).__init__(name, db_refs=db_refs)
        if mods is None:
            self.mods = []
        # Promote to list
        elif isinstance(mods, ModCondition):
            self.mods = [mods]
        else:
            self.mods = mods
        if bound_conditions is None:
            self.bound_conditions = []
        # Promote to list
        elif isinstance(bound_conditions, BoundCondition):
            self.bound_conditions = [bound_conditions]
        else:
            self.bound_conditions = bound_conditions
        if mutations is None:
            self.mutations = []
        # Promote to list
        elif isinstance(mutations, MutCondition):
            self.mutations = [mutations]
        else:
            self.mutations = mutations
        self.activity = activity
        self.location = location
    def matches_key(self):
        """Return a key to identify the identity and state of the Agent."""
        key = (self.entity_matches_key(),
               self.state_matches_key())
        return str(key)
    def entity_matches_key(self):
        """Return a key to identify the identity of the Agent not its state.
        The key is based on the preferred grounding for the Agent, or if not
        available, the name of the Agent is used.
        Returns
        -------
        str
            The key used to identify the Agent.
        """
        db_ns, db_id = self.get_grounding()
        if db_ns and db_id:
            return str((db_ns, db_id))
        return self.name
    def state_matches_key(self):
        """Return a key to identify the state of the Agent."""
        # NOTE: Making a set of the mod matches_keys might break if
        # you have an agent with two phosphorylations at serine
        # with unknown sites.
        act_key = (self.activity.matches_key() if self.activity else None)
        # Bound conditions are sorted by the bound agent's name so the key is
        # independent of their original order.
        key = (sorted([m.matches_key() for m in self.mods]),
               sorted([m.matches_key() for m in self.mutations]),
               act_key, self.location,
               len(self.bound_conditions),
               tuple((bc.agent.matches_key(), bc.is_bound)
                     for bc in sorted(self.bound_conditions,
                                      key=lambda x: x.agent.name)))
        return str(key)
    # Function to get the namespace to look in
    def get_grounding(self, ns_order=None):
        """Return a tuple of a preferred grounding namespace and ID.
        Returns
        -------
        tuple
            A tuple whose first element is a grounding namespace (HGNC,
            CHEBI, etc.) and the second element is an identifier in the
            namespace. If no preferred grounding is available, a tuple of
            Nones is returned.
        """
        return get_grounding(self.db_refs, ns_order=ns_order)
    def isa(self, other, ontology):
        """Return True if this Agent's grounding is an isa/partof child of
        the other Agent's grounding in the given ontology."""
        # Get the namespaces for the comparison
        (self_ns, self_id) = self.get_grounding()
        (other_ns, other_id) = other.get_grounding()
        # If one of the agents isn't grounded to a relevant namespace,
        # there can't be an isa relationship
        if not all((self_ns, self_id, other_ns, other_id)):
            return False
        # Check for isa relationship
        return ontology.isa_or_partof(self_ns, self_id, other_ns,
                                      other_id)
    def refinement_of(self, other, ontology, entities_refined=False):
        """Return True if self is a refinement (i.e. at least as specific a
        version) of other, checking entity, bound conditions, modifications,
        mutations, location and activity.

        If entities_refined is True, the entity-level check is skipped and
        assumed to have already passed.
        """
        from indra.databases import go_client
        # Make sure the Agent types match
        if type(self) != type(other):
            return False
        # ENTITIES
        # Check that the basic entity of the agent either matches or is related
        # to the entity of the other agent. If not, no match.
        # If the entities, match, then we can continue
        if not (entities_refined or
                (self.entity_matches(other) or self.isa(other, ontology))):
            return False
        # BOUND CONDITIONS
        # Now check the bound conditions. For self to be a refinement of
        # other in terms of the bound conditions, it has to include all of the
        # bound conditions in the other agent, and add additional context.
        # TODO: For now, we do not check the bound conditions of the bound
        # conditions.
        # Iterate over the bound conditions in the other agent, and make sure
        # they are all matched in self.
        used_idx = set()
        for bc_other in other.bound_conditions:
            # Iterate over the bound conditions in self to find a match
            bc_found = False
            for idx, bc_self in enumerate(self.bound_conditions):
                if (idx not in used_idx) and \
                   (bc_self.is_bound == bc_other.is_bound) and \
                   bc_self.agent.refinement_of(bc_other.agent, ontology):
                    bc_found = True
                    used_idx.add(idx)
                    break
            # If we didn't find a match for this bound condition in self, then
            # no refinement
            if not bc_found:
                return False
        # MODIFICATIONS
        # Similar to the above, we check that self has all of the modifications
        # of other.
        # Here we need to make sure that a mod in self.mods is only matched
        # once to a mod in other.mods. Otherwise ('phoshporylation') would be
        # considered a refinement of ('phosphorylation', 'phosphorylation')
        matched_indices = []
        # This outer loop checks that each modification in the other Agent
        # is matched.
        for other_mod in other.mods:
            mod_found = False
            # We need to keep track of indices for this Agent's modifications
            # to make sure that each one is used at most once to match
            # the modification of one of the other Agent's modifications.
            for ix, self_mod in enumerate(self.mods):
                if self_mod.refinement_of(other_mod, ontology):
                    # If this modification hasn't been used for matching yet
                    if ix not in matched_indices:
                        # Set the index as used
                        matched_indices.append(ix)
                        mod_found = True
                        break
            # If we didn't find an exact match for this mod in other, then
            # no refinement
            if not mod_found:
                return False
        # MUTATIONS
        # Similar to the above, we check that self has all of the mutations
        # of other.
        matched_indices = []
        # This outer loop checks that each mutation in the other Agent
        # is matched.
        for other_mut in other.mutations:
            mut_found = False
            # We need to keep track of indices for this Agent's mutations
            # to make sure that each one is used at most once to match
            # the mutation of one of the other Agent's mutations.
            for ix, self_mut in enumerate(self.mutations):
                if self_mut.refinement_of(other_mut):
                    # If this mutation hasn't been used for matching yet
                    if ix not in matched_indices:
                        # Set the index as used
                        matched_indices.append(ix)
                        mut_found = True
                        break
            # If we didn't find an exact match for this mut in other, then
            # no refinement
            if not mut_found:
                return False
        # LOCATION
        # If the other location is specified and this one is not then self
        # cannot be a refinement
        if self.location is None:
            if other.location is not None:
                return False
        # If both this location and the other one is specified, we check the
        # hierarchy.
        elif other.location is not None:
            # If the other location is part of this location then
            # self.location is not a refinement
            sl = go_client.get_go_id_from_label(self.location)
            ol = go_client.get_go_id_from_label(other.location)
            if not ontology.isa_or_partof('GO', sl, 'GO', ol):
                return False
        # ACTIVITY
        if self.activity is None:
            if other.activity is not None:
                return False
        elif other.activity is not None:
            if not self.activity.refinement_of(other.activity, ontology):
                return False
        # Everything checks out
        return True
    def equals(self, other):
        """Return True if all attributes of the two Agents are equal.

        Mods, mutations and bound conditions are compared pairwise in order,
        so differently ordered but otherwise equal lists do not compare equal.
        """
        matches = (self.name == other.name) and \
                  (self.activity == other.activity) and \
                  (self.location == other.location) and \
                  (self.db_refs == other.db_refs)
        if len(self.mods) == len(other.mods):
            for s, o in zip(self.mods, other.mods):
                matches = matches and s.equals(o)
        else:
            return False
        if len(self.mutations) == len(other.mutations):
            for s, o in zip(self.mutations, other.mutations):
                matches = matches and s.equals(o)
        else:
            return False
        if len(self.bound_conditions) == len(other.bound_conditions):
            for s, o in zip(self.bound_conditions, other.bound_conditions):
                matches = matches and s.agent.equals(o.agent) and \
                          s.is_bound == o.is_bound
        else:
            return False
        return matches
    def to_json(self):
        """Return a JSON-serializable OrderedDict representing the Agent.

        Empty condition lists and unset activity/location are omitted.
        """
        json_dict = _o({'name': self.name})
        if self.mods:
            json_dict['mods'] = [mc.to_json() for mc in self.mods]
        if self.mutations:
            json_dict['mutations'] = [mc.to_json() for mc in self.mutations]
        if self.bound_conditions:
            json_dict['bound_conditions'] = [bc.to_json() for bc in
                                             self.bound_conditions]
        if self.activity is not None:
            json_dict['activity'] = self.activity.to_json()
        if self.location is not None:
            json_dict['location'] = self.location
        json_dict['db_refs'] = self.db_refs
        return json_dict
    @classmethod
    def _from_json(cls, json_dict):
        """Reconstruct an Agent from its JSON dict; returns None if the
        required name is missing."""
        name = json_dict.get('name')
        db_refs = json_dict.get('db_refs', {})
        mods = json_dict.get('mods', [])
        mutations = json_dict.get('mutations', [])
        activity = json_dict.get('activity')
        bound_conditions = json_dict.get('bound_conditions', [])
        location = json_dict.get('location')
        if not name:
            logger.error('Agent missing name.')
            return None
        if not db_refs:
            db_refs = {}
        agent = Agent(name, db_refs=db_refs)
        agent.mods = [ModCondition._from_json(mod) for mod in mods]
        agent.mutations = [MutCondition._from_json(mut) for mut in mutations]
        agent.bound_conditions = [BoundCondition._from_json(bc)
                                  for bc in bound_conditions]
        agent.location = location
        if activity:
            agent.activity = ActivityCondition._from_json(activity)
        return agent
    def __str__(self):
        attr_strs = []
        if self.mods:
            mod_str = 'mods: '
            mod_str += ', '.join(['%s' % m for m in self.mods])
            attr_strs.append(mod_str)
        if self.activity:
            if self.activity.is_active:
                attr_strs.append('%s' % self.activity.activity_type)
            else:
                attr_strs.append('%s: %s' % (self.activity.activity_type,
                                             self.activity.is_active))
        if self.mutations:
            mut_str = 'muts: '
            mut_str += ', '.join(['%s' % m for m in self.mutations])
            attr_strs.append(mut_str)
        if self.bound_conditions:
            attr_strs += ['bound: [%s, %s]' % (b.agent.name, b.is_bound)
                          for b in self.bound_conditions]
        if self.location:
            attr_strs += ['location: %s' % self.location]
        #if self.db_refs:
        #    attr_strs.append('db_refs: %s' % self.db_refs)
        attr_str = ', '.join(attr_strs)
        agent_name = self.name
        return '%s(%s)' % (agent_name, attr_str)
class BoundCondition(object):
    """Identify Agents bound (or not bound) to a given Agent in a given context.
    Parameters
    ----------
    agent : :py:class:`Agent`
        Instance of Agent.
    is_bound : bool
        Specifies whether the given Agent is bound or unbound in the current
        context. Default is True.
    Examples
    --------
    EGFR bound to EGF:
    >>> egf = Agent('EGF')
    >>> egfr = Agent('EGFR', bound_conditions=[BoundCondition(egf)])
    BRAF *not* bound to a 14-3-3 protein (YWHAB):
    >>> ywhab = Agent('YWHAB')
    >>> braf = Agent('BRAF', bound_conditions=[BoundCondition(ywhab, False)])
    """
    def __init__(self, agent, is_bound=True):
        self.agent = agent
        self.is_bound = is_bound
    def matches(self, other):
        """Return True if this condition's key equals the other's key."""
        return (self.matches_key() == other.matches_key())
    def matches_key(self):
        """Return a string key over the bound agent's key and binding state."""
        # Fix: call matches_key() on the agent. Previously the unbound method
        # object itself was placed in the key, so str(key) embedded the
        # object's memory address and keys were never stable across instances.
        key = (self.agent.matches_key(), self.is_bound)
        return str(key)
    def to_json(self):
        """Return a JSON-serializable OrderedDict for this BoundCondition."""
        json_dict = _o({'agent': self.agent.to_json(),
                        'is_bound': self.is_bound})
        return json_dict
    @classmethod
    def _from_json(cls, json_dict):
        """Reconstruct a BoundCondition from JSON; returns None if the agent
        entry is missing or cannot be deserialized."""
        agent_entry = json_dict.get('agent')
        if agent_entry is None:
            logger.error('BoundCondition missing agent.')
            return None
        agent = Agent._from_json(agent_entry)
        if agent is None:
            return None
        is_bound = json_dict.get('is_bound')
        if is_bound is None:
            logger.warning('BoundCondition missing is_bound, defaulting '
                           'to True.')
            is_bound = True
        bc = BoundCondition(agent, is_bound)
        return bc
class MutCondition(object):
    """Mutation state of an amino acid position of an Agent.
    Parameters
    ----------
    position : str
        Residue position of the mutation in the protein sequence.
    residue_from : str
        Wild-type (unmodified) amino acid residue at the given position.
    residue_to : str
        Amino acid at the position resulting from the mutation.
    Examples
    --------
    Represent EGFR with a L858R mutation:
    >>> egfr_mutant = Agent('EGFR', mutations=[MutCondition('858', 'L', 'R')])
    """
    def __init__(self, position, residue_from, residue_to=None):
        self.position = position
        # Normalize residues to their canonical representation.
        self.residue_from = get_valid_residue(residue_from)
        self.residue_to = get_valid_residue(residue_to)
    def matches(self, other):
        """Return True if the two conditions have identical keys."""
        return self.matches_key() == other.matches_key()
    def matches_key(self):
        """Return a string key over position and both residues."""
        return str((str(self.position), str(self.residue_from),
                    str(self.residue_to)))
    def equals(self, other):
        """Return True if position and both residues are equal."""
        return (self.position == other.position
                and self.residue_from == other.residue_from
                and self.residue_to == other.residue_to)
    def to_json(self):
        """Return a JSON-serializable OrderedDict for this mutation."""
        return _o({'position': self.position,
                   'residue_from': self.residue_from,
                   'residue_to': self.residue_to})
    def to_hgvs(self):
        """Return an HGVS-style protein mutation string, e.g. 'p.Leu858Arg'."""
        res_from = _aa_short_caps(self.residue_from)
        res_to = _aa_short_caps(self.residue_to)
        if res_to and res_from and self.position:
            return 'p.%s%s%s' % (res_from, self.position, res_to)
        # Known source residue and position but unknown target residue.
        if res_to is None and res_from and self.position:
            return 'p.%s%s?' % (res_from, self.position)
        # Not enough information for a specific HGVS string.
        return 'p.?'
    @classmethod
    def _from_json(cls, json_dict):
        """Reconstruct a MutCondition from its JSON representation."""
        return cls(json_dict.get('position'),
                   json_dict.get('residue_from'),
                   json_dict.get('residue_to'))
    def __str__(self):
        return '(%s, %s, %s)' % (self.residue_from, self.position,
                                 self.residue_to)
    def __repr__(self):
        return 'MutCondition' + str(self)
    def refinement_of(self, other):
        """Return True if self is at least as specific as other."""
        def _refines(mine, theirs):
            # Equal values match; a concrete value refines an unspecified one.
            return mine == theirs or (mine is not None and theirs is None)
        return (_refines(self.residue_from, other.residue_from)
                and _refines(self.residue_to, other.residue_to)
                and _refines(self.position, other.position))
class ModCondition(object):
    """Post-translational modification state at an amino acid position.
    Parameters
    ----------
    mod_type : str
        The type of post-translational modification, e.g., 'phosphorylation'.
        Valid modification types currently include: 'phosphorylation',
        'ubiquitination', 'sumoylation', 'hydroxylation', and 'acetylation'.
        If an invalid modification type is passed an InvalidModTypeError is
        raised.
    residue : str or None
        String indicating the modified amino acid, e.g., 'Y' or 'tyrosine'.
        If None, indicates that the residue at the modification site is
        unknown or unspecified.
    position : str or None
        String indicating the position of the modified amino acid, e.g., '202'.
        If None, indicates that the position is unknown or unspecified.
    is_modified : bool
        Specifies whether the modification is present or absent. Setting the
        flag to False specifies that the Agent with the ModCondition is
        unmodified at the site.
    Examples
    --------
    Doubly-phosphorylated MEK (MAP2K1):
    >>> phospho_mek = Agent('MAP2K1', mods=[
    ... ModCondition('phosphorylation', 'S', '202'),
    ... ModCondition('phosphorylation', 'S', '204')])
    ERK (MAPK1) unphosphorylated at tyrosine 187:
    >>> unphos_erk = Agent('MAPK1', mods=(
    ... ModCondition('phosphorylation', 'Y', '187', is_modified=False)))
    """
    def __init__(self, mod_type, residue=None, position=None,
                 is_modified=True):
        if mod_type not in modtype_conditions:
            logger.warning('Unknown modification type: %s' % mod_type)
        self.mod_type = mod_type
        self.residue = get_valid_residue(residue)
        # Integer positions are normalized to strings.
        if isinstance(position, int):
            self.position = str(position)
        else:
            self.position = position
        self.is_modified = is_modified
    def refinement_of(self, other, ontology):
        """Return True if self is at least as specific as other.
        The polarity (is_modified) must match; mod type must be equal or an
        ontological child; residue and position must be equal or specified
        in self while unspecified in other.
        """
        if self.is_modified != other.is_modified:
            return False
        type_match = (self.mod_type == other.mod_type or
                      ontology.isa('INDRA_MODS', self.mod_type,
                                   'INDRA_MODS', other.mod_type))
        residue_match = (self.residue == other.residue or
                         (self.residue is not None and other.residue is None))
        pos_match = (self.position == other.position or
                     (self.position is not None and other.position is None))
        return (type_match and residue_match and pos_match)
    def matches(self, other):
        """Return True if the two conditions have identical keys."""
        return (self.matches_key() == other.matches_key())
    def matches_key(self):
        """Return a string key over type, residue, position and polarity."""
        key = (str(self.mod_type), str(self.residue),
               str(self.position), str(self.is_modified))
        return str(key)
    def __str__(self):
        ms = '%s' % self.mod_type
        if self.residue is not None:
            ms += ', %s' % self.residue
        if self.position is not None:
            ms += ', %s' % self.position
        if not self.is_modified:
            ms += ', False'
        ms = '(' + ms + ')'
        return ms
    def __repr__(self):
        return str(self)
    def to_json(self):
        """Return a JSON-serializable OrderedDict; unset fields are omitted."""
        json_dict = _o({'mod_type': self.mod_type})
        if self.residue is not None:
            json_dict['residue'] = self.residue
        if self.position is not None:
            json_dict['position'] = self.position
        json_dict['is_modified'] = self.is_modified
        return json_dict
    @classmethod
    def _from_json(cls, json_dict):
        """Reconstruct a ModCondition from JSON; returns None if mod_type is
        missing, and defaults is_modified to True when absent."""
        mod_type = json_dict.get('mod_type')
        if not mod_type:
            logger.error('ModCondition missing mod_type.')
            return None
        if mod_type not in modtype_to_modclass.keys():
            logger.warning('Unknown modification type: %s' % mod_type)
        residue = json_dict.get('residue')
        position = json_dict.get('position')
        is_modified = json_dict.get('is_modified')
        if is_modified is None:
            logger.warning('ModCondition missing is_modified, defaulting '
                           'to True')
            is_modified = True
        mc = ModCondition(mod_type, residue, position, is_modified)
        return mc
    def equals(self, other):
        """Return True if type, residue, position and polarity are all equal."""
        type_match = (self.mod_type == other.mod_type)
        residue_match = (self.residue == other.residue)
        pos_match = (self.position == other.position)
        is_mod_match = (self.is_modified == other.is_modified)
        return (type_match and residue_match and pos_match and is_mod_match)
    def __hash__(self):
        return hash(self.matches_key())
class ActivityCondition(object):
    """An active or inactive state of a protein.
    Examples
    --------
    Kinase-active MAP2K1:
    >>> mek_active = Agent('MAP2K1',
    ... activity=ActivityCondition('kinase', True))
    Transcriptionally inactive FOXO3:
    >>> foxo_inactive = Agent('FOXO3',
    ... activity=ActivityCondition('transcription', False))
    Parameters
    ----------
    activity_type : str
        The type of activity, e.g. 'kinase'. The basic, unspecified molecular
        activity is represented as 'activity'. Examples of other activity
        types are 'kinase', 'phosphatase', 'catalytic', 'transcription',
        etc.
    is_active : bool
        Specifies whether the given activity type is present or absent.
    """
    def __init__(self, activity_type, is_active):
        if activity_type not in activity_types:
            logger.warning('Invalid activity type: %s' % activity_type)
        self.activity_type = activity_type
        self.is_active = is_active
    def refinement_of(self, other, ontology):
        """Return True if self is a refinement of the other activity.
        Requires matching polarity and either an identical activity type or
        one that is an ontological child of the other's type.
        """
        if self.is_active != other.is_active:
            return False
        if self.activity_type == other.activity_type:
            return True
        if ontology.isa('INDRA_ACTIVITIES', self.activity_type,
                        'INDRA_ACTIVITIES', other.activity_type):
            return True
        # Fix: previously fell through and returned None implicitly; return
        # an explicit bool for consistency with the other refinement_of
        # methods (same truthiness, so callers are unaffected).
        return False
    def equals(self, other):
        """Return True if activity type and polarity are both equal."""
        type_match = (self.activity_type == other.activity_type)
        is_act_match = (self.is_active == other.is_active)
        return (type_match and is_act_match)
    def matches(self, other):
        """Return True if the two conditions have identical keys."""
        return self.matches_key() == other.matches_key()
    def matches_key(self):
        """Return a string key over activity type and polarity."""
        key = (str(self.activity_type), str(self.is_active))
        return str(key)
    def to_json(self):
        """Return a JSON-serializable OrderedDict for this condition."""
        json_dict = _o({'activity_type': self.activity_type,
                        'is_active': self.is_active})
        return json_dict
    @classmethod
    def _from_json(cls, json_dict):
        """Reconstruct an ActivityCondition from JSON, defaulting missing
        fields to the generic 'activity' type and is_active=True."""
        activity_type = json_dict.get('activity_type')
        is_active = json_dict.get('is_active')
        if not activity_type:
            logger.error('ActivityCondition missing activity_type, ' +
                         'defaulting to `activity`')
            activity_type = 'activity'
        if is_active is None:
            logger.warning('ActivityCondition missing is_active, ' +
                           'defaulting to True')
            is_active = True
        ac = ActivityCondition(activity_type, is_active)
        return ac
    def __str__(self):
        s = '%s' % self.activity_type
        if not self.is_active:
            s += ', False'
        s = '(' + s + ')'
        return s
    def __repr__(self):
        return str(self)
def _aa_short_caps(res):
if res is None:
return None
res_info = amino_acids.get(res)
if not res_info:
return None
return res_info['short_name'].capitalize()
def get_grounding(db_refs, ns_order=None):
    """Return a tuple of a preferred grounding namespace and ID.
    Parameters
    ----------
    db_refs : dict
        A dict of namespace to ID references associated with an agent.
    ns_order : list
        A list of namespaces which are in order of priority. The first
        matched namespace will be used as the grounding.
    Returns
    -------
    tuple
        A tuple whose first element is a grounding namespace (HGNC,
        CHEBI, etc.) and the second element is an identifier in the
        namespace. If no preferred grounding is available, a tuple of
        Nones is returned.
    """
    order = default_ns_order if ns_order is None else ns_order
    for namespace in order:
        ref = db_refs.get(namespace)
        # Skip missing or empty references.
        if not ref:
            continue
        # Some references are stored as a list/tuple of IDs; use the first.
        if isinstance(ref, (list, tuple)):
            ref = ref[0]
        return namespace, ref
    return None, None
| {
"repo_name": "johnbachman/indra",
"path": "indra/statements/agent.py",
"copies": "3",
"size": "27030",
"license": "bsd-2-clause",
"hash": -2287019643849780000,
"line_mean": 36.3858921162,
"line_max": 85,
"alpha_frac": 0.5722900481,
"autogenerated": false,
"ratio": 4.003258293838862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6075548341938862,
"avg_score": null,
"num_lines": null
} |
__all__ = ['AGENT', 'SUMMARY', 'CONTROLS', 'HELP', 'COMMENT_FILE',
           'SUBMISSION_FILE', 'COMMENT_EDIT_FILE']
# User agent format string; {version} is filled in by the caller.
AGENT = """\
desktop:https://github.com/michael-lazar/rtv:{version}\
(by /u/civilization_phaze_3)\
"""
# One-paragraph description of the program.
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
"""
# Overview of the two browsing modes.
CONTROLS = """
Controls
--------
RTV currently supports browsing both subreddits and individual submissions.
In each mode the controls are slightly different. In subreddit mode you can
browse through the top submissions on either the front page or a specific
subreddit. In submission mode you can view the self text for a submission and
browse comments.
"""
# Keyboard command reference.
HELP = """
Basic Commands
`j/k` or `UP/DOWN` : Move the cursor up/down
`m/n` or `PgUp/PgDn`: Jump to the previous/next page
`o` or `ENTER` : Open the selected item as a webpage
`r` or `F5` : Refresh page content
`u` : Log in or switch accounts
`?` : Show the help screen
`q/Q` : Quit/Force quit
Authenticated Commands
`a/z` : Upvote/downvote
`c` : Compose a new post or comment
`e` : Edit an existing post or comment
`d` : Delete an existing post or comment
`i` : Display new messages prompt
`s` : Open/close subscribed subreddits list
Subreddit Mode
`l` or `RIGHT` : Enter the selected submission
`/` : Open a prompt to switch subreddits
`f` : Open a prompt to search the current subreddit
Submission Mode
`h` or `LEFT` : Return to subreddit mode
`SPACE` : Fold the selected comment, or load additional comments
"""
# Editor template for composing a reply; per the template text, '#' lines
# are ignored and an empty message aborts.
COMMENT_FILE = u"""
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Replying to {author}'s {type}
{content}
"""
# Editor template for editing an existing comment.
COMMENT_EDIT_FILE = u"""{content}
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Editing your comment
"""
# Editor template for composing a new submission (first line is the title).
SUBMISSION_FILE = u"""{content}
# Please enter your submission. Lines starting with '#' will be ignored,
# and an empty field aborts the submission.
#
# The first line will be interpreted as the title
# The following lines will be interpreted as the content
#
# Posting to {name}
"""
| {
"repo_name": "yskmt/rtv",
"path": "rtv/docs.py",
"copies": "1",
"size": "2401",
"license": "mit",
"hash": 8857363787128090000,
"line_mean": 31.0133333333,
"line_max": 79,
"alpha_frac": 0.6426488963,
"autogenerated": false,
"ratio": 3.67687595712098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.981952485342098,
"avg_score": 0,
"num_lines": 75
} |
__all__ = ("AggregateTestCaseSummarySchema", "TestCaseSummarySchema", "TestCaseSchema")
from collections import defaultdict
from marshmallow import Schema, fields, pre_dump
from sqlalchemy import and_
from sqlalchemy.orm import subqueryload_all
from typing import Dict, List, Optional
from uuid import UUID
from zeus.config import db
from zeus.constants import Result, Status
from zeus.exceptions import UnknownRevision
from zeus.models import Build, Job, Revision, TestCase
from zeus.utils.aggregation import aggregate_result
from zeus.vcs import vcs_client
from .build import BuildSchema
from .fields import ResultField
from .job import JobSchema
def find_failure_origins(
    build: Build, test_failures: List[str]
) -> Dict[str, Optional[UUID]]:
    """
    Attempt to find originating causes of failures.
    Returns a mapping of {TestCase.hash: Build.id}, where the value is the
    id of the build in which the failure is believed to have originated
    (None when the failure predates every examined build).
    """
    if not test_failures:
        return {}
    repo = build.repository
    # Restrict the search to ancestors of this build's revision when the
    # VCS log is available.
    try:
        valid_revisions = [
            c["sha"]
            for c in vcs_client.log(repo.id, limit=100, parent=build.revision_sha)
        ]
    except UnknownRevision:
        valid_revisions = []
    filters = [
        Build.repository_id == build.repository_id,
        Build.status == Status.finished,
        Build.revision_sha != build.revision_sha,
    ]
    if valid_revisions:
        filters.extend([Build.revision_sha.in_(valid_revisions)])
    # NOTE(dcramer): many of these queries ignore tenant constraints
    # find any existing failures in the previous runs
    # to do this we first need to find the last passing build
    last_pass = (
        db.session.query(Revision.sha, Revision.date_created)
        .join(
            Build,
            and_(
                Revision.sha == Build.revision_sha,
                Revision.repository_id == Build.repository_id,
            ),
        )
        .filter(
            Build.result == Result.passed,
            Revision.date_created <= build.revision.date_created,
            Build.date_created <= build.date_created,
            *filters,
        )
        .order_by(Revision.date_created.desc())
        .first()
    )
    if last_pass:
        last_pass_revision_sha, last_pass_date = last_pass
        # We have to query all runs between build and last_pass. Because we're
        # paranoid about performance, we limit this to 100 results.
        previous_build_ids = [
            r
            for r, in db.session.query(Build.id)
            .join(
                Revision,
                and_(
                    Revision.sha == Build.revision_sha,
                    Revision.repository_id == Build.repository_id,
                ),
            )
            .filter(
                Build.result == Result.failed,
                Build.date_created >= last_pass_date,
                Revision.date_created >= last_pass_date,
                Revision.sha != last_pass_revision_sha,
                *filters,
            )
            .order_by(Revision.date_created.desc())[:100]
        ]
    else:
        # No passing build found; fall back to the most recent failed builds.
        previous_build_ids = [
            r
            for r, in db.session.query(Build.id)
            .join(
                Revision,
                and_(
                    Revision.sha == Build.revision_sha,
                    Revision.repository_id == Build.repository_id,
                ),
            )
            .filter(Build.result == Result.failed, *filters)
            .order_by(Revision.date_created.desc())[:100]
        ]
    if not previous_build_ids:
        return {}
    # we now have a list of previous_runs so let's find all test failures in
    # these runs
    queryset = (
        db.session.query(TestCase.hash, Job.build_id)
        .join(Job, Job.id == TestCase.job_id)
        .filter(
            Job.build_id.in_(previous_build_ids),
            Job.status == Status.finished,
            Job.result == Result.failed,
            TestCase.result == Result.failed,
            TestCase.hash.in_(test_failures),
        )
        .group_by(TestCase.hash, Job.build_id)
    )
    # Map each previous build id to the set of test hashes that failed in it.
    previous_test_failures = defaultdict(set)
    for test_hash, build_id in queryset:
        previous_test_failures[build_id].add(test_hash)
    failures_at_build: Dict[str, Optional[UUID]] = {}
    searching = set(t for t in test_failures)
    # last_checked_run = build.id
    last_checked_run = None
    # Walk backwards through the failed builds (newest first); when a test
    # stops failing, the previously-checked build is where it originated.
    for p_build in previous_build_ids:
        p_build_failures = previous_test_failures[p_build]
        # we have to copy the set as it might change size during iteration
        for f_test in list(searching):
            if f_test not in p_build_failures:
                failures_at_build[f_test] = last_checked_run
                searching.remove(f_test)
        last_checked_run = p_build
    # Anything still failing in the oldest examined build is attributed to it.
    for f_test in searching:
        failures_at_build[f_test] = last_checked_run
    return failures_at_build
class ExecutionSchema(Schema):
    """Serializes a single run (execution) of a test case."""
    id = fields.UUID(dump_only=True)
    result = ResultField(required=True)
    duration = fields.Number()
    job_id = fields.UUID(required=True)
class AggregateTestCaseSummarySchema(Schema):
    """Serializes test cases aggregated across jobs, where each test has a
    list of runs and an overall aggregated result."""
    name = fields.Str(required=True)
    hash = fields.Str(dump_only=True)
    runs = fields.List(fields.Nested(ExecutionSchema), required=True)
    result = ResultField(required=True)
    message = fields.Str(required=False)
    build = fields.Nested(
        BuildSchema(exclude=("repository", "revision", "stats")), required=False
    )
    origin_build = fields.Nested(
        BuildSchema(exclude=("repository", "revision", "stats")), required=False
    )
    @pre_dump(pass_many=True)
    def process_aggregates(self, data, many, **kwargs):
        """Expand aggregate rows into dicts, resolving the associated build
        and (when requested) the build each failure originated in.

        Each element of ``i.runs`` is a (id, job_id, duration, result) tuple,
        as indexed below.
        """
        if not data:
            return data
        # Normalize to a list so single objects and lists share one code path.
        if not many:
            items = [data]
        else:
            items = data
        if "origin_build" in self.exclude or "build" not in self.context:
            failure_origins = {}
        else:
            # TODO(dcramer): technically this could support multiple builds,
            # or identify the referenced build
            failure_origins = find_failure_origins(
                self.context["build"],
                [
                    i.hash
                    for i in items
                    if any(Result(int(e[3])) == Result.failed for e in i.runs)
                ],
            )
        if "build" in self.exclude or not hasattr(items[0], "build_id"):
            builds = {}
        else:
            builds = {
                b.id: b
                for b in Build.query.filter(
                    Build.id.in_(i.build_id for i in items)
                ).options(subqueryload_all(Build.authors))
            }
        if failure_origins:
            origin_builds = {
                b.id: b
                for b in Build.query.filter(
                    Build.id.in_(frozenset(failure_origins.values()))
                ).options(subqueryload_all(Build.authors))
            }
        else:
            origin_builds = {}
        results = [
            {
                "hash": i.hash,
                "name": i.name,
                "runs": [
                    {
                        "id": UUID(e[0]),
                        "job_id": UUID(e[1]),
                        "duration": int(e[2]),
                        "result": Result(int(e[3])),
                    }
                    for e in i.runs
                ],
                "build": builds.get(getattr(i, "build_id", None)),
                "origin_build": origin_builds.get(failure_origins.get(i.hash)),
                "result": aggregate_result(Result(int(e[3])) for e in i.runs),
            }
            for i in items
        ]
        if many:
            return results
        return results[0]
class TestCaseSummarySchema(Schema):
    """Serializes a single test case together with its job."""
    id = fields.UUID(dump_only=True)
    name = fields.Str(required=True)
    hash = fields.Str(dump_only=True)
    result = ResultField(required=True)
    duration = fields.Number()
    job = fields.Nested(JobSchema, required=True)
class TestCaseSchema(TestCaseSummarySchema):
    """TestCaseSummarySchema extended with the optional message field."""
    message = fields.Str(required=False)
| {
"repo_name": "getsentry/zeus",
"path": "zeus/api/schemas/testcase.py",
"copies": "1",
"size": "8085",
"license": "apache-2.0",
"hash": 8716769067448475000,
"line_mean": 31.4698795181,
"line_max": 87,
"alpha_frac": 0.5573283859,
"autogenerated": false,
"ratio": 4.148281169830683,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205609555730683,
"avg_score": null,
"num_lines": null
} |
# Sentinel values used when interpreting query kwargs in select_best_table.
ALL = 'all'
SHOW = 'show'
SHOW2 = '.show'
LEN = '_len'
def compute_allowed(table_dict):
    """For each table, record the columns it does NOT define.

    Mutates and returns *table_dict*: every value is replaced with the set
    of column names that exist on at least one table in the mapping but are
    absent from that particular table.
    """
    # First pass: union of every column name across all tables.
    all_columns = set()
    for tbl in table_dict:
        all_columns |= set(tbl.__table__.columns.keys())
    # Second pass: a column is "disallowed" for a table when missing from it.
    for tbl in table_dict:
        table_dict[tbl] = all_columns - set(tbl.__table__.columns.keys())
    return table_dict
def compute_possible(col_list, allowable_tables):
    """Map each column name to the tables that define it.

    :param col_list: iterable of column names to look up.
    :param allowable_tables: tables to search; should be ordered
        smallest => largest so callers can take the first (cheapest) match.
    :return: dict mapping column name -> list of tables containing it,
        preserving the order of ``allowable_tables``.  Columns found in no
        table are omitted from the result.
    """
    result = {}
    for col in col_list:
        for table in allowable_tables:
            if col in table.__table__.columns.keys():
                # setdefault replaces the manual "if col not in result"
                # initialization (which was also written non-idiomatically
                # as `not col in result`).
                result.setdefault(col, []).append(table)
    return result
def prepare(cols, tables):
    """Precompute the lookup structures used by select_best_table.

    Returns a pair ``(allowed_when_not, possible_tables)``: the first maps
    each table to the set of columns it lacks, the second maps each column
    name to the tables that provide it.
    """
    lookup = compute_possible(cols, tables)
    missing_by_table = compute_allowed({t: set() for t in tables})
    return missing_by_table, lookup
def select_best_table(kwargs, allowed_when_not, possible_tables):
    """Pick the smallest table that can answer the query in *kwargs*.

    :param kwargs: query parameters; a value starting with SHOW (or
        containing SHOW2) marks the "show" column, a value equal to ALL
        marks a wildcard that the table need not provide.
    :param allowed_when_not: table -> set of columns that table lacks.
    :param possible_tables: column -> tables providing it, ordered
        smallest -> largest (see prepare / compute_possible).
    :return: the first (smallest) suitable table, or None when no table
        matches or no show column was identified.
    """
    # -- step 1, get show column and the columns the query actually
    #    constrains (anything not wildcarded with ALL).
    show = "no_show"
    required_columns = []
    for column_name, value in kwargs.items():
        if str(value).startswith(SHOW) or SHOW2 in str(value):
            show = column_name
        if str(value) != ALL:
            required_columns.append(column_name)
    # -- step 2, given the show + kwargs determine best table.
    # Robustness fix: use .get so a missing/unknown show column returns
    # None instead of raising KeyError.
    table_choices = possible_tables.get(show, [])
    for table in table_choices:
        # -- this list is ordered, from smallest -> biggest
        # -- so if we find a match, we take it
        disallowed_columns = allowed_when_not[table]
        if not disallowed_columns.intersection(required_columns):
            return table
    return None
| {
"repo_name": "DataViva/dataviva-site",
"path": "dataviva/utils/table_helper.py",
"copies": "1",
"size": "1757",
"license": "mit",
"hash": -8428654086332442000,
"line_mean": 34.14,
"line_max": 82,
"alpha_frac": 0.6277746158,
"autogenerated": false,
"ratio": 3.878587196467991,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006361812267991,
"avg_score": null,
"num_lines": null
} |
__all__ = ['all_warnings', 'expected_warnings']
from contextlib import contextmanager
import sys
import warnings
import inspect
import re
@contextmanager
def all_warnings():
    """
    Context for use in testing to ensure that all warnings are raised.
    Examples
    --------
    >>> import warnings
    >>> def foo():
    ...     warnings.warn(RuntimeWarning("bar"))
    We raise the warning once, while the warning filter is set to "once".
    Hereafter, the warning is invisible, even with custom filters:
    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter('once')
    ...     foo()
    We can now run ``foo()`` without a warning being raised:
    >>> from numpy.testing import assert_warns
    >>> foo()
    To catch the warning, we call in the help of ``all_warnings``:
    >>> with all_warnings():
    ...     assert_warns(RuntimeWarning, foo)
    """
    # Whenever a warning is triggered, Python adds a __warningregistry__
    # member to the *calling* module. The exercise here is to find
    # and eradicate all those breadcrumbs that were left lying around.
    #
    # We proceed by first searching all parent calling frames and explicitly
    # clearing their warning registries (necessary for the doctests above to
    # pass). Then, we search for all submodules of skimage and clear theirs
    # as well (necessary for the skimage test suite to pass).
    frame = inspect.currentframe()
    if frame:
        for f in inspect.getouterframes(frame):
            # NOTE(review): writes to f_locals on arbitrary frames may not
            # persist for function frames (CPython copies locals) — this
            # relies on the registry living in module-level frames.
            f[0].f_locals['__warningregistry__'] = {}
    # Drop the frame reference promptly to avoid a reference cycle.
    del frame
    for mod_name, mod in list(sys.modules.items()):
        # Skip six.moves: touching its attributes triggers lazy imports.
        if 'six.moves' in mod_name:
            continue
        try:
            mod.__warningregistry__.clear()
        except AttributeError:
            # Module has no warning registry yet — nothing to clear.
            pass
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        yield w
@contextmanager
def expected_warnings(matching):
    r"""Context for use in testing to catch known warnings matching regexes

    Parameters
    ----------
    matching : list of strings or compiled regexes
        Regexes for the desired warning to catch

    Examples
    --------
    >>> from skimage import data, img_as_ubyte, img_as_float
    >>> with expected_warnings(['precision loss']):
    ...     d = img_as_ubyte(img_as_float(data.coins()))

    Notes
    -----
    Uses `all_warnings` to ensure all warnings are raised.
    Upon exiting, it checks the recorded warnings for the desired matching
    pattern(s).
    Raises a ValueError if any match was not found or an unexpected
    warning was raised.
    Allows for three types of behaviors: "and", "or", and "optional" matches.
    This is done to accommodate different build environments or loop
    conditions that may produce different warnings. The behaviors can be
    combined.
    If you pass multiple patterns, you get an orderless "and", where all of
    the warnings must be raised.
    If you use the "|" operator in a pattern, you can catch one of several
    warnings.
    Finally, you can use "|\A\Z" in a pattern to signify it as optional.
    """
    # The docstring and the literal below are raw strings: "\A" and "\Z"
    # are not valid escape sequences, and the previous plain strings relied
    # on Python's lenient handling of unknown escapes (now a warning).
    with all_warnings() as w:
        # enter context
        yield w
        # exited user context, check the recorded warnings
        # NOTE(review): the "|\A\Z" optional marker is detected by string
        # splitting, so it only works for string patterns, not compiled
        # regexes — confirm whether compiled patterns are actually passed.
        remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
        for warn in w:
            found = False
            for match in matching:
                if re.search(match, str(warn.message)) is not None:
                    found = True
                    if match in remaining:
                        remaining.remove(match)
            if not found:
                raise ValueError('Unexpected warning: %s' % str(warn.message))
        if len(remaining) > 0:
            msg = 'No warning raised matching:\n%s' % '\n'.join(remaining)
            raise ValueError(msg)
| {
"repo_name": "GaZ3ll3/scikit-image",
"path": "skimage/_shared/_warnings.py",
"copies": "23",
"size": "3850",
"license": "bsd-3-clause",
"hash": 2188224426416957700,
"line_mean": 32.4782608696,
"line_max": 84,
"alpha_frac": 0.6285714286,
"autogenerated": false,
"ratio": 4.4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["AndroidDevice"]
from adb_impl import AdbImpl
from status import *
class AndroidDevice(object):
  """Represents one Android device reachable through adb.

  Tracks the currently launched package so SetUp/TearDown can be paired.
  """

  def __init__(self, device_serial="", adb=None, release_callback=None):
    """
    device_serial: adb serial of the device ("" selects the default device).
    adb: adb implementation; None (the default) creates a fresh AdbImpl.
        Bug fix: the previous default `adb=AdbImpl()` was evaluated once at
        class-definition time and shared by every instance (mutable default
        argument); each instance now gets its own implementation.
    release_callback: callable invoked with the serial on destruction.
    """
    self.device_serial = device_serial
    self.release_callback = release_callback
    self.xdb = adb if adb is not None else AdbImpl()
    self.active_package = ""

  def __del__(self):
    # Give the device back to whatever pool handed it out.
    if callable(self.release_callback):
      self.release_callback(self.device_serial)
    return

  def ForwardDevtoolsPort(self, package, process, device_socket, port):
    """Forward local `port` to the app's devtools endpoint; returns a Status."""
    if not device_socket:
      # Assume this is a WebView app.
      if not process:
        (status, pid) = self.xdb.GetPidByName(self.device_serial, package)
      else:
        (status, pid) = self.xdb.GetPidByName(self.device_serial, process)
      if status.IsError():
        if not process:
          status.AddDetails("process name must be specified if not equal to package name")
        return status
    return self.xdb.ForwardPort(self.device_serial, port, package)

  def SetUp(self, package, activity, process, args, use_running_app, port):
    """Check installation, optionally (re)launch, and port-forward `package`.

    Returns a Status; on a fresh launch the package is recorded as active.
    """
    if self.active_package:
      return Status(kUnknownError, self.active_package + " was launched and has not been quit")
    status = self.xdb.CheckAppInstalled(self.device_serial, package)
    if status.IsError():
      return status
    command_line_file = ""
    device_socket = ""
    exec_name = ""
    if not use_running_app:
      status = self.xdb.ClearAppData(self.device_serial, package)
      if status.IsError():
        return status
      if command_line_file:
        status = self.xdb.SetCommandLineFile(self.device_serial, exec_name, args)
        if status.IsError():
          return status
      status = self.xdb.Launch(self.device_serial, package, activity)
      if status.IsError():
        return status
      self.active_package = package
    # NOTE(review): the Status returned by ForwardDevtoolsPort is discarded,
    # so forwarding failures are silent here — confirm that is intended.
    self.ForwardDevtoolsPort(package, process, device_socket, port)
    return status

  def TearDown(self):
    """Force-stop the active package, if any; returns a Status."""
    if self.active_package:
      status = self.xdb.ForceStop(self.device_serial, self.active_package)
      if status.IsError():
        return status
      self.active_package = ""
    return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/android_device.py",
"copies": "1",
"size": "2164",
"license": "bsd-3-clause",
"hash": -1060172202398184200,
"line_mean": 33.3492063492,
"line_max": 95,
"alpha_frac": 0.6654343808,
"autogenerated": false,
"ratio": 3.8098591549295775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9826402435034426,
"avg_score": 0.029778220139030302,
"num_lines": 63
} |
__all__ = ['AnonRef', 'InstRef', 'DefnRef', 'ArrayRef', 'TupleRef']
class Ref:
    """Abstract base for references; subclasses implement qualifiedname()."""

    def __str__(self):
        # Delegate the printable form to the subclass-specific qualified name.
        printable = self.qualifiedname()
        return printable
class AnonRef(Ref):
    """Reference with no owning instance or definition; may be unnamed."""
    def __init__(self, name=None):
        self.name = name
    def qualifiedname(self, sep='.'):
        """Return the name (or '' when anonymous); `sep` is unused here."""
        return str(self.name) if self.name else ""
    def anon(self):
        """True when this reference has no name.

        Idiom fix: `not self.name` replaces the redundant
        `False if self.name else True`.
        """
        return not self.name
class InstRef(Ref):
    """Reference to a port on a specific instance."""
    def __init__(self, inst, name):
        # An instance must be supplied; name may be a string or an index.
        assert inst
        self.inst = inst # Inst
        self.name = name
    def qualifiedname(self, sep='.'):
        """Return '<inst>[n]' for integer names (dotted form only),
        otherwise '<inst><sep><name>'.

        NOTE(review): `long` is Python 2 only, so this module targets
        Python 2; with a separator other than '.', an integer name falls
        through to the generic '<inst><sep><n>' form — confirm intended.
        """
        name = self.name
        if isinstance(self.name, (int, long)):
            # Hack, Hack, Hack
            if sep == '.':
                return str(self.inst) + '[%d]' % self.name
        return str(self.inst) + sep + str(name)
    def anon(self):
        # An instance reference is never anonymous.
        return False
class DefnRef(Ref):
    """Reference to a port on a definition."""
    def __init__(self, defn, name):
        assert defn
        # Only the definition's printable form is retained.
        self.defn = str(defn) # Definition
        self.name = name
    def qualifiedname(self, sep='.'):
        """Dotted form includes the definition; other separators drop it."""
        return self.defn + sep + self.name if sep == '.' else self.name
    def anon(self):
        return False
class ArrayRef(Ref):
    """Reference to a single element of an Array."""
    def __init__(self, array, index):
        self.array = array # Array
        self.index = index
    def qualifiedname(self, sep='.'):
        base = self.array.name.qualifiedname(sep=sep)
        return '%s[%d]' % (base, self.index)
    def anon(self):
        # Anonymous iff the underlying array is anonymous.
        return self.array.name.anon()
class TupleRef(Ref):
    """Reference to a single member of a Tuple."""
    def __init__(self, tuple, index):
        self.tuple = tuple # Tuple
        self.index = index
    def qualifiedname(self, sep='.'):
        prefix = self.tuple.name.qualifiedname(sep=sep)
        return prefix + sep + str(self.index)
    def anon(self):
        # Anonymous iff the underlying tuple is anonymous.
        return self.tuple.name.anon()
if __name__ == '__main__':
    # Smoke test: an unnamed AnonRef prints as the empty string,
    # a named one prints its name, and both are Ref instances.
    a = AnonRef()
    print(str(a))
    a = AnonRef('x')
    print(str(a))
    print(isinstance(a,Ref))
| {
"repo_name": "bjmnbraun/icestick_fastio",
"path": "thirdparty/magma/magma/ref.py",
"copies": "1",
"size": "1895",
"license": "mit",
"hash": -1925645271148641800,
"line_mean": 22.3950617284,
"line_max": 76,
"alpha_frac": 0.5488126649,
"autogenerated": false,
"ratio": 3.5822306238185253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46310432887185254,
"avg_score": null,
"num_lines": null
} |
__all__ = ['AnvLocalTransport']
import os
from stat import ST_MODE, S_ISDIR, ST_SIZE, S_IMODE
import sys
import errno
import shutil
import anvillib.acls
import anvillib.fs
from cStringIO import StringIO
from bzrlib import (
atomicfile,
osutils,
symbol_versioning,
config,
debug,
errors,
remote,
trace,
transport,
urlutils,
)
from bzrlib.smart import client, medium
from bzrlib.symbol_versioning import (
deprecated_method,
)
# os.open() flag sets used by the append and non-atomic-put helpers below.
# O_BINARY / O_NOINHERIT come from bzrlib.osutils (no-ops on POSIX).
_append_flags = os.O_CREAT | os.O_APPEND | os.O_WRONLY | osutils.O_BINARY | osutils.O_NOINHERIT
_put_non_atomic_flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | osutils.O_BINARY | osutils.O_NOINHERIT
class AnvLocalTransport(transport.Transport):
"""This is the transport agent for local filesystem access."""
def __init__(self, base, userID):
"""Set the base path where files will be stored."""
self.user = anvillib.acls.UserFS(userID)
if not base.startswith('file://'):
symbol_versioning.warn(
"Instantiating AnvLocalTransport with a filesystem path"
" is deprecated as of bzr 0.8."
" Please use bzrlib.transport.get_transport()"
" or pass in a file:// url.",
DeprecationWarning,
stacklevel=2
)
base = urlutils.local_path_to_url(base)
if base[-1] != '/':
base = base + '/'
# Special case : windows has no "root", but does have
# multiple lettered drives inside it. #240910
if sys.platform == 'win32' and base == 'file:///':
base = ''
self._local_base = ''
super(AnvLocalTransport, self).__init__(base)
return
super(AnvLocalTransport, self).__init__(base)
self._local_base = urlutils.local_path_from_url(base)
def clone(self, offset=None):
"""Return a new AnvLocalTransport with root at self.base + offset
Because the local filesystem does not require a connection,
we can just return a new object.
"""
if offset is None:
return AnvLocalTransport(self.base)
else:
abspath = self.abspath(offset)
if abspath == 'file://':
# fix upwalk for UNC path
# when clone from //HOST/path updir recursively
# we should stop at least at //HOST part
abspath = self.base
return AnvLocalTransport(abspath)
def _anvilise_path(self, somepath):
path = somepath
if path.startswith("%2A"):
user = path[3:path.find("/")]
branch = path[(path.find("/") + 1):]
if self.user.username != user:
return "/dev/null"
path = anvillib.fs.user_branch_dir(user, branch)
else:
project = path[0:path.find("/")]
branch = path[(path.find("/") + 1):]
if not self.user.can_access_project(project):
return "/dev/null"
path = anvillib.fs.project_branch_dir(project, branch)
return path
def _abspath(self, relative_reference):
"""Return a path for use in os calls.
Several assumptions are made:
- relative_reference does not contain '..'
- relative_reference is url escaped.
"""
relative_reference = self._anvilise_path(relative_reference)
if relative_reference in ('.', ''):
# _local_base normally has a trailing slash; strip it so that stat
# on a transport pointing to a symlink reads the link not the
# referent but be careful of / and c:\
return osutils.split(self._local_base)[0]
#return self._local_base + urlutils.unescape(relative_reference)
return urlutils.unescape(relative_reference)
def abspath(self, relpath):
"""Return the full url to the given relative URL."""
# TODO: url escape the result. RBC 20060523.
# jam 20060426 Using normpath on the real path, because that ensures
# proper handling of stuff like
relpath = self._anvilise_path(relpath)
path = osutils.normpath(osutils.pathjoin(
self._local_base, urlutils.unescape(relpath)))
# on windows, our _local_base may or may not have a drive specified
# (ie, it may be "/" or "c:/foo").
# If 'relpath' is '/' we *always* get back an abspath without
# the drive letter - but if our transport already has a drive letter,
# we want our abspaths to have a drive letter too - so handle that
# here.
if (sys.platform == "win32" and self._local_base[1:2] == ":"
and path == '/'):
path = self._local_base[:3]
return urlutils.local_path_to_url(path)
def local_abspath(self, relpath):
"""Transform the given relative path URL into the actual path on disk
This function only exists for the AnvLocalTransport, since it is
the only one that has direct local access.
This is mostly for stuff like WorkingTree which needs to know
the local working directory. The returned path will always contain
forward slashes as the path separator, regardless of the platform.
This function is quite expensive: it calls realpath which resolves
symlinks.
"""
absurl = self.abspath(relpath)
# mutter(u'relpath %s => base: %s, absurl %s', relpath, self.base, absurl)
return urlutils.local_path_from_url(absurl)
def relpath(self, abspath):
"""Return the local path portion from a given absolute path.
"""
if abspath is None:
abspath = u'.'
return urlutils.file_relpath(
urlutils.strip_trailing_slash(self.base),
urlutils.strip_trailing_slash(abspath))
def has(self, relpath):
return os.access(self._abspath(relpath), os.F_OK)
def get(self, relpath):
"""Get the file at the given relative path.
:param relpath: The relative path to the file
"""
logf = open("/tmp/anvserve", "a")
canonical_url = self.abspath(relpath)
if canonical_url in transport._file_streams:
transport._file_streams[canonical_url].flush()
filectt = None
try:
path = self._abspath(relpath)
logf.write("Get " + relpath + " => " + path + "... ")
filectt = osutils.open_file(path, 'rb')
logf.write("file succesffuly opened")
except (IOError, OSError),e:
if e.errno == errno.EISDIR:
filectt = LateReadError(relpath)
self._translate_error(e, path)
logf.write("\n")
logf.close()
return filectt
def put_file(self, relpath, f, mode=None):
"""Copy the file-like object into the location.
:param relpath: Location to put the contents, relative to base.
:param f: File-like object.
:param mode: The mode for the newly created file,
None means just use the default
"""
path = relpath
try:
path = self._abspath(relpath)
osutils.check_legal_path(path)
fp = atomicfile.AtomicFile(path, 'wb', new_mode=mode)
except (IOError, OSError),e:
self._translate_error(e, path)
try:
length = self._pump(f, fp)
fp.commit()
finally:
fp.close()
return length
def put_bytes(self, relpath, bytes, mode=None):
"""Copy the string into the location.
:param relpath: Location to put the contents, relative to base.
:param bytes: String
"""
path = relpath
try:
path = self._abspath(relpath)
osutils.check_legal_path(path)
fp = atomicfile.AtomicFile(path, 'wb', new_mode=mode)
except (IOError, OSError),e:
self._translate_error(e, path)
try:
if bytes:
fp.write(bytes)
fp.commit()
finally:
fp.close()
def _put_non_atomic_helper(self, relpath, writer,
mode=None,
create_parent_dir=False,
dir_mode=None):
"""Common functionality information for the put_*_non_atomic.
This tracks all the create_parent_dir stuff.
:param relpath: the path we are putting to.
:param writer: A function that takes an os level file descriptor
and writes whatever data it needs to write there.
:param mode: The final file mode.
:param create_parent_dir: Should we be creating the parent directory
if it doesn't exist?
"""
abspath = self._abspath(relpath)
if mode is None:
# os.open() will automatically use the umask
local_mode = 0666
else:
local_mode = mode
try:
fd = os.open(abspath, _put_non_atomic_flags, local_mode)
except (IOError, OSError),e:
# We couldn't create the file, maybe we need to create
# the parent directory, and try again
if (not create_parent_dir
or e.errno not in (errno.ENOENT,errno.ENOTDIR)):
self._translate_error(e, relpath)
parent_dir = os.path.dirname(abspath)
if not parent_dir:
self._translate_error(e, relpath)
self._mkdir(parent_dir, mode=dir_mode)
# We created the parent directory, lets try to open the
# file again
try:
fd = os.open(abspath, _put_non_atomic_flags, local_mode)
except (IOError, OSError), e:
self._translate_error(e, relpath)
try:
st = os.fstat(fd)
if mode is not None and mode != S_IMODE(st.st_mode):
# Because of umask, we may still need to chmod the file.
# But in the general case, we won't have to
os.chmod(abspath, mode)
writer(fd)
finally:
os.close(fd)
def put_file_non_atomic(self, relpath, f, mode=None,
create_parent_dir=False,
dir_mode=None):
"""Copy the file-like object into the target location.
This function is not strictly safe to use. It is only meant to
be used when you already know that the target does not exist.
It is not safe, because it will open and truncate the remote
file. So there may be a time when the file has invalid contents.
:param relpath: The remote location to put the contents.
:param f: File-like object.
:param mode: Possible access permissions for new file.
None means do not set remote permissions.
:param create_parent_dir: If we cannot create the target file because
the parent directory does not exist, go ahead and
create it, and then try again.
"""
def writer(fd):
self._pump_to_fd(f, fd)
self._put_non_atomic_helper(relpath, writer, mode=mode,
create_parent_dir=create_parent_dir,
dir_mode=dir_mode)
def put_bytes_non_atomic(self, relpath, bytes, mode=None,
create_parent_dir=False, dir_mode=None):
def writer(fd):
if bytes:
os.write(fd, bytes)
self._put_non_atomic_helper(relpath, writer, mode=mode,
create_parent_dir=create_parent_dir,
dir_mode=dir_mode)
def iter_files_recursive(self):
"""Iter the relative paths of files in the transports sub-tree."""
queue = list(self.list_dir(u'.'))
while queue:
relpath = queue.pop(0)
st = self.stat(relpath)
if S_ISDIR(st[ST_MODE]):
for i, basename in enumerate(self.list_dir(relpath)):
queue.insert(i, relpath+'/'+basename)
else:
yield relpath
def _mkdir(self, abspath, mode=None):
"""Create a real directory, filtering through mode"""
if mode is None:
# os.mkdir() will filter through umask
local_mode = 0777
else:
local_mode = mode
try:
os.mkdir(abspath, local_mode)
if mode is not None:
# It is probably faster to just do the chmod, rather than
# doing a stat, and then trying to compare
os.chmod(abspath, mode)
except (IOError, OSError),e:
self._translate_error(e, abspath)
def mkdir(self, relpath, mode=None):
"""Create a directory at the given path."""
self._mkdir(self._abspath(relpath), mode=mode)
def open_write_stream(self, relpath, mode=None):
"""See Transport.open_write_stream."""
# initialise the file
self.put_bytes_non_atomic(relpath, "", mode=mode)
abspath = self._abspath(relpath)
handle = osutils.open_file(abspath, 'wb')
if mode is not None:
self._check_mode_and_size(abspath, handle.fileno(), mode)
transport._file_streams[self.abspath(relpath)] = handle
return transport.FileFileStream(self, relpath, handle)
def _get_append_file(self, relpath, mode=None):
"""Call os.open() for the given relpath"""
file_abspath = self._abspath(relpath)
if mode is None:
# os.open() will automatically use the umask
local_mode = 0666
else:
local_mode = mode
try:
return file_abspath, os.open(file_abspath, _append_flags, local_mode)
except (IOError, OSError),e:
self._translate_error(e, relpath)
def _check_mode_and_size(self, file_abspath, fd, mode=None):
"""Check the mode of the file, and return the current size"""
st = os.fstat(fd)
if mode is not None and mode != S_IMODE(st.st_mode):
# Because of umask, we may still need to chmod the file.
# But in the general case, we won't have to
os.chmod(file_abspath, mode)
return st.st_size
def append_file(self, relpath, f, mode=None):
"""Append the text in the file-like object into the final location."""
file_abspath, fd = self._get_append_file(relpath, mode=mode)
try:
result = self._check_mode_and_size(file_abspath, fd, mode=mode)
self._pump_to_fd(f, fd)
finally:
os.close(fd)
return result
def append_bytes(self, relpath, bytes, mode=None):
"""Append the text in the string into the final location."""
file_abspath, fd = self._get_append_file(relpath, mode=mode)
try:
result = self._check_mode_and_size(file_abspath, fd, mode=mode)
if bytes:
os.write(fd, bytes)
finally:
os.close(fd)
return result
def _pump_to_fd(self, fromfile, to_fd):
"""Copy contents of one file to another."""
BUFSIZE = 32768
while True:
b = fromfile.read(BUFSIZE)
if not b:
break
os.write(to_fd, b)
def copy(self, rel_from, rel_to):
"""Copy the item at rel_from to the location at rel_to"""
path_from = self._abspath(rel_from)
path_to = self._abspath(rel_to)
try:
shutil.copy(path_from, path_to)
except (IOError, OSError),e:
# TODO: What about path_to?
self._translate_error(e, path_from)
def rename(self, rel_from, rel_to):
path_from = self._abspath(rel_from)
path_to = self._abspath(rel_to)
try:
# *don't* call bzrlib.osutils.rename, because we want to
# detect conflicting names on rename, and osutils.rename tries to
# mask cross-platform differences there
os.rename(path_from, path_to)
except (IOError, OSError),e:
# TODO: What about path_to?
self._translate_error(e, path_from)
def move(self, rel_from, rel_to):
"""Move the item at rel_from to the location at rel_to"""
path_from = self._abspath(rel_from)
path_to = self._abspath(rel_to)
try:
# this version will delete the destination if necessary
osutils.rename(path_from, path_to)
except (IOError, OSError),e:
# TODO: What about path_to?
self._translate_error(e, path_from)
def delete(self, relpath):
"""Delete the item at relpath"""
path = relpath
try:
path = self._abspath(relpath)
os.remove(path)
except (IOError, OSError),e:
self._translate_error(e, path)
def external_url(self):
"""See bzrlib.transport.Transport.external_url."""
# File URL's are externally usable.
return self.base
def copy_to(self, relpaths, other, mode=None, pb=None):
"""Copy a set of entries from self into another Transport.
:param relpaths: A list/generator of entries to be copied.
"""
if isinstance(other, AnvLocalTransport):
# Both from & to are on the local filesystem
# Unfortunately, I can't think of anything faster than just
# copying them across, one by one :(
total = self._get_total(relpaths)
count = 0
for path in relpaths:
self._update_pb(pb, 'copy-to', count, total)
try:
mypath = self._abspath(path)
otherpath = other._abspath(path)
shutil.copy(mypath, otherpath)
if mode is not None:
os.chmod(otherpath, mode)
except (IOError, OSError),e:
self._translate_error(e, path)
count += 1
return count
else:
return super(AnvLocalTransport, self).copy_to(relpaths, other, mode=mode, pb=pb)
def listable(self):
"""See Transport.listable."""
return True
def list_dir(self, relpath):
"""Return a list of all files at the given location.
WARNING: many transports do not support this, so trying avoid using
it if at all possible.
"""
path = self._abspath(relpath)
try:
entries = os.listdir(path)
except (IOError, OSError), e:
self._translate_error(e, path)
return [urlutils.escape(entry) for entry in entries]
def stat(self, relpath):
"""Return the stat information for a file.
"""
path = relpath
try:
path = self._abspath(relpath)
return os.lstat(path)
except (IOError, OSError),e:
self._translate_error(e, path)
def lock_read(self, relpath):
"""Lock the given file for shared (read) access.
:return: A lock object, which should be passed to Transport.unlock()
"""
from bzrlib.lock import ReadLock
path = relpath
try:
path = self._abspath(relpath)
return ReadLock(path)
except (IOError, OSError), e:
self._translate_error(e, path)
def lock_write(self, relpath):
"""Lock the given file for exclusive (write) access.
WARNING: many transports do not support this, so trying avoid using it
:return: A lock object, which should be passed to Transport.unlock()
"""
from bzrlib.lock import WriteLock
return WriteLock(self._abspath(relpath))
def rmdir(self, relpath):
"""See Transport.rmdir."""
path = relpath
try:
path = self._abspath(relpath)
os.rmdir(path)
except (IOError, OSError),e:
self._translate_error(e, path)
if osutils.host_os_dereferences_symlinks():
def readlink(self, relpath):
"""See Transport.readlink."""
return osutils.readlink(self._abspath(relpath))
if osutils.hardlinks_good():
def hardlink(self, source, link_name):
"""See Transport.link."""
try:
os.link(self._abspath(source), self._abspath(link_name))
except (IOError, OSError), e:
self._translate_error(e, source)
if osutils.has_symlinks():
def symlink(self, source, link_name):
"""See Transport.symlink."""
abs_link_dirpath = urlutils.dirname(self.abspath(link_name))
source_rel = urlutils.file_relpath(
urlutils.strip_trailing_slash(abs_link_dirpath),
urlutils.strip_trailing_slash(self.abspath(source))
)
try:
os.symlink(source_rel, self._abspath(link_name))
except (IOError, OSError), e:
self._translate_error(e, source_rel)
def _can_roundtrip_unix_modebits(self):
if sys.platform == 'win32':
# anyone else?
return False
else:
return True
| {
"repo_name": "Etenil/anvil",
"path": "bzrplugins/anvserve/anvtransport.py",
"copies": "1",
"size": "21449",
"license": "mit",
"hash": 8216786460539486000,
"line_mean": 36.8289241623,
"line_max": 102,
"alpha_frac": 0.5628700639,
"autogenerated": false,
"ratio": 4.182722308892355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245592372792356,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ApiFramework']
# Nested mapping describing the API tree; leaves are docstrings for the
# corresponding endpoint (see ApiFramework.__getattr__, which copies the
# leaf into __doc__).
__API_LIST = {
    'document':{
        'get': 'Get document',
    },
}
class ApiError(AttributeError):
    """Raised when a requested API path does not exist in the API tree.

    :param path: list of attribute names that form the missing path.
    """
    def __init__(self, path):
        message = "api has no attribute '%s'" % '.'.join(path)
        # Let AttributeError set self.args properly instead of assigning
        # it by hand as the original did.
        super(ApiError, self).__init__(message)
        self.message = message
class ApiFramework(object):
    """Attribute-chaining proxy over a nested API description dict.

    Each attribute access extends the path; calling the resulting object
    returns the accumulated path as a list.  When the path exists in the
    tree, the leaf value is exposed as the proxy's __doc__.  With
    ``warning=True`` (and a non-empty tree), unknown paths raise ApiError.
    """
    def __init__(self, apiList=None, warning=False, path=None):
        # None sentinels replace the original mutable default arguments
        # ({} and []), which are shared across all calls.
        self.apiList = {} if apiList is None else apiList
        self.warning = self.apiList != {} and warning
        self.path = [] if path is None else path
    def __getattr__(self, s):
        new_path = self.path + [s]
        try:
            tmpApi = self.apiList
            for name in new_path:
                tmpApi = tmpApi[name]
        except (KeyError, TypeError, IndexError):
            # Narrowed from a bare `except:`: walking the tree can only fail
            # with a missing key or by indexing a non-dict leaf.
            if self.warning:
                raise ApiError(new_path)
            api = ApiFramework(self.apiList, self.warning, new_path)
        else:
            api = ApiFramework(self.apiList, self.warning, new_path)
            api.__doc__ = tmpApi
        return api
    def __call__(self, *args, **kwargs):
        return self.path
if __name__ == '__main__':
    # Demo: with warning=True, valid paths resolve and print their path
    # list, while the unknown 'send' endpoint raises ApiError.
    api = ApiFramework(__API_LIST, True)
    print(api.document.get())
    print(api.document.send())
| {
"repo_name": "littlecodersh/EasierLife",
"path": "Plugins/ApiFramework/ApiFramework.py",
"copies": "1",
"size": "1128",
"license": "mit",
"hash": -7010296666285058000,
"line_mean": 31.1764705882,
"line_max": 75,
"alpha_frac": 0.5265957447,
"autogenerated": false,
"ratio": 3.6623376623376624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46889334070376626,
"avg_score": null,
"num_lines": null
} |
__all__ = ['API']
from functools import wraps
import os
import flask
from flask import request, json
from . import predictors
from . import recommenders
# Providers searched in order when resolving FICKLE_MODEL by name (see API()).
models = (predictors, recommenders)
# Username expected by HTTP basic auth; the password comes from FICKLE_PASSWORD.
USERNAME = 'fickle'
def Response(data=None, status=200):
    """Build a JSON flask.Response.

    :param data: JSON-serializable payload, or None for an empty body.
    :param status: HTTP status code.

    Bug fix: test ``data is not None`` rather than truthiness so that
    empty-but-valid payloads such as [] or {} are serialized instead of
    being silently dropped to an empty body.
    """
    if data is not None:
        body = json.dumps(data)
    else:
        body = None
    return flask.Response(body, status=status, mimetype='application/json')
def SuccessResponse(status=200):
    """Return an empty-bodied JSON response with the given success status."""
    return Response(None, status=status)
def ErrorResponse(status=400):
    """Return an empty-bodied JSON response with the given error status."""
    return Response(status=status)
def API(name, backend=None):
    """Build and return the Flask app exposing the model backend over HTTP.

    :param name: Flask application name.
    :param backend: model instance; when None it is resolved from the
        FICKLE_MODEL environment variable against the `models` providers.
    """
    app = flask.Flask(name)
    app.config['DEBUG'] = bool(os.environ.get('FICKLE_DEBUG'))
    if backend is None:
        __model = os.environ.get('FICKLE_MODEL', 'GenericSVMClassifier')
        # NOTE(review): next() without a default raises StopIteration when no
        # provider defines the requested model name — confirm intended.
        model = next((getattr(m, __model) for m in models
                      if hasattr(m, __model)))
        backend = model()
    __password = os.environ.get('FICKLE_PASSWORD')
    def check_auth(username, password):
        # With no configured password, authentication is disabled entirely.
        if __password:
            return username == USERNAME and password == __password
        else:
            return True
    def requires_auth(f):
        # No password configured: leave the endpoint unwrapped.
        if not __password:
            return f
        @wraps(f)
        def decorated(*args, **kwargs):
            auth = request.authorization
            if not auth or not check_auth(auth.username, auth.password):
                return ErrorResponse(status=403)
            return f(*args, **kwargs)
        return decorated
    @app.route('/')
    @requires_auth
    def api_root():
        # Health-check endpoint.
        return SuccessResponse()
    @app.route('/load', methods=['POST'])
    @requires_auth
    def api_load():
        write = bool(request.args.get('write', None))
        backend.load(request.json, write=write)
        return SuccessResponse(status=201)
    @app.route('/fit', methods=['POST'])
    @requires_auth
    def api_fit():
        try:
            backend.fit()
        except RuntimeError:
            # Backend cannot fit yet (e.g. no data loaded).
            return ErrorResponse(status=501)
        return SuccessResponse()
    @app.route('/predict', methods=['POST'])
    @requires_auth
    def api_predict():
        try:
            data = backend.predict(request.json)
        except RuntimeError:
            return ErrorResponse(status=501)
        return Response(data)
    @app.route('/recommend', methods=['POST'])
    @requires_auth
    def api_recommend():
        # Only some backends support recommendation.
        if not hasattr(backend, 'recommend'):
            return ErrorResponse(status=406)
        keys = request.json
        args = dict()
        n = request.args.get('n', None)
        if n is not None:
            args['n'] = int(n)
        try:
            data = backend.recommend(keys, **args)
        except RuntimeError:
            return ErrorResponse(status=500)
        except ValueError:
            return ErrorResponse(status=400)
        return Response(data)
    return app
| {
"repo_name": "norbert/fickle",
"path": "fickle/api.py",
"copies": "1",
"size": "2866",
"license": "mit",
"hash": 4088026574043957000,
"line_mean": 24.8198198198,
"line_max": 75,
"alpha_frac": 0.5917655269,
"autogenerated": false,
"ratio": 4.094285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5186051241185714,
"avg_score": null,
"num_lines": null
} |
"""All api routes regarding modules."""
from flask import g
from webargs.flaskparser import use_args
from server import user_bp
from server.extensions import db
from server.data.types import ModuleTypes
from server.responses import created, ok, bad_request
from server.schemas.module import ModuleSchema
from server.models import (
Module,
Pod,
Queue,
QueueEntry,
)
@user_bp.route('/api/modules', methods=['GET'])
def get_module_meta():
    """Get the meta information about all modules."""
    # Imported lazily, presumably to avoid a circular import at load time.
    from server.data.data import module_data
    return ok(module_data)
# This route returns the list of all modules and
# their current levels for the pod of the current user.
@user_bp.route('/api/pod/<uuid:pod_id>/modules', methods=['GET'])
def get_pod_modules(pod_id):
    """Get all modules and their information from a specific pod."""
    # NOTE(review): unlike the POST route, there is no ownership check here,
    # so any user can read any pod's modules — confirm that is intended.
    pod = db.session.query(Pod).get(pod_id)
    schema = ModuleSchema()
    return ok(schema.dump(pod.modules, many=True).data)
@user_bp.route('/api/pod/<uuid:pod_id>/modules', methods=['POST'])
@use_args(ModuleSchema(only=['module_type', 'stationary', 'x_pos', 'y_pos']))
def new_pod_module(args, pod_id):
    """Place a new module on the pod grid.

    Checks, in order: pod ownership, module type validity, placement
    collisions (stationary modules collide per type, grid modules per
    coordinate) and resource availability.  On success the resources are
    deducted and a level-0 module plus its build-queue entry are created.
    """
    from server.data.data import module_data
    pod = db.session.query(Pod).get(pod_id)
    if pod.user_id != g.current_user.id:
        # f-prefix removed: the literal has no placeholders (lint F541);
        # the runtime string is unchanged.
        return bad_request("Pod doesn't belong to current user.")
    # Check for valid module type
    module_type = args['module_type']
    stationary = args.get('stationary')
    x_pos = args.get('x_pos')
    y_pos = args.get('y_pos')
    if module_type not in ModuleTypes.__members__:
        return bad_request(f'Unknown Module type: {module_type}')
    # Check if we already have a module with this type
    # at the specified position.
    if stationary:
        existing_module = db.session.query(Module) \
            .filter(Module.pod_id == pod.id) \
            .filter(Module.stationary.is_(True)) \
            .filter(Module.type == module_type) \
            .first()
    else:
        existing_module = db.session.query(Module) \
            .filter(Module.pod_id == pod.id) \
            .filter(Module.x_pos == x_pos) \
            .filter(Module.y_pos == y_pos) \
            .first()
    if existing_module:
        return bad_request('There already is a module at this position')
    # Check if we have enough resources
    module_level = module_data[module_type]['levels'][0]
    requirements = module_level['resources']
    enough, missing = pod.enough_resources(requirements)
    if not enough:
        return bad_request(f'Not enough resources: {missing}')
    # Subtract the resources from the pod and create a queue entry.
    pod.subtract_resources(requirements)
    module = Module(module_type, pod, 0, stationary, x_pos, y_pos)
    queue_entry = QueueEntry(
        pod.queue, 0,
        module_level['duration'], module=module
    )
    pod.queue.next_entry()
    db.session.add(queue_entry)
    db.session.add(module)
    db.session.commit()
    return created()
@user_bp.route('/api/pod/<uuid:pod_id>/modules/<uuid:module_id>', methods=['PUT'])
def upgrade_pod_module(pod_id, module_id):
    """Queue an upgrade for an existing module on the pod grid."""
    from server.data.data import module_data
    pod = db.session.query(Pod).get(pod_id)
    # Guard unknown pod ids before touching pod attributes (was a 500).
    if pod is None:
        return bad_request('No pod with this id')
    if pod.user_id != g.current_user.id:
        return bad_request("Pod doesn't belong to current user.")

    module = db.session.query(Module).get(module_id)
    if not module:
        return bad_request('No module with this id')
    # Authorization fix: the module must belong to this pod, otherwise a
    # user could queue upgrades on modules of foreign pods.
    if module.pod_id != pod.id:
        return bad_request("Module doesn't belong to this pod")

    # Next level is one above the module's current level, or one above
    # the highest level already queued for it.
    next_level = module.level + 1
    highest_queue_entry = db.session.query(QueueEntry) \
        .filter(QueueEntry.module == module) \
        .join(Queue) \
        .filter(Queue.pod == pod) \
        .order_by(QueueEntry.level.desc()) \
        .first()
    if highest_queue_entry:
        next_level = highest_queue_entry.level + 1

    # Ensure we didn't reach max level.
    if next_level >= len(module_data[module.type]['levels']):
        return bad_request("Max level reached.")
    module_level = module_data[module.type]['levels'][next_level]

    # Ensure we have enough resources
    requirements = module_level['resources']
    enough, missing = pod.enough_resources(requirements)
    if not enough:
        return bad_request(f'Not enough resources: {missing}')

    # Subtract the resources from the pod and create a queue entry.
    pod.subtract_resources(requirements)
    queue_entry = QueueEntry(
        pod.queue, next_level,
        module_level['duration'], module=module,
    )
    pod.queue.next_entry()
    db.session.add(queue_entry)
    db.session.add(module)
    db.session.commit()
    return ok()
| {
"repo_name": "Nukesor/spacesurvival",
"path": "server/api/module.py",
"copies": "1",
"size": "4764",
"license": "mit",
"hash": 1790522609631706000,
"line_mean": 31.6301369863,
"line_max": 82,
"alpha_frac": 0.6526028547,
"autogenerated": false,
"ratio": 3.6422018348623855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794804689562385,
"avg_score": null,
"num_lines": null
} |
__all__ = ('app', )
from flask import Flask
from web.config import config
from time import time, localtime, strftime
def delta_time(seconds):
    """Format a duration in seconds as 'Nj Nh Nm Ns' ('j' = jours/days),
    omitting the leading units that are zero."""
    total = int(seconds)
    minutes, secs = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = {'seconds': secs, 'minutes': minutes,
             'hours': hours, 'days': days}
    if days:
        return '%(days)sj %(hours)dh %(minutes)dm %(seconds)ds' % parts
    elif hours:
        return '%(hours)dh %(minutes)dm %(seconds)ds' % parts
    elif minutes:
        return '%(minutes)dm %(seconds)ds' % parts
    else:
        return '%(seconds)ds' % parts
app = Flask(__name__)
app.secret_key = config.get('www', 'secret_key')
# Expose time() to all templates and register the duration filter
# defined above.
app.jinja_env.globals['time'] = time
app.jinja_env.filters['delta_time'] = delta_time
@app.template_filter('datetimeformat')
def datetimeformat(value, format='%d/%m/%Y %H:%M'):
    """Jinja filter: render a unix timestamp as a local-time string.

    Returns '' when the value cannot be interpreted as a timestamp.
    """
    try:
        # e.g. "%Y-%m-%d %H:%M:%S"
        return strftime(format, localtime(float(value)))
    except (TypeError, ValueError, OverflowError, OSError):
        # Was a bare except: narrow to what float()/localtime()/strftime()
        # actually raise, so real bugs are no longer silently swallowed.
        return ''
| {
"repo_name": "kivy/p4a-cloud",
"path": "master/web/__init__.py",
"copies": "1",
"size": "1057",
"license": "mit",
"hash": 706272996997654300,
"line_mean": 25.425,
"line_max": 67,
"alpha_frac": 0.5894039735,
"autogenerated": false,
"ratio": 3.3878205128205128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4477224486320513,
"avg_score": null,
"num_lines": null
} |
__all__ = ('AppProgressBar', )
from kivy.properties import NumericProperty, AliasProperty
from kivy.uix.widget import Widget
from kivy.lang import Builder
Builder.load_string('''
<AppProgressBar>:
canvas:
Color:
rgb: 1, 1, 1
BorderImage:
border: (12, 12, 12, 12)
pos: self.x, self.center_y - 12
size: self.width, 24
source: 'atlas://data/images/defaulttheme/progressbar_background'
BorderImage:
border: [int(min(self.width * (self.value / float(self.max)) if self.max else 0, 12))] * 4
pos: self.x, self.center_y - 12
size: self.width * (self.value / float(self.max)) if self.max else 0, 24
source: 'atlas://data/images/defaulttheme/progressbar'
''')
class AppProgressBar(Widget):
    '''Class for creating a progress bar widget.
    See module documentation for more details.
    '''

    def __init__(self, **kwargs):
        # Backing field for the ``value`` AliasProperty; must exist before
        # Widget.__init__ runs so a ``value=`` kwarg can be applied.
        self._value = 0.
        super(AppProgressBar, self).__init__(**kwargs)

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        # Clamp into [0, max]; returning True tells the AliasProperty the
        # stored value actually changed, so observers get notified.
        value = max(0, min(self.max, value))
        if value != self._value:
            self._value = value
            return True

    value = AliasProperty(_get_value, _set_value)
    '''Current value used for the slider.
    :attr:`value` is an :class:`~kivy.properties.AliasProperty` that
    returns the value of the progress bar. If the value is < 0 or >
    :attr:`max`, it will be normalized to those boundaries.
    .. versionchanged:: 1.6.0
        The value is now limited to between 0 and :attr:`max`.
    '''

    def get_norm_value(self):
        # Guard the max == 0 case to avoid division by zero.
        d = self.max
        if d == 0:
            return 0
        return self.value / float(d)

    def set_norm_value(self, value):
        self.value = value * self.max

    value_normalized = AliasProperty(get_norm_value, set_norm_value,
                                     bind=('value', 'max'))
    '''Normalized value inside the range 0-1::
        >>> pb = ProgressBar(value=50, max=100)
        >>> pb.value
        50
        >>> pb.value_normalized
        0.5
    :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    max = NumericProperty(100.)
    '''Maximum value allowed for :attr:`value`.
    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
    100.
    '''
if __name__ == '__main__':
    # Manual smoke test: display a half-filled progress bar.
    from kivy.base import runTouchApp
    runTouchApp(AppProgressBar(value=50))
| {
"repo_name": "Bakterija/mmplayer",
"path": "mmplayer/widgets/progressbar.py",
"copies": "1",
"size": "2536",
"license": "mit",
"hash": -3623676589573187000,
"line_mean": 27.8181818182,
"line_max": 102,
"alpha_frac": 0.5863564669,
"autogenerated": false,
"ratio": 3.8020989505247376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.988723533539369,
"avg_score": 0.00024401640620943876,
"num_lines": 88
} |
__all__ = ["app", "version", "util", "module"]
from typing import Callable, List
class Wutu(object):
    """
    an external API for Wutu Framework
    """
    def __init__(self, index: str, ngmodules: List[str]=None, minify: bool=False) -> None:
        # Imported lazily so that importing this package alone does not
        # pull in the whole application stack.
        from wutu import app
        self.app = app.create(index=index, ngmodules=ngmodules, minify=minify)

    def create_module(self, fn: Callable):
        """
        Creates a Wutu module from your defined function.
        This function must include dict with following (each is optional) methods:
        'get', 'post', 'put', 'delete' each corresponds to HTTP methods.
        Special methods:
        - 'get_controller' - returns AngularJS JavaScript controller text (Can provide using Wutu.load_js method)
        - 'get_service' - returns AngularJS JavaScript service text (auto-generated by default)
        :param fn: Module function. Module name is automatically generated from function name
        :return:
        """
        from wutu.decorators import create_module
        decorate = create_module(self.app.api)
        return decorate(fn)

    def run(self, *args, **kwargs) -> None:
        """
        Runs web app. Arguments are same as Flask, including:
        - host: on which address to bind (default: localhost)
        - port: on which port to bind (default: 5000)
        - debug: show debug info on errors (default: False)
        :param args:
        :param kwargs:
        :return:
        """
        self.app.run(*args, **kwargs)

    @staticmethod
    def load_js(name: str) -> str:
        """
        Loads and exposes JavaScript file
        :param name: name of the file
        :return: JavaScript object
        """
        from wutu.util import load_js
        return load_js(name)
| {
"repo_name": "zaibacu/wutu",
"path": "wutu/__init__.py",
"copies": "1",
"size": "1769",
"license": "mit",
"hash": -913916092921930600,
"line_mean": 35.8541666667,
"line_max": 117,
"alpha_frac": 0.6088185415,
"autogenerated": false,
"ratio": 4.085450346420323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002002210872076378,
"num_lines": 48
} |
__all__ = ('Argument', 'Integer', 'Bytes', 'Float', 'Boolean', 'String', )
# Parts of the following code are ported from the Twisted source:
# http://twistedmatrix.com/trac/browser/tags/releases/twisted-8.2.0/twisted/protocols/amp.py#L2042
class Argument:
    """
    Base-class of all objects that take values from Amp packets and convert
    them into objects for Python functions.
    """
    # Python type produced by decode(); subclasses override this.
    type = object

    def __init__(self, optional=False):
        # Whether the argument may be omitted from a packet.
        self.optional = optional

    def decode(self, data):
        """ Convert network bytes to a Python object. """
        raise NotImplementedError

    def encode(self, obj):
        """ Convert a Python object into bytes for passing over the network. """
        raise NotImplementedError
class Integer(Argument):
    """ Encode any integer values of any size on the wire. """
    type = int
    decode = int  # int() parses ASCII digit bytes directly

    def encode(self, obj):
        # Coerce through int() first so e.g. floats serialize as integers.
        return ('%d' % int(obj)).encode('ascii')
class Bytes(Argument):
    """ Don't do any conversion at all; just pass through 'bytes'. """
    type = bytes

    def encode(self, obj):
        # Identity: the caller already supplies raw bytes.
        return obj

    def decode(self, data):
        # Identity: wire bytes are handed through untouched.
        return data
class Float(Argument):
    """ Encode floating-point values on the wire as their repr. """
    type = float

    def encode(self, obj):
        # repr() round-trips Python 3 floats exactly.
        return repr(obj).encode('ascii')

    def decode(self, obj):
        return float(obj)
class Boolean(Argument):
    """ Encode True or False as "True" or "False" on the wire. """
    type = bool

    def decode(self, data):
        # Only the two canonical wire values are accepted; anything else
        # is a protocol violation.
        if data == b'True':
            return True
        if data == b'False':
            return False
        raise TypeError("Bad boolean value: %r" % data)

    def encode(self, obj):
        # Any truthy object serializes as b'True'.
        return b'True' if obj else b'False'
class String(Argument):
    """ Encode a unicode string on the wire as UTF-8. """
    # Wire encoding; kept as a class attribute so subclasses may override.
    encoding = 'utf-8'
    type = str

    def encode(self, obj):
        return obj.encode(self.encoding)

    def decode(self, data):
        return data.decode(self.encoding)
| {
"repo_name": "jonathanslenders/asyncio-amp",
"path": "asyncio_amp/arguments.py",
"copies": "1",
"size": "2085",
"license": "bsd-2-clause",
"hash": -7624163702071818000,
"line_mean": 23.8214285714,
"line_max": 98,
"alpha_frac": 0.6038369305,
"autogenerated": false,
"ratio": 4.072265625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51761025555,
"avg_score": null,
"num_lines": null
} |
__all__ = ["AskString"]
import objc
import Foundation
import AppKit
NSApp = AppKit.NSApp
# class defined in AskString.xib
class AskStringWindowController(AppKit.NSWindowController):
    """Controller for a single-field question dialog (panel or sheet).

    The answer string — or None on cancel — is delivered asynchronously
    through ``resultCallback``.
    """

    # Outlets connected in the AskString nib.
    questionLabel = objc.IBOutlet()
    textField = objc.IBOutlet()

    def __new__(cls, question, resultCallback, default="", parentWindow=None):
        # Cocoa two-phase construction: alloc, then init from the nib.
        self = cls.alloc().initWithWindowNibName_("AskString")
        self.question = question
        self.resultCallback = resultCallback
        self.default = default
        self.parentWindow = parentWindow
        if self.parentWindow is None:
            # Stand-alone panel: restore and persist its frame position.
            self.window().setFrameUsingName_("AskStringPanel")
            self.setWindowFrameAutosaveName_("AskStringPanel")
            self.showWindow_(self)
        else:
            # Attach as a modal sheet to the given parent window.
            NSApp().beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
                self.window(), self.parentWindow, None, None, 0)
        # Balanced by autorelease in windowWillClose_: keeps this
        # controller alive while the window is on screen.
        self.retain()
        return self

    def windowWillClose_(self, notification):
        self.autorelease()

    def awakeFromNib(self):
        self.questionLabel.setStringValue_(self.question)
        self.textField.setStringValue_(self.default)

    def done(self):
        # Dismiss either the stand-alone panel or the sheet.
        if self.parentWindow is None:
            self.close()
        else:
            sheet = self.window()
            NSApp().endSheet_(sheet)
            sheet.orderOut_(self)

    def ok_(self, sender):
        value = self.textField.stringValue()
        self.done()
        self.resultCallback(value)

    def cancel_(self, sender):
        self.done()
        # None signals "cancelled" to the caller.
        self.resultCallback(None)
def AskString(question, resultCallback, default="", parentWindow=None):
    """Ask the user for a string; the answer (or None) goes to resultCallback."""
    AskStringWindowController(
        question, resultCallback, default=default, parentWindow=parentWindow)
| {
"repo_name": "karstenw/nodebox-pyobjc",
"path": "nodebox/gui/mac/AskString.py",
"copies": "1",
"size": "1739",
"license": "mit",
"hash": -8494512516794226000,
"line_mean": 30.0535714286,
"line_max": 88,
"alpha_frac": 0.6515238643,
"autogenerated": false,
"ratio": 4.101415094339623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252938958639622,
"avg_score": null,
"num_lines": null
} |
__all__ = ['assert_nodes_equal', 'assert_edges_equal', 'assert_graphs_equal',
'almost_equal']
def almost_equal(x, y, places=7):
    """Return True when x and y agree after rounding their difference
    to *places* decimal places."""
    difference = abs(x - y)
    return round(difference, places) == 0
def assert_nodes_equal(nodes1, nodes2):
    """Raise AssertionError unless both node iterables are equal.

    Accepts plain node iterables or iterables of (node, datadict) pairs.
    """
    seq1 = list(nodes1)
    seq2 = list(nodes2)
    try:
        # (node, datadict) pairs convert directly into dicts.
        mapping1 = dict(seq1)
        mapping2 = dict(seq2)
    except (ValueError, TypeError):
        # Plain nodes: compare as key sets with no data attached.
        mapping1 = dict.fromkeys(seq1)
        mapping2 = dict.fromkeys(seq2)
    assert mapping1 == mapping2
def assert_edges_equal(edges1, edges2):
    """Raise AssertionError unless both edge iterables are equal.

    Accepts edge tuples (u, v), tuples with data dicts (u, v, d), or
    tuples with keys and data dicts (u, v, k, d).
    """
    from collections import defaultdict

    def _adjacency(edges):
        # Build node -> neighbour -> list-of-data; return the index of the
        # last edge seen (0 for empty input) together with the mapping.
        adj = defaultdict(dict)
        last_index = 0
        for last_index, edge in enumerate(edges):
            u, v = edge[0], edge[1]
            payload = [edge[2:]]
            if v in adj[u]:
                payload = adj[u][v] + payload
            adj[u][v] = payload
            adj[v][u] = payload
        return last_index, adj

    c1, d1 = _adjacency(edges1)
    c2, d2 = _adjacency(edges2)
    assert c1 == c2
    # can check one direction because lengths are the same.
    for node, nbrdict in d1.items():
        for nbr, datalist in nbrdict.items():
            assert node in d2
            assert nbr in d2[node]
            d2datalist = d2[node][nbr]
            for data in datalist:
                assert datalist.count(data) == d2datalist.count(data)
def assert_graphs_equal(graph1, graph2):
    """Raise AssertionError unless adjacency, node and graph attribute
    dicts of both graphs compare equal (checked in that order)."""
    for attribute in ('adj', 'nodes', 'graph'):
        assert getattr(graph1, attribute) == getattr(graph2, attribute)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/networkx/testing/utils.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": -8667561843033779000,
"line_mean": 28.2,
"line_max": 77,
"alpha_frac": 0.5599315068,
"autogenerated": false,
"ratio": 3.095406360424028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41553378672240276,
"avg_score": null,
"num_lines": null
} |
"""All assumptions are either loaded in this file or definied here
"""
from energy_demand.read_write import read_data
from energy_demand.technologies import tech_related
from energy_demand.basic import testing_functions, date_prop
from energy_demand.assumptions import fuel_shares
from energy_demand.initalisations import helpers
from energy_demand.profiles import hdd_cdd
from energy_demand.read_write import narrative_related
from energy_demand.basic import lookup_tables
class Assumptions(object):
    """Assumptions of energy demand model

    Arguments
    ---------
    base_yr : int, default=None
        Base year
    curr_yr : int, default=None
        Current year
    sim_yrs : list, default=None
        Simulated years
    paths : dict, default=None
        Paths
    enduses : list, default=None
        All modelled end uses
    sectors : list, default=None
        All modelled sectors
    fueltypes : dict, default=None
        Fueltype lookup
    fueltypes_nr : int, default=None
        Number of modelled fueltypes
    """
    def __init__(
            self,
            lookup_enduses=None,
            lookup_sector_enduses=None,
            base_yr=None,
            weather_by=None,
            simulation_end_yr=None,
            curr_yr=None,
            sim_yrs=None,
            paths=None,
            enduses=None,
            sectors=None,
            reg_nrs=None
        ):
        """Constructor
        """
        self.lookup_enduses = lookup_enduses
        self.lookup_sector_enduses = lookup_sector_enduses

        # NOTE(review): basic_lookups() is called three times here; if it is
        # expensive, the result could be bound once -- confirm it is pure.
        self.submodels_names = lookup_tables.basic_lookups()['submodels_names']
        self.nr_of_submodels = len(self.submodels_names)
        self.fueltypes = lookup_tables.basic_lookups()['fueltypes']
        self.fueltypes_nr = lookup_tables.basic_lookups()['fueltypes_nr']

        self.base_yr = base_yr
        self.weather_by = weather_by
        self.reg_nrs = reg_nrs
        self.simulation_end_yr = simulation_end_yr
        self.curr_yr = curr_yr
        self.sim_yrs = sim_yrs

        # ============================================================
        # Spatially modelled variables
        #
        # If spatial explicit diffusion is modelled, all parameters
        # or technologies having a spatial explicit diffusion need
        # to be defined.
        # ============================================================
        self.spatial_explicit_diffusion = 0 #0: False, 1: True

        # Define all variables which are affected by regional diffusion
        self.spatially_modelled_vars = [] # ['smart_meter_p']

        # Define technologies which are affected by spatial explicit diffusion
        self.techs_affected_spatial_f = ['heat_pumps_electricity']

        # Max penetration speed
        self.speed_con_max = 1 #1.5 # 1: uniform distribution >1: regional differences

        # ============================================================
        # Model calibration factors
        # ============================================================
        #
        # These calibration factors are used to match the modelled
        # electrictiy demand better with the validation data.
        #
        # Weekend effects are used to distribut energy demands
        # between working and weekend days. With help of these
        # factors, the demand on weekends and holidays can be
        # be lowered compared to working days.
        # This factor can be applied either directly to an enduse
        # or to the hdd or cdd calculations (to correct cooling
        # or heating demand)
        #
        # f_ss_cooling_weekend : float
        #   Weekend effect for cooling enduses
        # f_ss_weekend : float
        #   WWeekend effect for service submodel enduses
        # f_is_weekend : float
        #   Weekend effect for industry submodel enduses
        # f_mixed_floorarea : float
        #   Share of floor_area which is assigned to either
        #   residential or non_residential floor area
        # ------------------------------------------------------------
        self.f_ss_cooling_weekend = 0.45 # Temporal calibration factor
        self.f_ss_weekend = 0.8 # Temporal calibration factor
        self.f_is_weekend = 0.45 # Temporal calibration factor

        # ============================================================
        # Modelled day related factors
        # ============================================================
        # model_yeardays_date : dict
        #   Contains for the base year for each days
        #   the information wheter this is a working or holiday
        # ------------------------------------------------------------
        self.model_yeardays = list(range(365))

        # Calculate dates
        self.model_yeardays_date = []
        for yearday in self.model_yeardays:
            self.model_yeardays_date.append(
                date_prop.yearday_to_date(base_yr, yearday))

        # ============================================================
        # Dwelling stock related assumptions
        # ============================================================
        #
        # Assumptions to generate a virtual dwelling stock
        #
        # assump_diff_floorarea_pp : float
        #   Change in floor area per person (%, 1=100%)
        # assump_diff_floorarea_pp_yr_until_changed : int
        #   Year until this change in floor area happens
        # dwtype_distr_by : dict
        #   Housing Stock Distribution by Type
        #       Source: UK Housing Energy Fact File, Table 4c
        # dwtype_distr_fy : dict
        #   welling type distribution end year
        #       Source: UK Housing Energy Fact File, Table 4c
        # dwtype_floorarea_by : dict
        #   Floor area per dwelling type (Annex Table 3.1)
        #       Source: UK Housing Energy Fact File, Table 4c
        # dwtype_floorarea_fy : dict
        #   Floor area per dwelling type
        #       Source: UK Housing Energy Fact File, Table 4c
        # dwtype_age_distr : dict
        #   Floor area per dwelling type
        #       Source: Housing Energy Fact Sheet)
        # yr_until_changed : int
        #   Year until change is realised
        #
        # https://www.gov.uk/government/statistics/english-housing-survey-2014-to-2015-housing-stock-report
        # ------------------------------------------------------------
        yr_until_changed_all_things = 2050

        self.dwtype_distr_by = {
            'semi_detached': 0.26,
            'terraced': 0.283,
            'flat': 0.203,
            'detached': 0.166,
            'bungalow': 0.088}

        self.dwtype_distr_fy = {
            'yr_until_changed': yr_until_changed_all_things,
            'semi_detached': 0.26,
            'terraced': 0.283,
            'flat': 0.203,
            'detached': 0.166,
            'bungalow': 0.088}

        self.dwtype_floorarea_by = {
            'semi_detached': 96,
            'terraced': 82.5,
            'flat': 61,
            'detached': 147,
            'bungalow': 77}

        self.dwtype_floorarea_fy = {
            'yr_until_changed': yr_until_changed_all_things,
            'semi_detached': 96,
            'terraced': 82.5,
            'flat': 61,
            'detached': 147,
            'bungalow': 77}

        # (Average builing age within age class, fraction)
        # The newest category of 2015 is added to implement change in refurbishing rate
        # For the base year, this is set to zero (if e.g. with future scenario set to 5%, then
        # proportionally to base year distribution number of houses are refurbished)
        self.dwtype_age_distr = {
            2015: {
                '1918' :0.21,
                '1941': 0.36,
                '1977.5': 0.3,
                '1996.5': 0.08,
                '2002': 0.05}}

        # ============================================================
        # Scenario drivers
        # ============================================================
        #
        # For every enduse the relevant factors which affect enduse
        # consumption can be added in a list.
        #
        # Note:   If e.g. floorarea and population are added, the
        #         effects will be overestimates (i.e. no multi-
        #         collinearity are considered).
        #
        # scenario_drivers : dict
        #   Scenario drivers per enduse
        # ------------------------------------------------------------
        self.scenario_drivers = {

            # --Residential
            'rs_space_heating': ['floorarea', 'hlc'], # Do not use HDD or pop because otherweise double count
            'rs_water_heating': ['population'],
            'rs_lighting': ['population', 'floorarea'],
            'rs_cooking': ['population'],
            'rs_cold': ['population'],
            'rs_wet': ['population'],
            'rs_consumer_electronics': ['population', 'gva'],
            'rs_home_computing': ['population'],

            # --Service
            'ss_space_heating': ['floorarea'],
            'ss_water_heating': ['population'],
            'ss_lighting': ['floorarea'],
            'ss_catering': ['population'],
            'ss_ICT_equipment': ['population'],
            'ss_cooling_humidification': ['floorarea', 'population'],
            'ss_fans': ['floorarea', 'population'],
            'ss_small_power': ['population'],
            'ss_cooled_storage': ['population'],
            'ss_other_gas': ['population'],
            'ss_other_electricity': ['population'],

            # Industry
            'is_high_temp_process': ['gva'],
            'is_low_temp_process': ['gva'],
            'is_drying_separation': ['gva'],
            'is_motors': ['gva'],
            'is_compressed_air': ['gva'],
            'is_lighting': ['gva'],
            'is_space_heating': ['gva'],
            'is_other': ['gva'],
            'is_refrigeration': ['gva']}

        # ============================================================
        # Cooling related assumptions
        # ============================================================
        # assump_cooling_floorarea : int
        #   The percentage of cooled floor space in the base year
        #
        # Literature
        # ----------
        # Abela, A. et al. (2016). Study on Energy Use by Air
        # Conditioning. Bre, (June), 31. Retrieved from
        # https://www.bre.co.uk/filelibrary/pdf/projects/aircon-energy-use/StudyOnEnergyUseByAirConditioningFinalReport.pdf
        # ------------------------------------------------------------
        # See Abela et al. (2016) & Carbon Trust. (2012). Air conditioning. Maximising comfort, minimising energy consumption
        self.cooled_ss_floorarea_by = 0.35

        # ============================================================
        # Smart meter related base year assumptions
        # ============================================================
        # smart_meter_p_by : int
        #   The percentage of households with smart meters in by
        # ------------------------------------------------------------
        self.smart_meter_assump = {}

        # Currently in 2017 8.6 mio smart meter installed of 27.2 mio households --> 31.6%
        # https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/671930/Smart_Meters_2017_update.pdf)
        # In 2015, 5.8 % percent of all househods had one: https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/533060/2016_Q1_Smart_Meters_Report.pdf
        self.smart_meter_assump['smart_meter_p_by'] = 0.05

        # Long term smart meter induced general savings, purley as
        # a result of having a smart meter (e.g. 0.03 --> 3% savings)
        # DECC 2015: Smart Metering Early Learning Project: Synthesis report
        # https://www.gov.uk/government/publications/smart-metering-early-learning-project-and-small-scale-behaviour-trials
        # Reasonable assumption is between 0.03 and 0.01 (DECC 2015)
        self.smart_meter_assump['savings_smart_meter'] = {

            # Residential
            'rs_cold': 0.03,
            'rs_cooking': 0.03,
            'rs_lighting': 0.03,
            'rs_wet': 0.03,
            'rs_consumer_electronics': 0.03,
            'rs_home_computing': 0.03,
            'rs_space_heating': 0.03,
            'rs_water_heating': 0.03,

            # Service
            'ss_space_heating': 0.03,
            'ss_water_heating': 0.03,
            'ss_cooling_humidification': 0.03,
            'ss_fans': 0.03,
            'ss_lighting': 0.03,
            'ss_catering': 0.03,
            'ss_small_power': 0.03,
            'ss_ICT_equipment': 0.03,
            'ss_cooled_storage': 0.03,
            'ss_other_gas': 0.03,
            'ss_other_electricity': 0.03,

            # Industry submodule
            'is_high_temp_process': 0,
            'is_low_temp_process': 0,
            'is_drying_separation': 0,
            'is_motors': 0,
            'is_compressed_air': 0,
            'is_lighting': 0,
            'is_space_heating': 0,
            'is_other': 0,
            'is_refrigeration': 0}

        # ============================================================
        # Base temperature assumptions
        # ============================================================
        #
        # Parameters related to smart metering
        #
        # rs_t_heating : int
        #   Residential submodel base temp of heating of base year
        # rs_t_cooling_by : int
        #   Residential submodel base temp of cooling of base year
        # ...
        #
        # Note
        # ----
        # Because demand for cooling cannot directly be linked to
        # calculated cdd, the paramters 'ss_t_base_cooling' is used
        # as a calibration factor. By artifiallcy lowering this
        # parameter, the energy demand assignement over the days
        # in a year is improved.
        # ------------------------------------------------------------
        t_bases = {
            'rs_t_heating': 15.5,
            'ss_t_heating': 15.5,
            'ss_t_cooling': 5,
            'is_t_heating': 15.5}
        # Wrapped so base temperatures are reachable as attributes
        # (e.g. self.t_bases.rs_t_heating); DummyClass is defined below.
        self.t_bases = DummyClass(t_bases)

        # ============================================================
        # Enduses lists affed by hdd/cdd
        # ============================================================
        #
        # These lists show for which enduses temperature related
        # calculations are performed.
        #
        # enduse_space_heating : list
        #   All enduses for which hdd are used for yd calculations
        # ss_enduse_space_cooling : list
        #   All service submodel enduses for which cdd are used for
        #   yd calculations
        # ------------------------------------------------------------
        self.enduse_space_heating = [
            'rs_space_heating',
            'ss_space_heating',
            'is_space_heating']

        self.ss_enduse_space_cooling = [
            'ss_cooling_humidification']

        # ============================================================
        # Industry related
        #
        # High temperature processing (high_temp_ process) dominates
        # energy consumption in the iron and steel
        #
        # ---- Steel production - Enduse: is_high_temp_process, Sector: basic_metals
        # With industry service switch, the future shares of 'is_temp_high_process'
        # in sector 'basic_metals' can be set for 'basic_oxygen_furnace',
        # 'electric_arc_furnace', and 'SNG_furnace' can be specified
        #
        # ---- Cement production - Enduse: is_high_temp_process, Sector: non_metallic_mineral_products
        # Dry kilns, semidry kilns can be set
        # ============================================================

        # Share of cold rolling in steel manufacturing
        self.p_cold_rolling_steel_by = 0.2 # Estimated based on https://aceroplatea.es/docs/EuropeanSteelFigures_2015.pdf
        self.eff_cold_rolling_process = 1.8 # 80% more efficient than hot rolling Fruehan et al. (2002)
        self.eff_hot_rolling_process = 1.0 # 100% assumed efficiency

        # ============================================================
        # Assumption related to heat pump technologies
        # ============================================================
        #
        # Assumptions related to technologies
        #
        # gshp_fraction : list
        #   Fraction of installed gshp_fraction heat pumps in base year
        #   ASHP = 1 - gshp_fraction
        # ------------------------------------------------------------
        self.gshp_fraction = 0.1

        # Load defined technologies
        self.technologies, self.tech_list = read_data.read_technologies(paths['path_technologies'])

        self.installed_heat_pump_by = tech_related.generate_ashp_gshp_split(
            self.gshp_fraction)

        # Add heat pumps to technologies
        self.technologies, self.tech_list['heating_non_const'], self.heat_pumps = tech_related.generate_heat_pump_from_split(
            self.technologies,
            self.installed_heat_pump_by,
            self.fueltypes)

        # ============================================================
        # Fuel Stock Definition
        # Provide for every fueltype of an enduse the share of fuel
        # which is used by technologies in the base year
        # ============================================================$
        fuel_tech_p_by = fuel_shares.assign_by_fuel_tech_p(
            enduses,
            sectors,
            self.fueltypes,
            self.fueltypes_nr)

        # ========================================
        # Get technologies of an enduse and sector
        # ========================================
        self.specified_tech_enduse_by = helpers.get_def_techs(
            fuel_tech_p_by)

        _specified_tech_enduse_by = helpers.add_undef_techs(
            self.heat_pumps,
            self.specified_tech_enduse_by,
            self.enduse_space_heating)
        self.specified_tech_enduse_by = _specified_tech_enduse_by

        # ========================================
        # General other info
        # ========================================
        self.seasons = date_prop.get_season(year_to_model=base_yr)

        self.model_yeardays_daytype, self.yeardays_month, self.yeardays_month_days = date_prop.get_yeardays_daytype(
            year_to_model=base_yr)

        # ========================================
        # Helper functions
        # ========================================
        # Fill in placeholder technologies where an enduse has none defined.
        self.fuel_tech_p_by, self.specified_tech_enduse_by, self.technologies = tech_related.insert_placholder_techs(
            self.technologies,
            fuel_tech_p_by,
            self.specified_tech_enduse_by)

        # ========================================
        # Calculations with assumptions
        # ========================================
        self.cdd_weekend_cfactors = hdd_cdd.calc_weekend_corr_f(
            self.model_yeardays_daytype,
            self.f_ss_cooling_weekend)

        self.ss_weekend_f = hdd_cdd.calc_weekend_corr_f(
            self.model_yeardays_daytype,
            self.f_ss_weekend)

        self.is_weekend_f = hdd_cdd.calc_weekend_corr_f(
            self.model_yeardays_daytype,
            self.f_is_weekend)

        # ========================================
        # Testing
        # ========================================
        testing_functions.testing_fuel_tech_shares(
            self.fuel_tech_p_by)

        testing_functions.testing_tech_defined(
            self.technologies, self.specified_tech_enduse_by)

    def update(self, name, value):
        """Update assumptions

        Arguments
        ---------
        name : str
            name of attribute
        value : any
            Type of value
        """
        setattr(self, name, value)
def update_technology_assumption(
        technologies,
        narrative_f_eff_achieved,
        narrative_gshp_fraction,
        crit_narrative_input=True
    ):
    """Updates technology related properties based on
    scenario assumptions. Calculate average efficiency of
    heat pumps depending on mix of GSHP and ASHP,
    set the efficiency achieval factor of all factor according
    to strategy assumptions

    Parameters
    ----------
    technologies : dict
        Technologies
    narrative_f_eff_achieved : float or narrative
        Efficiency achievement factor (plain value, or a narrative
        when ``crit_narrative_input`` is True)
    narrative_gshp_fraction : float or narrative
        GSHP share of installed heat pumps (plain value, or a
        narrative when ``crit_narrative_input`` is True)
    crit_narrative_input : bool
        Criteria wheter inputs are single values or a narrative

    Returns
    -------
    technologies : dict
        Updated technologies

    Note
    ----
    This needs to be run everytime an assumption is changed
    """
    if crit_narrative_input:
        # Read from narrative the value
        f_eff_achieved = narrative_related.read_from_narrative(narrative_f_eff_achieved)
        gshp_fraction = narrative_related.read_from_narrative(narrative_gshp_fraction)
    else:
        f_eff_achieved = narrative_f_eff_achieved
        gshp_fraction = narrative_gshp_fraction

    # Assign same achieved efficiency factor for all technologies
    technologies = helpers.set_same_eff_all_tech(
        technologies,
        f_eff_achieved)

    # Calculate average eff of hp depending on fraction of GSHP to ASHP
    installed_heat_pump_ey = tech_related.generate_ashp_gshp_split(
        gshp_fraction)

    technologies = tech_related.calc_av_heat_pump_eff_ey(
        technologies, installed_heat_pump_ey)

    return technologies
class DummyClass(object):
    """Plain container whose attributes mirror the given mapping.

    Parameters
    ----------
    variables : dict
        Mapping of attribute names to values; each key becomes an
        attribute on the instance.
    """
    def __init__(self, variables):
        for attr_name in variables:
            setattr(self, attr_name, variables[attr_name])
| {
"repo_name": "nismod/energy_demand",
"path": "energy_demand/assumptions/general_assumptions.py",
"copies": "1",
"size": "22215",
"license": "mit",
"hash": 1712889807449028600,
"line_mean": 39.6124314442,
"line_max": 194,
"alpha_frac": 0.5036236777,
"autogenerated": false,
"ratio": 4.0911602209944755,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094783898694475,
"avg_score": null,
"num_lines": null
} |
__all__ = ['AsyncPythonInterpreter']
# fcntl is POSIX-only; fall back to None where it is unavailable
# (e.g. Windows) so callers can feature-test it with hasattr().
try:
    import fcntl
except ImportError:
    # Was a bare `except:`, which would also have hidden unrelated
    # errors (KeyboardInterrupt, SystemExit, broken installs).
    fcntl = None
import os
import sys
import socket
from StringIO import StringIO
from netrepr import NetRepr, RemoteObjectPool, RemoteObjectReference
import objc
from Foundation import *
# Modules whose source is concatenated into a single payload and shipped
# to the child interpreter when it connects (see writeBytes_(SOURCE)).
IMPORT_MODULES = ['netrepr', 'remote_console', 'remote_pipe', 'remote_bootstrap']
source = StringIO()
for fn in IMPORT_MODULES:
    # Python 2 idiom: file() opens the module source located next to
    # this script; 'rU' enables universal newlines.
    for line in file(fn+'.py', 'rU'):
        source.write(line)
    source.write('\n\n')
# repr() turns the payload into one Python string literal; the trailing
# newline terminates it for the line-oriented bootstrap protocol.
SOURCE = repr(source.getvalue()) + '\n'
def bind_and_listen(hostport):
    """Create a TCP server socket bound to *hostport* and start listening.

    *hostport* is either an ``(host, port)`` tuple or a ``"host:port"``
    string. Returns the listening socket.
    """
    if isinstance(hostport, str):
        host, port = hostport.split(':')
        hostport = (host, int(port))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Mark the descriptor close-on-exec so spawned children don't inherit it.
    if hasattr(fcntl, 'FD_CLOEXEC'):
        flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
    # Allow the address to be rebound promptly after a restart.
    if os.name == 'posix' and sys.platform != 'cygwin':
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(hostport)
    sock.listen(5)
    return sock
class AsyncPythonInterpreter(NSObject):
    """Spawns a child Python interpreter and talks to it over TCP.

    The object binds a local listening socket, launches the child with
    NSTask, pushes it the bootstrap SOURCE payload once it connects
    back, and then splits the incoming byte stream into lines which are
    forwarded to ``self.commandReactor``. All socket I/O is asynchronous
    via NSFileHandle background reads and NSNotificationCenter.
    """

    # Interface Builder outlet; also settable programmatically through
    # initWithHost_port_interpreterPath_scriptPath_commandReactor_.
    commandReactor = objc.IBOutlet('commandReactor')

    def init(self):
        # Designated initializer: start with all state unset.
        self = super(AsyncPythonInterpreter, self).init()
        self.host = None
        self.port = None
        self.interpreterPath = None
        self.scriptPath = None
        self.commandReactor = None
        self.serverSocket = None
        self.serverFileHandle = None
        # Holds partial line data carried over between read callbacks.
        self.buffer = ''
        # NOTE(review): duplicate assignment (already set above); harmless.
        self.serverFileHandle = None
        self.remoteFileHandle = None
        self.childTask = None
        return self

    def initWithHost_port_interpreterPath_scriptPath_commandReactor_(self, host, port, interpreterPath, scriptPath, commandReactor):
        # Convenience initializer for programmatic (non-nib) construction.
        self = self.init()
        self.host = host
        self.port = port
        self.interpreterPath = interpreterPath
        self.scriptPath = scriptPath
        self.commandReactor = commandReactor
        self.serverSocket = None
        return self

    def awakeFromNib(self):
        # Nib path: pull configuration from user defaults, seeding each
        # default value on first run.
        defaults = NSUserDefaults.standardUserDefaults()
        def default(k, v, typeCheck=None):
            # Fetch defaults[k], coercing through typeCheck when given;
            # on a missing or invalid value, store and return fallback v.
            rval = defaults.objectForKey_(k)
            if typeCheck is not None and rval is not None:
                try:
                    rval = typeCheck(rval)
                except TypeError:
                    NSLog(u'%s failed type check %s with value %s', k, typeCheck.__name__, rval)
                    rval = None
            if rval is None:
                defaults.setObject_forKey_(v, k)
                rval = v
            return rval
        self.host = default(u'AsyncPythonInterpreterInterpreterHost', u'127.0.0.1', str)
        self.port = default(u'AsyncPythonInterpreterInterpreterPort', 0, int)
        self.interpreterPath = default(u'AsyncPythonInterpreterInterpreterPath', u'/usr/bin/python', unicode)
        self.scriptPath = type(self).bundleForClass().pathForResource_ofType_(u'tcpinterpreter', u'py')

    def connect(self):
        #NSLog(u'connect')
        # Bind the listening socket, watch it asynchronously, then launch
        # the child interpreter pointed at our address.
        self.serverSocket = bind_and_listen((self.host, self.port))
        self.serverFileHandle = NSFileHandle.alloc().initWithFileDescriptor_(self.serverSocket.fileno())
        nc = NSNotificationCenter.defaultCenter()
        nc.addObserver_selector_name_object_(
            self,
            'remoteSocketAccepted:',
            NSFileHandleConnectionAcceptedNotification,
            self.serverFileHandle)
        self.serverFileHandle.acceptConnectionInBackgroundAndNotify()
        self.remoteFileHandle = None
        # Strip PYTHON* variables so the child starts with a clean
        # interpreter environment (py2: keys() returns a list, so
        # deleting while iterating is safe here).
        for k in os.environ.keys():
            if k.startswith('PYTHON'):
                del os.environ[k]
        self.childTask = NSTask.launchedTaskWithLaunchPath_arguments_(self.interpreterPath, [self.scriptPath, repr(self.serverSocket.getsockname())])
        nc.addObserver_selector_name_object_(
            self,
            'childTaskTerminated:',
            NSTaskDidTerminateNotification,
            self.childTask)
        return self

    def remoteSocketAccepted_(self, notification):
        #NSLog(u'remoteSocketAccepted_')
        # The child connected back: stop listening, send the bootstrap
        # source, and begin the asynchronous read loop.
        self.serverFileHandle.closeFile()
        self.serverFileHandle = None
        ui = notification.userInfo()
        self.remoteFileHandle = ui.objectForKey_(NSFileHandleNotificationFileHandleItem)
        nc = NSNotificationCenter.defaultCenter()
        nc.addObserver_selector_name_object_(
            self,
            'remoteFileHandleReadCompleted:',
            NSFileHandleReadCompletionNotification,
            self.remoteFileHandle)
        self.writeBytes_(SOURCE)
        self.remoteFileHandle.readInBackgroundAndNotify()
        self.commandReactor.connectionEstablished_(self)
        NSNotificationCenter.defaultCenter().postNotificationName_object_(u'AsyncPythonInterpreterOpened', self)

    def remoteFileHandleReadCompleted_(self, notification):
        #NSLog(u'remoteFileHandleReadCompleted_')
        # Buffer incoming data, split off complete lines, and hand each
        # line to the command reactor. Missing data means a read error;
        # zero-length data means the remote side closed the connection.
        ui = notification.userInfo()
        newData = ui.objectForKey_(NSFileHandleNotificationDataItem)
        if newData is None:
            self.close()
            NSLog(u'Error: %@', ui.objectForKey_(NSFileHandleError))
            return
        bytes = newData.bytes()[:]
        if len(bytes) == 0:
            self.close()
            return
        # Re-arm the background read before processing this chunk.
        self.remoteFileHandle.readInBackgroundAndNotify()
        # Only the freshly appended region can contain a new newline.
        start = len(self.buffer)
        buff = self.buffer + newData.bytes()[:]
        #NSLog(u'current buffer: %s', buff)
        lines = []
        while True:
            linebreak = buff.find('\n', start) + 1
            if linebreak == 0:
                break
            lines.append(buff[:linebreak])
            buff = buff[linebreak:]
            start = 0
        #NSLog(u'lines: %s', lines)
        # Whatever remains is an incomplete line; keep it for next time.
        self.buffer = buff
        for line in lines:
            self.commandReactor.lineReceived_fromConnection_(line, self)

    def writeBytes_(self, bytes):
        #NSLog(u'Writing bytes: %s' bytes)
        # Send raw bytes to the child; any Objective-C level failure
        # (e.g. broken pipe) tears the connection down.
        try:
            self.remoteFileHandle.writeData_(NSData.dataWithBytes_length_(bytes, len(bytes)))
        except objc.error:
            self.close()
        #NSLog(u'bytes written.')

    def childTaskTerminated_(self, notification):
        #NSLog(u'childTaskTerminated_')
        # Child process exited; release everything on our side too.
        self.close()

    def closeServerFileHandle(self):
        #NSLog(u'closeServerFileHandle')
        # Best-effort close of the listening handle.
        if self.serverFileHandle is not None:
            try:
                self.serverFileHandle.closeFile()
            except objc.error:
                pass
            self.serverFileHandle = None

    def closeRemoteFileHandle(self):
        #NSLog(u'closeRemoteFileHandle')
        # Best-effort close of the connection to the child.
        if self.remoteFileHandle is not None:
            try:
                self.remoteFileHandle.closeFile()
            except objc.error:
                pass
            self.remoteFileHandle = None

    def terminateChildTask(self):
        #NSLog(u'terminateChildTask')
        # Best-effort termination of the child process.
        if self.childTask is not None:
            try:
                self.childTask.terminate()
            except objc.error:
                pass
            self.childTask = None

    def close(self):
        #NSLog(u'close')
        # Public teardown: unhook all notifications, release resources,
        # then announce the closure to interested observers.
        NSNotificationCenter.defaultCenter().removeObserver_(self)
        self.finalClose()
        NSNotificationCenter.defaultCenter().postNotificationName_object_(u'AsyncPythonInterpreterClosed', self)

    def finalClose(self):
        # Release everything; guarded so repeated calls are harmless.
        if self.commandReactor is not None:
            self.commandReactor.connectionClosed_(self)
            self.commandReactor = None
        self.closeServerFileHandle()
        self.closeRemoteFileHandle()
        self.terminateChildTask()
def test_console():
    """Manual smoke test: spawn a child interpreter wired to a
    ConsoleReactor and run the Cocoa console event loop until the
    interpreter closes."""
    from PyObjCTools import AppHelper
    from ConsoleReactor import ConsoleReactor
    host = '127.0.0.1'
    port = 0  # port 0: let the OS pick a free port
    interpreterPath = sys.executable
    scriptPath = unicode(os.path.abspath('tcpinterpreter.py'))
    commandReactor = ConsoleReactor.alloc().init()
    interp = AsyncPythonInterpreter.alloc().initWithHost_port_interpreterPath_scriptPath_commandReactor_(host, port, interpreterPath, scriptPath, commandReactor)
    interp.connect()
    class ThisEventLoopStopper(NSObject):
        # Notification target: stop the event loop when the interpreter
        # posts AsyncPythonInterpreterClosed.
        def interpFinished_(self, notification):
            AppHelper.stopEventLoop()
    stopper = ThisEventLoopStopper.alloc().init()
    NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(stopper, 'interpFinished:', u'AsyncPythonInterpreterClosed', interp)
    AppHelper.runConsoleEventLoop(installInterrupt=True)
def main():
    """Script entry point: run the interactive console smoke test."""
    test_console()

if __name__ == '__main__':
    main()
| {
"repo_name": "ariabuckles/pyobjc-core",
"path": "Examples/NonFunctional/RemotePyInterpreter/AsyncPythonInterpreter.py",
"copies": "2",
"size": "8660",
"license": "mit",
"hash": 3777736824029818400,
"line_mean": 36.4891774892,
"line_max": 161,
"alpha_frac": 0.6396073903,
"autogenerated": false,
"ratio": 4.14155906264945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003693664650227369,
"num_lines": 231
} |
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import itertools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
from . import fromnumeric as _from_nx
# All public functions in this module dispatch through
# __array_function__ under the 'numpy' module name.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
    # Dispatcher for atleast_1d: every positional argument is relevant
    # for __array_function__ dispatch.
    return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]
    """
    # A 0-d input becomes a one-element array; anything else passes through.
    res = [a.reshape(1) if a.ndim == 0 else a
           for a in (asanyarray(ary) for ary in arys)]
    return res[0] if len(res) == 1 else res
def _atleast_2d_dispatcher(*arys):
    # Dispatcher for atleast_2d: every positional argument is relevant
    # for __array_function__ dispatch.
    return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]
    """
    def _promote(a):
        # Lift 0-d and 1-d inputs to 2-d; higher ranks pass through.
        if a.ndim == 0:
            return a.reshape(1, 1)
        if a.ndim == 1:
            return a[_nx.newaxis, :]
        return a

    res = [_promote(asanyarray(ary)) for ary in arys]
    return res[0] if len(res) == 1 else res
def _atleast_3d_dispatcher(*arys):
    # Dispatcher for atleast_3d: every positional argument is relevant
    # for __array_function__ dispatch.
    return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape) # doctest: +SKIP
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)
    """
    def _promote(a):
        # Lift 0-d, 1-d and 2-d inputs to 3-d; higher ranks pass through.
        if a.ndim == 0:
            return a.reshape(1, 1, 1)
        if a.ndim == 1:
            return a[_nx.newaxis, :, _nx.newaxis]
        if a.ndim == 2:
            return a[:, :, _nx.newaxis]
        return a

    res = [_promote(asanyarray(ary)) for ary in arys]
    return res[0] if len(res) == 1 else res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
    # Shared dispatcher for vstack/hstack; warns on non-sequence input.
    return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [2],
           [3],
           [4]])
    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    arrs = atleast_2d(*tup)
    if not isinstance(arrs, list):
        # atleast_2d returns a bare array for a single input; normalize.
        arrs = [arrs]
    return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays horizontally (column-wise).

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1, 2, 3, 2, 3, 4])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    arrs = atleast_1d(*tup)
    if not isinstance(arrs, list):
        # atleast_1d returns a bare array for a single input; normalize.
        arrs = [arrs]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    if arrs and arrs[0].ndim == 1:
        return _nx.concatenate(arrs, 0)
    else:
        return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
    # Dispatcher for stack: `out` is also relevant for dispatch, so it is
    # appended when given.
    arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
    if out is not None:
        # optimize for the typical case where only arrays is provided
        arrays = list(arrays)
        arrays.append(out)
    return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(arrays, stacklevel=2)
    arrays = [asanyarray(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')
    shapes = {arr.shape for arr in arrays}
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')
    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)
    # Insert a new length-1 axis at `axis` in every input, then
    # concatenate along that axis.
    sl = (slice(None),) * axis + (_nx.newaxis,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled
# (in which case the functions are not wrapped and have no __wrapped__).
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.
        (The mutable default is safe: it is never mutated, only
        concatenated into new lists.)

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size: int
        The number of elements in the final array. This is used the motivate
        the choice of algorithm used using benchmarking wisdom.
    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        #  - more than one way to do things - no point treating tuples like
        #    lists
        #  - horribly confusing behaviour that results when tuples are
        #    treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        # Recurse into every child, lazily, tracking full indices.
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary
    return array(a, ndmin=ndim, copy=False, subok=True)
def _accumulate(values):
return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatation.
Returns
-------
shape: tuple of int
This tuple satisfies:
```
shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
shape == concatenate(arrs, axis).shape
```
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds:
```
ret = concatenate([a, b, c], axis)
_, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
```
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : list of int
        The number of nested lists
    result_ndim: int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain to correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array
    """
    if depth < max_depth:
        # Recurse one nesting level down, collecting the shape, slices and
        # leaf arrays of each child.
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        # This level concatenates along axis (result_ndim - max_depth + depth).
        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to
    block. `max_depth` is the depth of nested lists within `arrays` and
    `result_ndim` is the greatest of the dimensions of the arrays in
    `arrays` and the depth of the lists in `arrays` (see block docstring
    for details).
    """
    if depth >= max_depth:
        # Bottom of the nesting: `arrays` is a scalar or an array, not a list.
        return _atleast_nd(arrays, result_ndim)
    # Concatenate the recursively-built children along the axis that
    # corresponds to this nesting level, counted from the last axis.
    children = [_block(child, max_depth, result_ndim, depth+1)
                for child in arrays]
    return _concatenate(children, axis=-(max_depth-depth))
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
yield from _block_dispatcher(subarrays)
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the normal
    rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
    the same for all blocks. This is primarily useful for working with scalars,
    and means that code like ``np.block([v, 1])`` is valid, where
    ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  2,  3,  4, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in places of `atleast_1d` and `atleast_2d`

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])
    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on a
    # i7-7700HQ processor and dual channel ram 2400MHz.
    # It didn't seem to matter heavily on the dtype used.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        # Large output: allocate once and assign slices.
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        # Small output: plain repeated concatenation is faster.
        return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)

    # A trailing None in the bottom index marks an empty innermost list,
    # which np.block rejects.
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)))

    # The result rank is the larger of the deepest array rank and the
    # list nesting depth.
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
    # Build the result by allocating the full output once and assigning
    # each leaf array into its destination slice (preferred for large
    # outputs, where repeated concatenation would copy data twice).
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F
    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if F_order and not C_order else 'C'
    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result
def _block_concatenate(arrays, list_ndim, result_ndim):
    """Assemble the block result via repeated concatenation."""
    out = _block(arrays, list_ndim, result_ndim)
    if list_ndim != 0:
        return out
    # Edge case: `arrays` was a single ndarray (not a list), so `_block`
    # returned a view.  Copy so the caller always owns the result.  This may
    # copy scalars or lists twice, but that is not a performance-sensitive
    # use case.
    return out.copy()
| {
"repo_name": "mattip/numpy",
"path": "numpy/core/shape_base.py",
"copies": "2",
"size": "29014",
"license": "bsd-3-clause",
"hash": 2130534262759212000,
"line_mean": 31.2377777778,
"line_max": 81,
"alpha_frac": 0.5908526918,
"autogenerated": false,
"ratio": 3.7778645833333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004098497775101207,
"num_lines": 900
} |
__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack',
'column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
def apply_along_axis(func1d, axis, arr, *args):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and
    `a` is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.

    Returns
    -------
    outarr : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension, where the length of
        `outarr` is equal to the size of the return value of `func1d`. If
        `func1d` returns a scalar `outarr` will have one fewer dimensions
        than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([4., 5., 6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([2., 5., 8.])
    """
    arr = asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if axis >= nd:
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
                         % (axis, nd))
    ind = [0]*(nd-1)
    # Object-dtype index holder: a slice object at `axis`, integers elsewhere.
    i = zeros(nd, 'O')
    # `range` returns a list-like only on Python 2; materialize with `list`
    # so that `.remove` works on both Python 2 and 3.
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    res = func1d(arr[tuple(i.tolist())], *args)
    # If res is a number, the output array drops the `axis` dimension.
    if isscalar(res):
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # Increment the odometer-style index over the non-`axis` dims.
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # `func1d` returned an array: keep all dims, resizing along `axis`.
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # Increment the odometer-style index over the non-`axis` dims.
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    `func` is called as `res = func(a, axis)`, where `axis` is the first
    element of `axes`.  The result `res` of the function call must have
    either the same dimensions as `a` or one less dimension.  If `res`
    has one less dimension than `a`, a dimension is inserted before
    `axis`.  The call to `func` is then repeated for each axis in `axes`,
    with `res` as the first argument.

    Parameters
    ----------
    func : function
        This function must take two arguments, `func(a, axis)`.
    a : ndarray
        Input array.
    axes : array_like
        Axes over which `func` is applied, the elements must be integers.

    Returns
    -------
    val : ndarray
        The output array. The number of dimensions is the same as `a`,
        but the shape can be different, depending on whether `func`
        changes the shape of its output with respect to its input.

    See Also
    --------
    apply_along_axis :
        Apply a function to 1-D slices of an array along the given axis.

    Examples
    --------
    >>> a = np.arange(24).reshape(2,3,4)
    >>> np.apply_over_axes(np.sum, a, [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    val = asarray(a)
    N = a.ndim
    # A scalar axis is treated as a one-element sequence.
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        res = func(val, axis)
        if res.ndim != val.ndim:
            # `func` dropped a dimension: re-insert it before `axis`.
            res = expand_dims(res, axis)
            if res.ndim != val.ndim:
                # Python-3-compatible raise (was `raise ValueError, "..."`).
                raise ValueError("function is not returning"
                                 " an array of correct shape")
        val = res
    return val
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a new axis of length one at position `axis` of the array shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where the new axis is to be inserted.
        Negative values count from the end, as in ``x[np.newaxis]``.

    Returns
    -------
    res : ndarray
        Output array with ``a.ndim + 1`` dimensions.

    See Also
    --------
    atleast_1d, atleast_2d, atleast_3d
    """
    arr = asarray(a)
    dims = list(arr.shape)
    if axis < 0:
        axis = axis + len(dims) + 1
    dims.insert(axis, 1)
    return arr.reshape(tuple(dims))
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs become 1-dimensional arrays; inputs that already have
    one or more dimensions are passed through (copies are made only when
    unavoidable).

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=1) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.  Non-array inputs are converted
        to arrays; arrays with two or more dimensions are preserved.

    Returns
    -------
    res, res2, ... : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``.  Copies are avoided where
        possible.

    See Also
    --------
    atleast_1d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=2) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    A scalar becomes shape ``(1, 1, 1)``, a 1-D array of shape ``N``
    becomes a view of shape ``(1, N, 1)``, and a 2-D array of shape
    ``(M, N)`` becomes a view of shape ``(M, N, 1)``.  Arrays with three
    or more dimensions are preserved.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res1, res2, ... : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 3``.  Copies are avoided where
        possible.

    See Also
    --------
    atleast_1d, atleast_2d
    """
    results = []
    for ary in arys:
        ary = asarray(ary)
        ndim = len(ary.shape)
        if ndim == 0:
            reshaped = ary.reshape(1, 1, 1)
        elif ndim == 1:
            reshaped = ary[newaxis, :, newaxis]
        elif ndim == 2:
            reshaped = ary[:, :, newaxis]
        else:
            reshaped = ary
        results.append(reshaped)
    return results[0] if len(results) == 1 else results
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    Each input is promoted to at least 2-D, then all inputs are joined
    along the first axis.  Rebuilds arrays divided by `vsplit`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to be stacked; they must share their shape along all but
        the first axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    hstack, dstack, concatenate, vsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=0)`` for inputs that are
    already at least 2-D.
    """
    promoted = [atleast_2d(piece) for piece in tup]
    return _nx.concatenate(promoted, 0)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    Each input is promoted to at least 1-D, then all inputs are joined
    along the second axis.  Rebuilds arrays divided by ``hsplit``.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to be stacked; they must share their shape along all but
        the second axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    vstack, dstack, concatenate, hsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=1)``.
    """
    promoted = [atleast_1d(piece) for piece in tup]
    return _nx.concatenate(promoted, 1)
row_stack = vstack  # row stacking is identical to vertical stacking
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    1-D inputs are first turned into 2-D column vectors; 2-D inputs are
    stacked as-is, just like with `hstack`.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack.  All of them must have the same first dimension.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays.

    See Also
    --------
    hstack, vstack, concatenate
    """
    columns = []
    for v in tup:
        col = array(v, copy=False, subok=True)
        if col.ndim < 2:
            # Turn a 1-D (or 0-D) input into a single 2-D column.
            col = array(col, copy=False, subok=True, ndmin=2).T
        columns.append(col)
    return _nx.concatenate(columns, 1)
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along third axis).

    Each input is promoted to at least 3-D, then all inputs are joined
    along the third axis.  Rebuilds arrays divided by ``dsplit``.  This is
    a simple way to stack 2-D arrays (images) into a single 3-D array for
    processing.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays to stack; they must share their shape along all but the
        third axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    vstack, hstack, concatenate, dsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=2)``.
    """
    promoted = [atleast_3d(piece) for piece in tup]
    return _nx.concatenate(promoted, 2)
def _replace_zero_by_x_arrays(sub_arys):
    """Replace 0-d or zero-sized sub-arrays by fresh empty 1-d arrays, in place."""
    for i, sub in enumerate(sub_arys):
        shape = _nx.shape(sub)
        # 0-d entries, or entries with a zero-length axis, collapse to [].
        if len(shape) == 0 or _nx.sometrue(_nx.equal(shape, 0)):
            sub_arys[i] = _nx.array([])
    return sub_arys
def array_split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays of equal or near-equal size.

    Please refer to the ``split`` documentation.  The only difference
    between these functions is that ``array_split`` allows
    `indices_or_sections` to be an integer that does *not* equally
    divide the axis.

    See Also
    --------
    split : Split array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]
    """
    try:
        Ntotal = ary.shape[axis]
    except AttributeError:
        # Not an ndarray: fall back on the sequence length.
        Ntotal = len(ary)
    try:
        # Assume a sequence of explicit split points.
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError:
        # `indices_or_sections` is a scalar: the number of sections.
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            # Python-3-compatible raise (was `raise ValueError, '...'`).
            raise ValueError('number sections must be larger than 0.')
        Neach_section, extras = divmod(Ntotal, Nsections)
        # The first `extras` sections each receive one extra element.
        section_sizes = ([0] +
                         extras * [Neach_section + 1] +
                         (Nsections - extras) * [Neach_section])
        div_points = _nx.array(section_sizes).cumsum()
    sub_arys = []
    sary = _nx.swapaxes(ary, axis, 0)
    for i in range(Nsections):
        st = div_points[i]
        end = div_points[i + 1]
        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
    # There is a weird issue with array slicing that allows
    # 0x10 arrays and other such things.  The following kludge is needed
    # to get around this issue.
    sub_arys = _replace_zero_by_x_arrays(sub_arys)
    return sub_arys
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays of equal size.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D array
        If an integer, N, the array will be divided into N equal arrays
        along `axis`; if such a split is not possible, an error is raised.
        If a 1-D array of sorted integers, the entries indicate where
        along `axis` the array is split.  An index exceeding the dimension
        of the array along `axis` yields an empty sub-array.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is given as an integer, but
        a split does not result in equal division.

    See Also
    --------
    array_split : Split an array into multiple sub-arrays of equal or
                  near-equal size; does not raise for unequal division.
    hsplit, vsplit, dsplit, concatenate, hstack, vstack, dstack

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.,  8.])]
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # Scalar: verify the requested number of sections divides evenly.
        sections = indices_or_sections
        N = ary.shape[axis]
        if N % sections:
            # Python-3-compatible raise (was `raise ValueError, '...'`).
            raise ValueError(
                'array split does not result in an equal division')
    res = array_split(ary, indices_or_sections, axis)
    return res
def hsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays horizontally (column-wise).

    Please refer to the ``split`` documentation.  ``hsplit`` is equivalent
    to ``split`` with `axis=1`; the array is always split along the second
    axis regardless of the array dimension (1-D arrays are split along
    axis 0).

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)
    [array([[  0.,   1.],
           [  4.,   5.],
           [  8.,   9.],
           [ 12.,  13.]]),
     array([[  2.,   3.],
           [  6.,   7.],
           [ 10.,  11.],
           [ 14.,  15.]])]
    """
    if len(_nx.shape(ary)) == 0:
        # Python-3-compatible raise (was `raise ValueError, '...'`).
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    if len(ary.shape) > 1:
        return split(ary, indices_or_sections, 1)
    else:
        return split(ary, indices_or_sections, 0)
def vsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays vertically (row-wise).

    Please refer to the ``split`` documentation.  ``vsplit`` is equivalent
    to ``split`` with `axis=0` (default); the array is always split along
    the first axis regardless of the array dimension.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)
    [array([[ 0.,  1.,  2.,  3.],
           [ 4.,  5.,  6.,  7.]]),
     array([[  8.,   9.,  10.,  11.],
           [ 12.,  13.,  14.,  15.]])]
    """
    if len(_nx.shape(ary)) < 2:
        # Python-3-compatible raise (was `raise ValueError, '...'`).
        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
    return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    Please refer to the ``split`` documentation.  ``dsplit`` is equivalent
    to ``split`` with `axis=2`; the array is always split along the third
    axis provided the array dimension is greater than or equal to 3.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)
    [array([[[  0.,   1.],
            [  4.,   5.]],
           [[  8.,   9.],
            [ 12.,  13.]]]),
     array([[[  2.,   3.],
            [  6.,   7.]],
           [[ 10.,  11.],
            [ 14.,  15.]]])]
    """
    if len(_nx.shape(ary)) < 3:
        # Fixed: the original message named 'vsplit' in this dsplit check.
        # Also a Python-3-compatible raise (was `raise ValueError, '...'`).
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    return split(ary, indices_or_sections, 2)
def get_array_wrap(*args):
    """Find the wrapper for the array with the highest priority.

    In case of ties, leftmost wins. If no wrapper is found, return None.
    """
    # Sort candidates by (priority, -position): the maximum is the highest
    # priority, with earlier arguments winning ties via the negated index.
    candidates = sorted(
        (getattr(arg, '__array_priority__', 0), -pos, arg.__array_wrap__)
        for pos, arg in enumerate(args)
        if hasattr(arg, '__array_wrap__'))
    if not candidates:
        return None
    return candidates[-1][-1]
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
         [  ...                       ...      ],
         [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([  5,  50, 500,   6,  60, 600,   7,  70, 700])
    >>> a = np.arange(100).reshape((2,5,2,5))
    >>> b = np.arange(24).reshape((2,3,4))
    >>> c = np.kron(a,b)
    >>> c.shape
    (2, 10, 6, 20)
    >>> I = (1,3,0,2)
    >>> J = (0,2,1)
    >>> J1 = (0,) + J             # extend to ndim=4
    >>> S1 = (1,) + b.shape
    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
    >>> c[K] == a[I]*b[J]
    True
    """
    wrapper = get_array_wrap(a, b)
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    # Scalars short-circuit to a plain elementwise multiply.
    if nda == 0 or ndb == 0:
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    # Pad the shape of the lower-dimensional operand with leading ones.
    nd = ndb
    if ndb != nda:
        if ndb > nda:
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
            nd = nda
    result = outer(a, b).reshape(as_+bs)
    axis = nd-1
    # `range` instead of the Python-2-only `xrange` (works on both).
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The output array.
See Also
--------
repeat
Notes
-----
If `reps` has length d, the result will have dimension of max(d, `A`.ndim).
If `A`.ndim < d, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1,3) for 2-D replication,
or shape (1,1,3) for 3-D replication. If this is not the desired behavior,
promote `A` to d-dimensions manually before calling this function.
If `A`.ndim > d, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2,3,4,5), a `reps` of (2,2) is treated as
(1,1,2,2).
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
<BLANKLINE>
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A,copy=False,subok=True,ndmin=d)
shape = list(c.shape)
n = max(c.size,1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep!=1:
c = c.reshape(-1,n).repeat(nrep,0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n /= max(dim_in,1)
return c.reshape(shape)
| {
"repo_name": "efiring/numpy-work",
"path": "numpy/lib/shape_base.py",
"copies": "2",
"size": "29979",
"license": "bsd-3-clause",
"hash": 5123676412467717000,
"line_mean": 26.7583333333,
"line_max": 80,
"alpha_frac": 0.5092564795,
"autogenerated": false,
"ratio": 3.3684269662921347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864320568584357,
"avg_score": 0.00267257544155557,
"num_lines": 1080
} |
__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack']
import numeric as _nx
from numeric import array, asanyarray, newaxis
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs become 1-dimensional arrays; higher-dimensional inputs
    are passed through (copies are made only when unavoidable).

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=1) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.  Non-array inputs are converted
        to arrays; arrays with two or more dimensions are preserved.

    Returns
    -------
    res, res2, ... : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``.  Copies are avoided where
        possible.

    See Also
    --------
    atleast_1d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=2) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    A scalar becomes shape ``(1, 1, 1)``, a 1-D array of shape ``(N,)``
    becomes a view of shape ``(1, N, 1)``, and a 2-D array of shape
    ``(M, N)`` becomes a view of shape ``(M, N, 1)``.  Arrays with three
    or more dimensions are preserved.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res1, res2, ... : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 3``.  Copies are avoided where
        possible.

    See Also
    --------
    atleast_1d, atleast_2d
    """
    results = []
    for ary in arys:
        ary = asanyarray(ary)
        ndim = len(ary.shape)
        if ndim == 0:
            reshaped = ary.reshape(1, 1, 1)
        elif ndim == 1:
            reshaped = ary[newaxis, :, newaxis]
        elif ndim == 2:
            reshaped = ary[:, :, newaxis]
        else:
            reshaped = ary
        results.append(reshaped)
    return results[0] if len(results) == 1 else results
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    Each input is promoted to at least 2-D, then all inputs are joined
    along the first axis.  Rebuilds arrays divided by `vsplit`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to be stacked; they must share their shape along all but
        the first axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    hstack, dstack, concatenate, vsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=0)`` for inputs that are
    already at least 2-D.
    """
    promoted = [atleast_2d(piece) for piece in tup]
    return _nx.concatenate(promoted, 0)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    Each input is promoted to at least 1-D, then all inputs are joined
    along the second axis.  Rebuilds arrays divided by `hsplit`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to be stacked; they must share their shape along all but
        the second axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    vstack, dstack, concatenate, hsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=1)``.
    """
    promoted = [atleast_1d(piece) for piece in tup]
    return _nx.concatenate(promoted, 1)
| {
"repo_name": "qsnake/numpy",
"path": "numpy/core/shape_base.py",
"copies": "1",
"size": "6306",
"license": "bsd-3-clause",
"hash": 1895051155962351900,
"line_mean": 23.3474903475,
"line_max": 77,
"alpha_frac": 0.5245797653,
"autogenerated": false,
"ratio": 3.4782129067843353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45027926720843353,
"avg_score": null,
"num_lines": null
} |
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack']
import numeric as _nx
from numeric import array, asanyarray, newaxis
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar (0-d) inputs are reshaped to 1-dimensional arrays; inputs with
    one or more dimensions are passed through unchanged.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        A single array when one argument is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.  Copies are made only if
        necessary.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    outputs = []
    for ary in arys:
        ary = asanyarray(ary)
        outputs.append(ary.reshape(1) if ary.shape == () else ary)
    return outputs[0] if len(outputs) == 1 else outputs
def atleast_2d(*arys):
    """
    View each input as an array with at least two dimensions.

    Scalars become ``(1, 1)`` arrays and 1-d inputs gain a leading axis;
    inputs with two or more dimensions are passed through unchanged.
    Copies are avoided where possible.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like inputs.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``.
    """
    outputs = []
    for item in arys:
        arr = asanyarray(item)
        rank = len(arr.shape)
        if rank == 0:
            arr = arr.reshape(1, 1)
        elif rank == 1:
            # Prepend an axis: a length-N vector becomes a 1xN row.
            arr = arr[newaxis, :]
        outputs.append(arr)
    return outputs[0] if len(outputs) == 1 else outputs
def atleast_3d(*arys):
    """
    View each input as an array with at least three dimensions.

    A scalar becomes shape ``(1, 1, 1)``; a shape-``(N,)`` vector
    becomes a ``(1, N, 1)`` view; a shape-``(M, N)`` matrix becomes a
    ``(M, N, 1)`` view.  Higher-dimensional inputs pass through
    unchanged.  Copies are avoided where possible.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like inputs.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 3``.
    """
    outputs = []
    for item in arys:
        arr = asanyarray(item)
        rank = len(arr.shape)
        if rank == 0:
            arr = arr.reshape(1, 1, 1)
        elif rank == 1:
            arr = arr[newaxis, :, newaxis]
        elif rank == 2:
            arr = arr[:, :, newaxis]
        outputs.append(arr)
    return outputs[0] if len(outputs) == 1 else outputs
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
Take a sequence of arrays and stack them vertically to make a single
array. Rebuild arrays divided by `vsplit`.
Parameters
----------
tup : sequence of ndarrays
Tuple containing arrays to be stacked. The arrays must have the same
shape along all but the first axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays together.
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that
are at least 2-dimensional.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
return _nx.concatenate(map(atleast_2d,tup),0)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
Take a sequence of arrays and stack them horizontally to make
a single array. Rebuild arrays divided by `hsplit`.
Parameters
----------
tup : sequence of ndarrays
All arrays must have the same shape along all but the second axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays together.
hsplit : Split array along second axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=1)``
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrs = map(atleast_1d,tup)
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
| {
"repo_name": "ArneBab/pypyjs",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/numpypy/core/shape_base.py",
"copies": "2",
"size": "6823",
"license": "mit",
"hash": 3433352907226032000,
"line_mean": 23.8109090909,
"line_max": 79,
"alpha_frac": 0.5205921149,
"autogenerated": false,
"ratio": 3.4864588656106283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9995279006110783,
"avg_score": 0.0023543948799691827,
"num_lines": 275
} |
__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack']
import numeric as _nx
from numeric import array, asarray, newaxis
def atleast_1d(*arys):
    """
    Convert each input to an array with at least one dimension.

    Scalars become 1-d arrays; inputs that already have one or more
    dimensions are preserved (``ndmin=1`` with ``copy=False`` and
    ``subok=True`` avoids copies and keeps ndarray subclasses).

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.
    """
    converted = [array(ary, copy=False, subok=True, ndmin=1) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_2d(*arys):
    """
    View each input as an array with at least two dimensions.

    Lower-dimensional inputs gain leading axes via ``ndmin=2``; inputs
    with two or more dimensions are preserved.  ``copy=False`` and
    ``subok=True`` avoid copies and keep ndarray subclasses.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``.
    """
    promoted = [array(ary, copy=False, subok=True, ndmin=2) for ary in arys]
    if len(promoted) == 1:
        return promoted[0]
    return promoted
def atleast_3d(*arys):
    """
    View each input as an array with at least three dimensions.

    A scalar becomes shape ``(1, 1, 1)``; shape ``(N,)`` becomes a
    ``(1, N, 1)`` view; shape ``(M, N)`` becomes a ``(M, N, 1)`` view.
    Inputs with three or more dimensions pass through unchanged.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 3``.
    """
    promoted = []
    for item in arys:
        arr = asarray(item)
        rank = len(arr.shape)
        if rank == 0:
            arr = arr.reshape(1, 1, 1)
        elif rank == 1:
            arr = arr[newaxis, :, newaxis]
        elif rank == 2:
            arr = arr[:, :, newaxis]
        promoted.append(arr)
    return promoted[0] if len(promoted) == 1 else promoted
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
Take a sequence of arrays and stack them vertically to make a single
array. Rebuild arrays divided by `vsplit`.
Parameters
----------
tup : sequence of ndarrays
Tuple containing arrays to be stacked. The arrays must have the same
shape along all but the first axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays together.
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=0)``
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
return _nx.concatenate(map(atleast_2d,tup),0)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
Take a sequence of arrays and stack them horizontally to make
a single array. Rebuild arrays divided by ``hsplit``.
Parameters
----------
tup : sequence of ndarrays
All arrays must have the same shape along all but the second axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays together.
hsplit : Split array along second axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=1)``
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
return _nx.concatenate(map(atleast_1d,tup),1)
| {
"repo_name": "chadnetzer/numpy-gaurdro",
"path": "numpy/core/shape_base.py",
"copies": "4",
"size": "6294",
"license": "bsd-3-clause",
"hash": -7742093100033969000,
"line_mean": 23.3011583012,
"line_max": 79,
"alpha_frac": 0.5241499841,
"autogenerated": false,
"ratio": 3.485049833887043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006075969523756111,
"num_lines": 259
} |
import zipfile
import sys
import os
# Junk padding string ("AAAA...", 7280 chars) that shorten() collapses
# back to a single "A" in every archive entry name.
nm = "A"*7280
def shorten(sname, dname):
try:
source = zipfile.ZipFile(sname, 'r')
except:
print "[*] Can not file the source JAR file"
sys.exit(1)
target = zipfile.ZipFile(dname, 'w', zipfile.ZIP_DEFLATED)
for file in source.filelist:
target.writestr(file.filename.replace(nm,"A"), source.read(file.filename))
target.close()
source.close()
print "[*] Shortened", sname, "to", dname
def cls():
    """Clear the terminal using the platform's native command."""
    if sys.platform == 'win32':
        os.system("cls")
    else:
        # Bug fix: the original only recognized 'linux-i386'/'linux2' and
        # fell through to the Windows `cls` for every other non-Windows
        # platform (e.g. 'darwin', 'linux3'), where `clear` is correct.
        os.system("clear")
def banner():
cls()
print "======================================================="
print u"Allatori ZIP Shortener v1.0 [http://www.mertsarica.com]"
print "======================================================="
def usage():
    """Print the command-line usage string."""
    print "Usage: python allatori_zip_shortener.py <source JAR file> <destination JAR file>\n"
if __name__ == '__main__':
    # banner() clears the screen itself, but the original also cleared
    # beforehand; both calls are kept for identical behavior.
    cls()
    banner()
    if len(sys.argv) >= 3:
        shorten(sys.argv[1], sys.argv[2])
    else:
        usage()
        sys.exit(1)
| {
"repo_name": "mertsarica/hack4career",
"path": "codes/allatori_zip_shortener.py",
"copies": "1",
"size": "1400",
"license": "apache-2.0",
"hash": 7461083032103459000,
"line_mean": 25.4150943396,
"line_max": 97,
"alpha_frac": 0.5342857143,
"autogenerated": false,
"ratio": 3.2634032634032635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9003354048902267,
"avg_score": 0.05886698576019932,
"num_lines": 53
} |
__all__ = ['Attribute']
class Attribute:
    """Description of a GLSL vertex-shader input attribute and where its
    data lives inside a structured numpy buffer."""

    def __init__(self,
                 name,
                 lookup_sequence,
                 *,
                 is_vector=False,
                 is_packed_array=False,
                 array_size=None):
        """
        * name is the name of the input attribute in the GLSL vertex
          shader code.  It is mainly used for error-reporting.
        * lookup_sequence is an iterable of names or indices, which constitute
          a path in the numpy array's dtype that locates the data in the numpy
          array.  Names are used to index into records, and indices are used
          to lookup into arrays.
        * is_vector is a Boolean that says whether this attribute is a vector,
          e.g., vec4.  Vector attributes have between 2 and 4 components.  The
          final dimension of the numpy array's shape must match the vector
          length.
        * is_packed_array is a Boolean that indicates that the data is packed
          as vectors of length 4, which means that the array_size times 4 is
          greater than or equal to the final dimension of the numpy array's
          shape.
        * array_size: an integer representing the size of the array
          attribute, e.g., float data[7].  For now, these must be bound to
          contiguous entries in the buffer numpy array.  Otherwise
          array_size is None to indicate a scalar.

        Raises ValueError if is_vector and is_packed_array are both set,
        since the two layouts are mutually exclusive.
        """
        # Validate before assigning so a rejected object is never
        # partially constructed; the original raised a bare ValueError
        # with no message.
        if is_vector and is_packed_array:
            raise ValueError(
                f"attribute {name!r} cannot be both a vector and a "
                f"packed array")
        self.name = name
        self.lookup_sequence = lookup_sequence
        self.is_vector = is_vector
        self.array_size = array_size
        self.is_packed_array = is_packed_array

    def __repr__(self):
        return (f"{type(self).__qualname__}("
                f"{self.name}, {self.lookup_sequence}, "
                f"is_vector={self.is_vector}, "
                f"is_packed_array={self.is_packed_array}, "
                f"array_size={self.array_size})")
| {
"repo_name": "NeilGirdhar/glx",
"path": "glx/shader_program/attribute.py",
"copies": "1",
"size": "1985",
"license": "mit",
"hash": -1031232717883819100,
"line_mean": 42.152173913,
"line_max": 78,
"alpha_frac": 0.5853904282,
"autogenerated": false,
"ratio": 4.411111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5496501539311112,
"avg_score": null,
"num_lines": null
} |
__all__ = ['AttrWrapper']
from openmdao.units import PhysicalQuantity
class AttrWrapper(object):
    """Couples a value with the metadata needed to validate it.

    For example, an AttrWrapper around a Float carries 'units' metadata
    so that unit compatibility can be checked and conversions applied.
    """

    def __init__(self, value=None, **metadata):
        self.value, self.metadata = value, metadata
def _get_PQ(obj):
    """Unwrap a UnitsAttrWrapper to its PhysicalQuantity; pass anything
    else through untouched."""
    return obj.pq if isinstance(obj, UnitsAttrWrapper) else obj
class UnitsAttrWrapper(AttrWrapper):
    """An AttrWrapper carrying physical units, supporting unit-aware
    arithmetic between wrappers and plain PhysicalQuantity objects.

    Each arithmetic operation delegates to the underlying
    PhysicalQuantity and re-wraps the result (with the resulting units).
    """

    def __init__(self, value=None, **metadata):
        super(UnitsAttrWrapper, self).__init__(value, **metadata)
        # 'units' metadata is required; it defines the PhysicalQuantity.
        self.pq = PhysicalQuantity(value, metadata['units'])

    def __add__(self, other):
        pq = self.pq + _get_PQ(other)
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    __radd__ = __add__

    def __sub__(self, other):
        pq = self.pq - _get_PQ(other)
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    def __rsub__(self, other):
        pq = _get_PQ(other) - self.pq
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    def __mul__(self, other):
        pq = self.pq * _get_PQ(other)
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    __rmul__ = __mul__

    def __div__(self, other):
        pq = self.pq / _get_PQ(other)
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    def __rdiv__(self, other):
        pq = _get_PQ(other) / self.pq
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    def __pow__(self, other):
        # Bug fix: this previously performed division (copy/paste from
        # __div__) instead of exponentiation.
        pq = self.pq ** _get_PQ(other)
        return UnitsAttrWrapper(pq.value, units=pq.get_unit_name())

    def __rpow__(self, other):
        raise TypeError('Exponents must be dimensionless but this one has units of %s' % self.pq.get_unit_name())

    def __abs__(self):
        return UnitsAttrWrapper(abs(self.value), units=self.pq.get_unit_name())

    def __pos__(self):
        return self

    def __neg__(self):
        return UnitsAttrWrapper(-self.value, units=self.pq.get_unit_name())

    def convert_from(self, wrapper):
        """Return *wrapper*'s value converted into this wrapper's units.

        Raises ValueError when *wrapper* carries no units.
        """
        if isinstance(wrapper, UnitsAttrWrapper):
            return wrapper.pq.convert_value(self.pq.unit)
        raise ValueError("incompatible AttrWrapper objects")
def create_attr_wrapper(value, **meta):
    """Factory: pick UnitsAttrWrapper when 'units' metadata is present,
    otherwise a plain AttrWrapper."""
    wrapper_cls = UnitsAttrWrapper if 'units' in meta else AttrWrapper
    return wrapper_cls(value, **meta)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/attrwrapper.py",
"copies": "1",
"size": "2692",
"license": "mit",
"hash": 6784085657674495000,
"line_mean": 29.9425287356,
"line_max": 113,
"alpha_frac": 0.6281575037,
"autogenerated": false,
"ratio": 3.5467720685111987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46749295722111983,
"avg_score": null,
"num_lines": null
} |
__all__ = ['auth']
__version__ = '0.1'
import hashlib
import sha
from functools import wraps

from flask_login import (LoginManager,
                         login_required,
                         login_user,
                         current_user,
                         login_fresh,
                         logout_user,
                         AnonymousUserMixin,
                         )

from db.sql import User as DbUser
def noauth(func):
    """Pass-through decorator marking *func* as needing no authentication."""
    @wraps(func)
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)
    return passthrough
class Auth(object):
    """Thin wrapper around Flask-Login: wires up the LoginManager and
    exposes login/logout against the in-memory User registry."""

    def init_app(self, app, method):
        """Attach a LoginManager to *app*.

        NOTE(review): *method* is accepted but never used here — confirm
        against callers before removing it.
        """
        self._login_manager = LoginManager()
        self._login_manager.session_protection = "basic"
        self._login_manager.init_app(app)
        self._login_manager.user_loader(self._load_user)
        self._login_manager.anonymous_user = Anonymous
        # Re-export the flask_login helpers on the Auth instance.
        self.login_required = login_required
        self.login_fresh = login_fresh
        self.current_user = current_user

    def login(self, **kargs):
        """Authenticate and log in a user.

        Requires 'username' and 'password' keyword arguments; 'remember'
        is optional and defaults to False.  Returns the User on success,
        otherwise None.
        """
        if 'username' in kargs and 'password' in kargs:
            u = User.get(kargs['username'], kargs['password'])
            if u is None:
                return None
            # Bug fix: 'remember' was read unconditionally and raised
            # KeyError when omitted, even though only username/password
            # were validated above.
            login_user(u, remember=kargs.get('remember', False))
            return u
        return None

    def unauthorized_handler(self, func):
        """Register *func* as the handler for unauthorized requests."""
        self._login_manager.unauthorized_handler(func)

    @staticmethod
    def _load_user(userid):
        # Session reload path: look up by id only, no password check.
        u = User.get(userid, None)
        return u

    def logout(self, username):
        """Drop *username* from the registry and end the session."""
        User.delete(username)
        return logout_user()
class User(object):
    """In-memory user record backed by the SQL user table for password
    verification.  Implements the Flask-Login user interface."""

    # Registry of currently logged-in users, keyed by username.
    _users = {}

    def __init__(self, userid):
        self._userid = userid
        self._is_admin = False

    @classmethod
    def get(cls, u, password):
        """Return the User for *u*.

        With password None, only look up an already-registered user.
        Otherwise authenticate against the database and, on success,
        create and register a User (flagging 'admin' specially).
        """
        if password is None:
            if u in cls._users:
                return cls._users[u]
            return None
        if not cls.authenticate(u, password):
            return None
        user = User(u)
        if u == 'admin':
            user._is_admin = True
        cls._users[u] = user
        return user

    @classmethod
    def authenticate(cls, u, password):
        """Check *password* against the stored SHA-1 hash for user *u*."""
        users = DbUser.select().where(DbUser.user == u)
        if users.count() <= 0:
            return False
        # hashlib replaces the deprecated `sha` module;
        # hashlib.sha1(x).hexdigest() equals sha.new(x).hexdigest().
        # NOTE(review): unsalted SHA-1 is a weak password hash — changing
        # it requires a schema migration, so only flagging it here.
        if users[0].password == hashlib.sha1(password).hexdigest():
            return True
        return False

    @classmethod
    def delete(cls, u):
        """Remove *u* from the logged-in registry, if present."""
        if u in cls._users:
            del cls._users[u]

    # --- Flask-Login user interface -------------------------------------
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def is_admin(self):
        return self._is_admin

    def get_id(self):
        return self._userid

    def username(self):
        return self._userid
class Anonymous(AnonymousUserMixin):
    # Flask-Login anonymous user; mirrors User's username() accessor so
    # callers can invoke current_user.username() unconditionally.
    def __init__(self):
        self._userid = 'Anonymous'
    def username(self):
        """Return the fixed display name for unauthenticated sessions."""
        return self._userid
# Module-level singleton, exported via __all__; apps call auth.init_app().
auth = Auth()
| {
"repo_name": "matthewbaggett/Docker-Ubuntu-Desktop",
"path": "web/auth/__init__.py",
"copies": "12",
"size": "2939",
"license": "apache-2.0",
"hash": -3900741275679096300,
"line_mean": 23.6974789916,
"line_max": 62,
"alpha_frac": 0.5464443688,
"autogenerated": false,
"ratio": 4.204577968526467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006002400960384153,
"num_lines": 119
} |
"""Allauth overrides"""
from __future__ import absolute_import
import json
import logging
from allauth.account.adapter import DefaultAccountAdapter
from django.template.loader import render_to_string
from readthedocs.core.utils import send_email
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
log = logging.getLogger(__name__)
class AccountAdapter(DefaultAccountAdapter):
    """Customize Allauth emails to match our current patterns"""

    def format_email_subject(self, subject):
        """Return the subject coerced to text, with no site prefix."""
        return force_text(subject)

    def send_mail(self, template_prefix, email, context):
        """Render the allauth templates and dispatch via our send_email."""
        raw_subject = render_to_string(
            '{0}_subject.txt'.format(template_prefix), context
        )
        # Collapse the rendered subject onto a single trimmed line.
        subject = self.format_email_subject(
            " ".join(raw_subject.splitlines()).strip()
        )

        # Allauth sends some additional data in the context, remove it if the
        # pieces can't be json encoded
        removed_keys = []
        for key in list(context.keys()):
            try:
                json.dumps(context[key])
            except (ValueError, TypeError):
                del context[key]
                removed_keys.append(key)
        if removed_keys:
            log.debug('Removed context we were unable to serialize: %s',
                      removed_keys)

        send_email(
            recipient=email,
            subject=subject,
            template='{0}_message.txt'.format(template_prefix),
            template_html='{0}_message.html'.format(template_prefix),
            context=context
        )
| {
"repo_name": "pombredanne/readthedocs.org",
"path": "readthedocs/core/adapters.py",
"copies": "3",
"size": "1664",
"license": "mit",
"hash": 2204684073432813300,
"line_mean": 30.3962264151,
"line_max": 77,
"alpha_frac": 0.6334134615,
"autogenerated": false,
"ratio": 4.356020942408377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6489434403908377,
"avg_score": null,
"num_lines": null
} |
"""Allauth overrides"""
import pickle
import logging
from allauth.account.adapter import DefaultAccountAdapter
from django.template.loader import render_to_string
from readthedocs.core.utils import send_email
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
log = logging.getLogger(__name__)
class AccountAdapter(DefaultAccountAdapter):
    """Customize Allauth emails to match our current patterns"""

    def format_email_subject(self, subject):
        """Return the subject coerced to text, with no site prefix."""
        return force_text(subject)

    def send_mail(self, template_prefix, email, context):
        """Render the allauth templates and dispatch via our send_email.

        Context entries that cannot be pickled are stripped first —
        allauth puts extra objects in the context that do not survive
        serialization.
        """
        subject = render_to_string(
            '{0}_subject.txt'.format(template_prefix), context
        )
        subject = " ".join(subject.splitlines()).strip()
        subject = self.format_email_subject(subject)

        # Allauth sends some additional data in the context, remove it if the
        # pieces can't be pickled
        removed_keys = []
        # Bug fix: iterate over a snapshot of the keys.  Deleting entries
        # while iterating context.keys() raises RuntimeError on Python 3,
        # where keys() is a live view (the sibling adapter already does
        # this).
        for key in list(context.keys()):
            try:
                _ = pickle.dumps(context[key])  # noqa for F841
            except (pickle.PickleError, TypeError):
                removed_keys.append(key)
                del context[key]
        if removed_keys:
            log.debug('Removed context we were unable to serialize: %s',
                      removed_keys)

        send_email(
            recipient=email,
            subject=subject,
            template='{0}_message.txt'.format(template_prefix),
            template_html='{0}_message.html'.format(template_prefix),
            context=context
        )
| {
"repo_name": "tddv/readthedocs.org",
"path": "readthedocs/core/adapters.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": 1990845204553198800,
"line_mean": 29.9423076923,
"line_max": 77,
"alpha_frac": 0.6314481044,
"autogenerated": false,
"ratio": 4.384196185286103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5515644289686104,
"avg_score": null,
"num_lines": null
} |
__all__ = ('autoclass', 'ensureclass')
from jnius import (
JavaClass, MetaJavaClass, JavaMethod, JavaStaticMethod,
JavaField, JavaStaticField, JavaMultipleMethod, find_javaclass, cast
)
# Workaround for the new metaclass syntax in Python 3: calling the
# metaclass directly builds the common base class in a way that works
# under both Python 2 and Python 3 syntax rules.
ClassBase = MetaJavaClass('ClassBase', (JavaClass, ), {})
class Class(ClassBase):
    # jnius proxy for java.lang.Class.  Several JNI signatures were
    # malformed in the original and are fixed here (see the inline
    # comments); the sibling copy of this module and the JNI type
    # signature specification were used as the reference.
    __javaclass__ = 'java/lang/Class'

    # Primitive JNI type codes (Z, I, ...) must not carry a trailing ';'
    # -- only object descriptors ('Lpkg/Cls;') are terminated that way.
    desiredAssertionStatus = JavaMethod('()Z')
    # JNI parameter lists juxtapose descriptors with no commas, and every
    # object descriptor ends with ';' (the original had commas, missing
    # semicolons and a mangled 'java/langClass' return type).
    forName = JavaMultipleMethod([
        ('(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;', True, False),
        ('(Ljava/lang/String;)Ljava/lang/Class;', True, False), ])
    getClassLoader = JavaMethod('()Ljava/lang/ClassLoader;')
    getClasses = JavaMethod('()[Ljava/lang/Class;')
    getComponentType = JavaMethod('()Ljava/lang/Class;')
    getConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredClasses = JavaMethod('()[Ljava/lang/Class;')
    getDeclaredConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getDeclaredConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getDeclaredFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    getDeclaredMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getDeclaredMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    getDeclaringClass = JavaMethod('()Ljava/lang/Class;')
    getField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    getInterfaces = JavaMethod('()[Ljava/lang/Class;')
    getMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    # java.lang.Class.getModifiers() returns a plain int, not int[].
    getModifiers = JavaMethod('()I')
    getName = JavaMethod('()Ljava/lang/String;')
    getPackage = JavaMethod('()Ljava/lang/Package;')
    getProtectionDomain = JavaMethod('()Ljava/security/ProtectionDomain;')
    getResource = JavaMethod('(Ljava/lang/String;)Ljava/net/URL;')
    getResourceAsStream = JavaMethod('(Ljava/lang/String;)Ljava/io/InputStream;')
    getSigners = JavaMethod('()[Ljava/lang/Object;')
    # getSuperclass/isAssignableFrom deal in java.lang.Class, not the
    # nonexistent java.lang.reflect.Class used by the original.
    getSuperclass = JavaMethod('()Ljava/lang/Class;')
    isArray = JavaMethod('()Z')
    isAssignableFrom = JavaMethod('(Ljava/lang/Class;)Z')
    isInstance = JavaMethod('(Ljava/lang/Object;)Z')
    isInterface = JavaMethod('()Z')
    isPrimitive = JavaMethod('()Z')
    newInstance = JavaMethod('()Ljava/lang/Object;')
    toString = JavaMethod('()Ljava/lang/String;')
class Object(ClassBase):
    # jnius proxy for java.lang.Object: the minimal reflection surface
    # (getClass/hashCode/toString) needed by this module.
    __javaclass__ = 'java/lang/Object'
    getClass = JavaMethod('()Ljava/lang/Class;')
    hashCode = JavaMethod('()I')
    toString = JavaMethod('()Ljava/lang/String;')
class Modifier(ClassBase):
    # jnius proxy for java.lang.reflect.Modifier: static predicates over
    # a modifier bitmask as returned by getModifiers().  '(I)Z' = each
    # takes one int and returns a boolean.
    __javaclass__ = 'java/lang/reflect/Modifier'
    isAbstract = JavaStaticMethod('(I)Z')
    isFinal = JavaStaticMethod('(I)Z')
    isInterface = JavaStaticMethod('(I)Z')
    isNative = JavaStaticMethod('(I)Z')
    isPrivate = JavaStaticMethod('(I)Z')
    isProtected = JavaStaticMethod('(I)Z')
    isPublic = JavaStaticMethod('(I)Z')
    isStatic = JavaStaticMethod('(I)Z')
    isStrict = JavaStaticMethod('(I)Z')
    isSynchronized = JavaStaticMethod('(I)Z')
    isTransient = JavaStaticMethod('(I)Z')
    isVolatile = JavaStaticMethod('(I)Z')
class Method(ClassBase):
    # jnius proxy for java.lang.reflect.Method; autoclass() uses these
    # accessors to derive each method's JNI signature and dispatch kind.
    __javaclass__ = 'java/lang/reflect/Method'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getReturnType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
class Field(ClassBase):
    # jnius proxy for java.lang.reflect.Field; autoclass() uses it to
    # derive each field's signature and static-ness.
    __javaclass__ = 'java/lang/reflect/Field'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
class Constructor(ClassBase):
    # jnius proxy for java.lang.reflect.Constructor; autoclass() uses it
    # to build the '(...)V' constructor signatures.
    __javaclass__ = 'java/lang/reflect/Constructor'
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
def get_signature(cls_tp):
    """Return the JNI type signature string for *cls_tp*, an object
    exposing ``getName()`` (a reflected java.lang.Class)."""
    name = cls_tp.getName()
    # Array classes already use JNI-style names, just dotted.
    if name.startswith('['):
        return name.replace('.', '/')
    primitives = {
        'void': 'V', 'boolean': 'Z', 'byte': 'B',
        'char': 'C', 'short': 'S', 'int': 'I',
        'long': 'J', 'float': 'F', 'double': 'D'}
    if name in primitives:
        return primitives[name]
    # Deliberately NOT recursing into ensureclass() here: on Android it
    # overflowed the JNI local reference table (max=512).
    return 'L{0};'.format(name.replace('.', '/'))
# Class names already handed to autoclass(); ensureclass() checks this
# list first to avoid re-registering (and recursing on) the same class.
registers = []
def ensureclass(clsname):
    """Run autoclass() for *clsname* unless it is already registered or
    already known to the MetaJavaClass registry."""
    if clsname in registers:
        return
    if MetaJavaClass.get_javaclass(clsname.replace('.', '/')):
        return
    # Record the name *before* autoclassing so a recursive call bails out.
    registers.append(clsname)
    autoclass(clsname)
def iterator_wrapper(java_iter_fn):
    """Wrap a Java ``iterator()`` accessor into a Python generator method
    suitable for use as ``__iter__`` on an autoclassed type.

    Java objects (those carrying ``__javaclass__``) are cast to their
    concrete proxy class before being yielded; everything else is
    yielded as-is.
    """
    def generate(self):
        java_iter = java_iter_fn()
        while java_iter.hasNext():
            item = java_iter.next()
            if hasattr(item, '__javaclass__'):
                yield cast(item.__javaclass__, item)
            else:
                yield item
    return generate
def autoclass(clsname):
    """Build (or fetch from cache) a Python proxy class for the Java
    class *clsname* by reflecting its constructors, methods and fields.

    Raises Exception when the Java class cannot be found.
    """
    jniname = clsname.replace('.', '/')
    cached = MetaJavaClass.get_javaclass(jniname)
    if cached:
        return cached

    clsname = str(clsname)
    classDict = {}

    c = find_javaclass(clsname)
    if c is None:
        # Bug fix: the original formatted the message with `c` (always
        # None on this path) instead of the class name, and followed the
        # raise with an unreachable `return None`.
        raise Exception('Java class {0} not found'.format(clsname))

    # Constructors: every signature is '(...)V'.
    constructors = []
    for constructor in c.getConstructors():
        sig = '({0})V'.format(''.join(
            [get_signature(x) for x in constructor.getParameterTypes()]))
        constructors.append((sig, constructor.isVarArgs()))
    classDict['__javaconstructor__'] = constructors

    methods = c.getMethods()
    methods_name = [x.getName() for x in methods]
    for index, method in enumerate(methods):
        name = methods_name[index]
        if name in classDict:
            # Already bound (overload group processed on first sighting).
            continue
        count = methods_name.count(name)
        if count == 1:
            # Unique name: bind a single (static or instance) method.
            static = Modifier.isStatic(method.getModifiers())
            varargs = method.isVarArgs()
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in method.getParameterTypes()]),
                get_signature(method.getReturnType()))
            # Renamed from `cls`, which shadowed an outer variable.
            method_cls = JavaStaticMethod if static else JavaMethod
            classDict[name] = method_cls(sig, varargs=varargs)
            if name == 'iterator' and sig == '()Ljava/util/Iterator;':
                # Java Iterables become Python iterables.
                classDict['__iter__'] = iterator_wrapper(classDict[name])
            continue
        # Overloaded name: collect every signature for multiple dispatch.
        signatures = []
        for subindex, subname in enumerate(methods_name):
            if subname != name:
                continue
            submethod = methods[subindex]
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in submethod.getParameterTypes()]),
                get_signature(submethod.getReturnType()))
            signatures.append((sig, Modifier.isStatic(submethod.getModifiers()), submethod.isVarArgs()))
        classDict[name] = JavaMultipleMethod(signatures)

    for field in c.getFields():
        static = Modifier.isStatic(field.getModifiers())
        sig = get_signature(field.getType())
        field_cls = JavaStaticField if static else JavaField
        classDict[field.getName()] = field_cls(sig)

    classDict['__javaclass__'] = clsname.replace('.', '/')
    return MetaJavaClass.__new__(
        MetaJavaClass,
        clsname,
        (JavaClass, ),
        classDict)
| {
"repo_name": "physion/pyjnius",
"path": "jnius/reflect.py",
"copies": "1",
"size": "8896",
"license": "mit",
"hash": 2010027509502155000,
"line_mean": 35.6090534979,
"line_max": 102,
"alpha_frac": 0.6215152878,
"autogenerated": false,
"ratio": 3.739386296763346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48609015845633463,
"avg_score": null,
"num_lines": null
} |
__all__ = ('autoclass', 'ensureclass')
from jnius import (
JavaClass, MetaJavaClass, JavaMethod, JavaStaticMethod,
JavaField, JavaStaticField, JavaMultipleMethod, find_javaclass
)
class Class(JavaClass):
    """Proxy for java.lang.Class, used by autoclass() for reflection."""
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/Class'
    desiredAssertionStatus = JavaMethod('()Z')
    # BUG FIX: the 3-argument forName signature used ',' instead of ';' as
    # the JNI type terminator and the return type was missing a '/'
    # ('Ljava/langClass;').
    forName = JavaMultipleMethod([
        ('(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;', True, False),
        ('(Ljava/lang/String;)Ljava/lang/Class;', True, False), ])
    getClassLoader = JavaMethod('()Ljava/lang/ClassLoader;')
    getClasses = JavaMethod('()[Ljava/lang/Class;')
    getComponentType = JavaMethod('()Ljava/lang/Class;')
    getConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredClasses = JavaMethod('()[Ljava/lang/Class;')
    getDeclaredConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getDeclaredConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getDeclaredFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    # BUG FIX: ',' after Ljava/lang/String is not a valid JNI separator;
    # reference types are terminated by ';'.
    getDeclaredMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getDeclaredMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    getDeclaringClass = JavaMethod('()Ljava/lang/Class;')
    getField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    getInterfaces = JavaMethod('()[Ljava/lang/Class;')
    getMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    # BUG FIX: Class.getModifiers() returns int, not int[].
    getModifiers = JavaMethod('()I')
    getName = JavaMethod('()Ljava/lang/String;')
    getPackage = JavaMethod('()Ljava/lang/Package;')
    getProtectionDomain = JavaMethod('()Ljava/security/ProtectionDomain;')
    getResource = JavaMethod('(Ljava/lang/String;)Ljava/net/URL;')
    getResourceAsStream = JavaMethod('(Ljava/lang/String;)Ljava/io/InputStream;')
    getSigners = JavaMethod('()[Ljava/lang/Object;')
    # BUG FIX: getSuperclass/isAssignableFrom use java.lang.Class;
    # java.lang.reflect.Class does not exist.
    getSuperclass = JavaMethod('()Ljava/lang/Class;')
    isArray = JavaMethod('()Z')
    isAssignableFrom = JavaMethod('(Ljava/lang/Class;)Z')
    isInstance = JavaMethod('(Ljava/lang/Object;)Z')
    isInterface = JavaMethod('()Z')
    isPrimitive = JavaMethod('()Z')
    newInstance = JavaMethod('()Ljava/lang/Object;')
    toString = JavaMethod('()Ljava/lang/String;')
class Object(JavaClass):
    """Proxy for java.lang.Object."""
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/Object'
    getClass = JavaMethod('()Ljava/lang/Class;')
    hashCode = JavaMethod('()I')
class Modifier(JavaClass):
    """Proxy for java.lang.reflect.Modifier.

    All methods are static predicates taking the int flag word returned by
    getModifiers() on Class/Method/Field/Constructor.
    """
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/reflect/Modifier'
    isAbstract = JavaStaticMethod('(I)Z')
    isFinal = JavaStaticMethod('(I)Z')
    isInterface = JavaStaticMethod('(I)Z')
    isNative = JavaStaticMethod('(I)Z')
    isPrivate = JavaStaticMethod('(I)Z')
    isProtected = JavaStaticMethod('(I)Z')
    isPublic = JavaStaticMethod('(I)Z')
    isStatic = JavaStaticMethod('(I)Z')
    isStrict = JavaStaticMethod('(I)Z')
    isSynchronized = JavaStaticMethod('(I)Z')
    isTransient = JavaStaticMethod('(I)Z')
    isVolatile = JavaStaticMethod('(I)Z')
class Method(JavaClass):
    """Proxy for java.lang.reflect.Method, used to reflect method signatures."""
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/reflect/Method'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getReturnType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
class Field(JavaClass):
    """Proxy for java.lang.reflect.Field, used to reflect field types."""
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/reflect/Field'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
class Constructor(JavaClass):
    """Proxy for java.lang.reflect.Constructor."""
    __metaclass__ = MetaJavaClass
    __javaclass__ = 'java/lang/reflect/Constructor'
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
def get_signature(cls_tp):
    """Return the JNI type signature for a reflected java.lang.Class proxy."""
    type_name = cls_tp.getName()
    # Array types already carry their '[' prefix; only fix the separators.
    if type_name.startswith('['):
        return type_name.replace('.', '/')
    primitives = {
        'void': 'V', 'boolean': 'Z', 'byte': 'B',
        'char': 'C', 'short': 'S', 'int': 'I',
        'long': 'J', 'float': 'F', 'double': 'D'}
    if type_name in primitives:
        return primitives[type_name]
    # don't do it in recursive way for the moment,
    # error on the JNI/android: JNI ERROR (app bug): local reference table overflow (max=512)
    #
    #ensureclass(tp)
    return 'L{0};'.format(type_name.replace('.', '/'))
# Names for which autoclass() has already been triggered.
registers = []

def ensureclass(clsname):
    """Build the proxy for *clsname* via autoclass() unless already known."""
    if clsname in registers:
        return
    # Already assembled by the metaclass? Nothing to do.
    if MetaJavaClass.get_javaclass(clsname.replace('.', '/')):
        return
    registers.append(clsname)
    autoclass(clsname)
def autoclass(clsname):
    """Build (and cache) a Python proxy class for the Java class *clsname*.

    Reflects the class's constructors, methods and fields through the proxy
    classes above and assembles a new class with MetaJavaClass. Returns the
    cached proxy when one was already created for this JNI name.

    Raises Exception when the Java class cannot be found.
    """
    jniname = clsname.replace('.', '/')
    cls = MetaJavaClass.get_javaclass(jniname)
    if cls:
        return cls
    classDict = {}
    c = find_javaclass(clsname)
    if c is None:
        # BUG FIX: report the requested name -- `c` is always None here, so
        # the old message read 'Java class None not found'. The unreachable
        # `return None` after the raise has been dropped.
        raise Exception('Java class {0} not found'.format(clsname))
    # Constructors: one (signature, is_varargs) pair each.
    constructors = []
    for constructor in c.getConstructors():
        sig = '({0})V'.format(
            ''.join([get_signature(x) for x in constructor.getParameterTypes()]))
        constructors.append((sig, constructor.isVarArgs()))
    classDict['__javaconstructor__'] = constructors
    methods = c.getMethods()
    methods_name = [x.getName() for x in methods]
    for index, method in enumerate(methods):
        name = methods_name[index]
        if name in classDict:
            continue
        count = methods_name.count(name)
        # Only one method with this name: plain (static) method wrapper.
        if count == 1:
            static = Modifier.isStatic(method.getModifiers())
            varargs = method.isVarArgs()
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in method.getParameterTypes()]),
                get_signature(method.getReturnType()))
            wrapper = JavaStaticMethod if static else JavaMethod
            classDict[name] = wrapper(sig, varargs=varargs)
            continue
        # Multiple overloads: collect every signature under one dispatcher.
        # (Distinct loop variables here -- the original shadowed the outer
        # `index`/`method`, which was confusing though harmless.)
        signatures = []
        for subindex, subname in enumerate(methods_name):
            if subname != name:
                continue
            overload = methods[subindex]
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in overload.getParameterTypes()]),
                get_signature(overload.getReturnType()))
            signatures.append((sig, Modifier.isStatic(overload.getModifiers()),
                               overload.isVarArgs()))
        classDict[name] = JavaMultipleMethod(signatures)
    # Fields, wrapped as static or instance accessors.
    for field in c.getFields():
        static = Modifier.isStatic(field.getModifiers())
        sig = get_signature(field.getType())
        wrapper = JavaStaticField if static else JavaField
        classDict[field.getName()] = wrapper(sig)
    classDict['__javaclass__'] = jniname
    return MetaJavaClass.__new__(
        MetaJavaClass,
        clsname,  # .replace('.', '_'),
        (JavaClass, ),
        classDict)
| {
"repo_name": "aolihu/pyjnius",
"path": "jnius/reflect.py",
"copies": "5",
"size": "8262",
"license": "mit",
"hash": -5083033008726981000,
"line_mean": 36.2162162162,
"line_max": 102,
"alpha_frac": 0.6319293149,
"autogenerated": false,
"ratio": 3.708258527827648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6840187842727649,
"avg_score": null,
"num_lines": null
} |
# All available types:
# Body -> Body
# Decal -> Decal
# Wheel -> Wheels
# Boost -> Rocket Boost
# Antenna -> Antenna
# Topper -> Topper
# Trail -> Trail
# Goal Explosion -> Goal Explosion
# Paint Finish -> Paint Finish
# Engine Audio -> Engine Audio
# Unwanted:
# Crates
# Banner -> Player Banner
import json


def parse_items(lines):
    """Parse "Key: Value" item lines into a dict of {slot: [item, ...]}.

    Items are delimited by lines starting with "LongLabel: ". The "Slot"
    key selects the bucket; items in the unwanted "Crates" and
    "Player Banner" slots are dropped. Lines without a non-empty key are
    skipped.
    """
    items = {}
    item_lines = []

    def flush():
        # Parse the accumulated lines of one item and file it by slot.
        if not item_lines:
            return
        item = {}
        slot = ""
        for s in item_lines:
            split_line = [x.strip() for x in s.split(':', 1)]
            if not split_line[0]:
                continue
            key = split_line[0]
            value = split_line[1]
            if key == "Slot":
                slot = value
            else:
                item[key] = value
        if slot not in ("Crates", "Player Banner"):
            items.setdefault(slot, []).append(item)
        item_lines.clear()

    for line in lines:
        if line.startswith("LongLabel: ") and item_lines:
            flush()
        item_lines.append(line)
    # BUG FIX: the original only parsed an item upon seeing the *next*
    # "LongLabel:" line, so the final item in the file was silently dropped.
    flush()
    return items


if __name__ == '__main__':
    with open('Rocket_League_Items.txt', 'r') as f:
        items = parse_items(f)
    with open("src/main/python/rlbot/gui/rocket_league_items.json", "w") as f:
        json.dump(items, f)
| {
"repo_name": "drssoccer55/RLBot",
"path": "json_items_file_creator.py",
"copies": "1",
"size": "1431",
"license": "mit",
"hash": -7432100661885742000,
"line_mean": 29.4468085106,
"line_max": 107,
"alpha_frac": 0.464011181,
"autogenerated": false,
"ratio": 3.953038674033149,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9809681848728973,
"avg_score": 0.021473601260835303,
"num_lines": 47
} |
__all__ = ['Backend']
from sklearn.cross_validation import train_test_split
from .storage import *
class Backend(object):
    """Holds one dataset and one fitted model in memory.

    Subclasses supply a ``model()`` factory returning an unfitted estimator
    with the scikit-learn fit/predict interface. Persistence is delegated to
    the ``read_key``/``write_key`` helpers (see ``.storage``).
    """

    def __init__(self):
        self._data = None
        self._model = None

    def load(self, dataset, write=False):
        """Load *dataset* (a dict with 'data' and optional 'target' keys).

        Discards any previously fitted model. When *write* is true, also
        persists the dataset. Returns True.
        """
        self._model = None
        self._data = dataset['data']
        self._target = dataset.get('target')
        if write:
            self._write_dataset(dataset)
        return True

    def isloaded(self, read=False):
        """Return True if a dataset is loaded; with *read*, try storage."""
        if getattr(self, '_data', None) is not None:
            return True
        elif read:
            return bool(self._read_dataset())
        else:
            return False

    def fit(self, write=True):
        """Fit a fresh model on the loaded dataset; optionally persist it."""
        self._ensure_loaded(read=True)
        model = self.model()
        model.fit(self._data, self._target)
        self._model = model
        if write:
            self._write_model()
        return True

    def istrained(self, read=False):
        """Return True if a fitted model exists; with *read*, try storage."""
        if getattr(self, '_model', None) is not None:
            return True
        elif read:
            return bool(self._read_model())
        else:
            return False

    def predict(self, value):
        """Return model predictions for *value* as a plain list."""
        self._ensure_trained(read=True)
        return self._model.predict(value).tolist()

    def predict_probabilities(self, value):
        """Return class probabilities for *value* as a plain list."""
        self._ensure_trained(read=True)
        return self._model.predict_proba(value).tolist()

    def _read_dataset(self):
        dataset = read_key(DATASET_KEY)
        if dataset is None:
            return False
        # BUG FIX: propagate load()'s True. The old code fell off the end and
        # returned None, so isloaded(read=True) reported False even after a
        # successful read from storage.
        return self.load(dataset, write=False)

    def _write_dataset(self, dataset):
        # BUG FIX: was _ensure_trained(), which always raised here because
        # load() clears self._model before calling us. Writing the dataset
        # only requires the data to be loaded.
        self._ensure_loaded()
        write_key(DATASET_KEY, dataset)

    def _read_model(self):
        self._model = read_key(MODEL_KEY)
        return bool(self._model)

    def _write_model(self):
        self._ensure_trained()
        write_key(MODEL_KEY, self._model)

    def _ensure_loaded(self, read=False):
        if not self.isloaded(read):
            raise RuntimeError

    def _ensure_trained(self, read=False):
        if not self.istrained(read):
            raise RuntimeError
| {
"repo_name": "norbert/fickle",
"path": "fickle/backend.py",
"copies": "1",
"size": "2100",
"license": "mit",
"hash": -5936361260683574000,
"line_mean": 25.25,
"line_max": 56,
"alpha_frac": 0.57,
"autogenerated": false,
"ratio": 4.030710172744722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 80
} |
__all__ = ('backend', 'SearchableModel', 'SearchableManager')
from cgi import parse_qsl
from django.core import signals
from fts.backends.base import InvalidFtsBackendError
from fts.settings import *
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
# (Keys and module names currently coincide; kept as a mapping so schemes
# could alias a differently-named module later.)
BACKENDS = {
    'pgsql': 'pgsql',
    'mysql': 'mysql',
    'sphinx': 'sphinx',
    'xapian': 'xapian',
    'simple': 'simple',
    'dummy': 'dummy',
}
def get_fts(backend_uri):
    """Resolve *backend_uri* (e.g. ``pgsql://host/?opt=1``) to a backend.

    The scheme selects a module from BACKENDS (or is treated as a dotted
    import path); the host and query-string params are passed to its
    SearchClass. Returns a tuple
    ``(search_instance, SearchableModel, SearchManager)``.

    Raises InvalidFtsBackendError if the URI lacks a ``scheme://`` prefix.
    """
    if backend_uri.find(':') == -1:
        # BUG FIX: parenthesized raise -- the old `raise Exc, "msg"` form is
        # Python-2-only syntax; this form works on both Python 2 and 3.
        raise InvalidFtsBackendError("Backend URI must start with scheme://")
    scheme, rest = backend_uri.split(':', 1)
    if not rest.startswith('//'):
        raise InvalidFtsBackendError("Backend URI must start with scheme://")
    host = rest[2:]
    qpos = rest.find('?')
    if qpos != -1:
        params = dict(parse_qsl(rest[qpos+1:]))
        host = rest[2:qpos]
    else:
        params = {}
    if host.endswith('/'):
        host = host[:-1]
    if scheme in BACKENDS:
        # fromlist=[''] forces __import__ to return the leaf submodule.
        module = __import__('fts.backends.%s' % BACKENDS[scheme], {}, {}, [''])
    else:
        module = __import__(scheme, {}, {}, [''])
    return getattr(module, 'SearchClass')(host, params), getattr(module, 'SearchableModel'), getattr(module, 'SearchManager')
# Instantiate the configured backend once at import time; the module then
# exposes the shared `backend`, `SearchableModel` and `SearchManager`.
_fts, SearchableModel, SearchManager = get_fts(FTS_BACKEND)
backend = _fts.backend
| {
"repo_name": "ryszard/django-fts",
"path": "fts/__init__.py",
"copies": "1",
"size": "1487",
"license": "bsd-3-clause",
"hash": 5837223137737742000,
"line_mean": 31.0444444444,
"line_max": 125,
"alpha_frac": 0.6065904506,
"autogenerated": false,
"ratio": 3.5831325301204817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.968009843059779,
"avg_score": 0.0019249100245382642,
"num_lines": 45
} |
__all__ = ['BaseAgentSet', 'BaseAgent']
from pysb import Annotation
from indra.statements import *
from .common import _n
from .sites import states, get_binding_site_name, get_mod_site_name
class BaseAgentSet(object):
    """Dict-like collection of BaseAgents, keyed by normalized agent name."""

    def __init__(self):
        self.agents = {}

    def get_create_base_agent(self, agent):
        """Return the BaseAgent for *agent*'s name, creating it if needed."""
        key = _n(agent.name)
        try:
            base_agent = self.agents[key]
        except KeyError:
            base_agent = BaseAgent(key)
            self.agents[key] = base_agent
        # Only molecular agents contribute sites and references.
        if isinstance(agent, Agent):
            # Bound conditions: both partners get a binding site.
            for bound_cond in agent.bound_conditions:
                partner = self.get_create_base_agent(bound_cond.agent)
                partner.create_site(get_binding_site_name(agent))
                base_agent.create_site(get_binding_site_name(bound_cond.agent))
            # Modification conditions become modification sites.
            for mod_cond in agent.mods:
                base_agent.create_mod_site(mod_cond)
            # Mutations: a site named after the original residue (and
            # position, when given), with wild-type and mutant states.
            for mut in agent.mutations:
                res_from = mut.residue_from or 'mut'
                res_to = mut.residue_to or 'X'
                if mut.position is None:
                    mut_site_name = res_from
                else:
                    mut_site_name = res_from + mut.position
                base_agent.create_site(mut_site_name, states=['WT', res_to])
            # Cellular location, if any.
            if agent.location is not None:
                base_agent.create_site('loc', [_n(agent.location)])
            # Activity flag site.
            if agent.activity is not None:
                base_agent.create_site(agent.activity.activity_type,
                                       ['inactive', 'active'])
            # Database references; later statements may overwrite earlier.
            base_agent.db_refs.update(agent.db_refs)
        return base_agent

    def items(self):
        """Return (name, BaseAgent) pairs of the wrapped dict."""
        return self.agents.items()

    def __getitem__(self, name):
        return self.agents[name]
class BaseAgent(object):
    """Aggregated, model-wide view of a single Agent.

    Collects the name, sites, site states, active/inactive forms, activity
    types and database references contributed by individual INDRA
    Statements, so the PySB Assembler can build consistent Monomer
    signatures.
    """

    def __init__(self, name):
        self.name = name
        self.sites = []
        self.site_states = {}
        self.site_annotations = []
        # Site/state configurations in which this agent counts as active
        # (a single kind of activity is assumed per agent).
        self.active_forms = []
        self.activity_types = []
        self.inactive_forms = []
        self.db_refs = {}

    def create_site(self, site, states=None):
        """Add *site* (and optionally its states) if not already present."""
        if site not in self.sites:
            self.sites.append(site)
        if states is None:
            return
        self.site_states.setdefault(site, [])
        try:
            state_list = list(states)
        except TypeError:
            # Non-iterable states: keep the site but record no states.
            return
        self.add_site_states(site, state_list)

    def create_mod_site(self, mc):
        """Add the modification site described by ModCondition *mc*."""
        site_name = get_mod_site_name(mc)
        unmod_state, mod_state = states[mc.mod_type]
        self.create_site(site_name, (unmod_state, mod_state))
        annotations = [Annotation((site_name, mod_state), mc.mod_type,
                                  'is_modification')]
        if mc.residue:
            annotations.append(Annotation(site_name, mc.residue,
                                          'is_residue'))
        if mc.position:
            annotations.append(Annotation(site_name, mc.position,
                                          'is_position'))
        self.site_annotations += annotations

    def add_site_states(self, site, states):
        """Append any states not already known for *site*."""
        known = self.site_states[site]
        for state in states:
            if state not in known:
                known.append(state)

    def add_activity_form(self, activity_pattern, is_active):
        """Record a site pattern as an active or inactive form.

        Parameters
        ----------
        activity_pattern : dict
            A dictionary of site names and their states.
        is_active : bool
            Is True if the given pattern corresponds to an active state.
        """
        forms = self.active_forms if is_active else self.inactive_forms
        if activity_pattern not in forms:
            forms.append(activity_pattern)

    def add_activity_type(self, activity_type):
        """Record an activity type such as 'activity', 'kinase', 'gtpbound'."""
        if activity_type in self.activity_types:
            return
        self.activity_types.append(activity_type)
| {
"repo_name": "sorgerlab/indra",
"path": "indra/assemblers/pysb/base_agents.py",
"copies": "6",
"size": "5540",
"license": "bsd-2-clause",
"hash": 6316065619784354000,
"line_mean": 36.1812080537,
"line_max": 79,
"alpha_frac": 0.5862815884,
"autogenerated": false,
"ratio": 4.1467065868263475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006711409395973154,
"num_lines": 149
} |
"""All Base Classes"""
import atexit
import logging
import os
import time
import sys
from signal import SIGTERM
LOG = logging.getLogger(__name__)
class BaseDaemon(object):
    """
    A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    Refer: http://web.archive.org/web/20131025230048/http://www.jejik.com/\
articles/2007/02/a_simple_unix_linux_daemon_in_python/
    """

    def __init__(self, pidfile):
        self.pidfile = pidfile
        # Route the daemon's stdout/stderr into the module logger so output
        # survives detaching from the controlling terminal.
        sys.stdout = ControlLogger(LOG, logging.INFO)
        sys.stderr = ControlLogger(LOG, logging.ERROR)

    def _read_pid(self):
        """Return the PID stored in the pidfile, or None if unreadable."""
        try:
            with open(self.pidfile, 'r') as pf:
                return int(pf.read().strip())
        except IOError:
            return None

    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
        # write pidfile (with-block fixes the leaked file handle)
        atexit.register(self.delpid)
        with open(self.pidfile, 'w+') as pf:
            pf.write("%s\n" % os.getpid())

    def delpid(self):
        """Remove the pidfile; registered to run at interpreter exit."""
        os.remove(self.pidfile)

    def status(self):
        """Print the /proc status of the daemonized process, if any."""
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
            with open("/proc/%d/status" % pid) as status_file:
                for line in status_file:
                    print(line)
        except IOError as e:
            message = 'Unable to open %s pidfile - %s'
            sys.stderr.write(message % (self.pidfile, e))

    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        if self._read_pid():
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)
        # Start the daemon
        self.daemonize()
        self.run()

    def stop(self):
        """
        Stop the daemon
        """
        pid = self._read_pid()
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart
        # Send SIGTERM repeatedly until the process disappears.
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)

    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()

    def run(self):
        """
        You should override this method when you subclass Daemon.
        It will be called after the process has been
        daemonized by start() or restart().
        """
class ControlLogger(object):
    """
    File-like shim that forwards write() calls to a logger instance,
    so it can stand in for sys.stdout / sys.stderr.
    """

    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, message):
        text = message.rstrip()
        # Skip writes that are pure whitespace/newlines (e.g. print's '\n').
        if text:
            self.logger.log(self.level, text)
| {
"repo_name": "KDBk/lan_sync_controller",
"path": "lan_sync_controller/base.py",
"copies": "2",
"size": "4295",
"license": "mit",
"hash": -3941883183801560600,
"line_mean": 26.1835443038,
"line_max": 75,
"alpha_frac": 0.5129220023,
"autogenerated": false,
"ratio": 4.094375595805529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.560729759810553,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BaseClassifier', 'load_classifier']
from io import BytesIO
from datetime import datetime
import logging
import pickle
import tarfile
import time
from uuid import uuid4
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
from ..utils import Timer
from ..utils import parse_n_jobs
logger = logging.getLogger(__name__)
class BaseClassifier(BaseEstimator, ClassifierMixin):
    """Abstract base for classifiers with timing, logging and tar persistence.

    Subclasses implement :meth:`_fit` and :meth:`_predict_proba`; this base
    adds UUID tagging, fit timestamps, and save/load through a tar archive
    containing the pickled model.
    """
    def __init__(self, n_jobs=1, **kwargs):
        super(BaseClassifier, self).__init__()
        # Resolved worker count, plus the original spec for introspection.
        self.n_jobs = parse_n_jobs(n_jobs)
        self.n_jobs_string = n_jobs
    #end def
    def fit(self, X, Y, validation_data=None, **kwargs):
        """Fit on (X, Y), stamping a fresh UUID and fit timestamp first."""
        self.uuid_ = str(uuid4())
        logger.debug('{} UUID is {}.'.format(self.name, self.uuid_))
        self.fitted_at_ = datetime.utcnow()
        timer = Timer()
        self._fit(X, Y, validation_data=validation_data, **kwargs)
        logger.info('{} fitting on {} instances complete {}.'.format(self.name, X.shape[0], timer))
        return self
    #end def
    # Subclass hooks -- must be overridden.
    def _fit(self, *args, **kwargs): raise NotImplementedError('_fit is not implemented.')
    def _predict_proba(self, X_featurized, **kwargs):
        raise NotImplementedError('_predict_proba is not implemented.')
    def predict_proba(self, X_featurized, **kwargs):
        """Return class probabilities for already-featurized input."""
        timer = Timer()
        Y_proba = self._predict_proba(X_featurized, **kwargs)
        logger.debug('Computed prediction probabilities on {} instances {}.'.format(X_featurized.shape[0], timer))
        return Y_proba
    #end def
    def predict(self, X_featurized, **kwargs):
        """Return boolean predictions (probability >= 0.5)."""
        timer = Timer()
        Y_proba = self._predict_proba(X_featurized, **kwargs)
        Y_predict = Y_proba >= 0.5
        logger.debug('Computed predictions on {} instances {}.'.format(X_featurized.shape[0], timer))
        return Y_predict
    #end def
    def predict_and_proba(self, X_featurized, **kwargs):
        """Return (probabilities, boolean predictions) from one proba pass."""
        timer = Timer()
        Y_proba = self._predict_proba(X_featurized, **kwargs)
        Y_predict = Y_proba >= 0.5
        logger.debug('Computed predictions and probabilities on {} instances {}.'.format(X_featurized.shape[0], timer))
        return Y_proba, Y_predict
    #end def
    def decision_function(self, *args, **kwargs): return self.predict_proba(*args, **kwargs)
    def save(self, f):
        """Serialize this fitted classifier into the file object *f*.

        Writes a tar archive whose 'model.pkl' member is the pickled self;
        subclasses can add extra members via save_to_tarfile(). Closes *f*
        when done. Raises NotFittedError if fit() has not been called.
        """
        if not hasattr(self, 'uuid_'):
            # NOTE(review): message says 'featurizer' -- likely copied from
            # the featurizer class.
            raise NotFittedError('This featurizer is not fitted yet.')
        with tarfile.open(fileobj=f, mode='w') as tf:
            with BytesIO() as model_f:
                try: pickle.dump(self, model_f, protocol=4)
                except pickle.PicklingError:
                    logger.error('PicklingError: Did you check to make sure that the classifier mixins (i.e., KerasNNClassifierMixin) is ahead of BaseClassifier in the MRO?')
                    raise
                #end try
                # Build the tar member header by hand so we can stream the
                # pickled bytes straight from the in-memory buffer.
                model_data = model_f.getvalue()
                model_f.seek(0)
                model_tarinfo = tarfile.TarInfo(name='model.pkl')
                model_tarinfo.size = len(model_data)
                model_tarinfo.mtime = int(time.time())
                tf.addfile(tarinfo=model_tarinfo, fileobj=model_f)
            #end with
            self.save_to_tarfile(tf)
        #end with
        f.close()
        logger.info('{} saved to <{}>.'.format(self, f.name))
        return self
    #end def
    # Hooks for subclasses to persist/restore extra members; no-ops here.
    def save_to_tarfile(self, tf): return self
    def load_from_tarfile(self, tf): return self
    @property
    def uuid(self):
        return self.uuid_
    #end def
    @property
    def name(self):
        return type(self).__name__
    @property
    def classes_(self): raise NotImplementedError('classes_ is not implemented.')
    def __str__(self):
        return '{}(UUID={})'.format(self.name, self.uuid_ if hasattr(self, 'uuid_') else 'None')
#end class
def load_classifier(f):
    """Load a classifier previously written by :meth:`BaseClassifier.save`.

    NOTE(review): pickle.load executes code from the archive -- only load
    model files from trusted sources.
    """
    with tarfile.open(fileobj=f, mode='r') as tf:
        model_member = tf.extractfile('model.pkl')
        classifier = pickle.load(model_member)
        classifier.load_from_tarfile(tf)
    logger.info('Loaded {} from <{}>.'.format(classifier, f.name))
    return classifier
#end def
| {
"repo_name": "skylander86/ycml",
"path": "ycml/classifiers/base.py",
"copies": "1",
"size": "4155",
"license": "apache-2.0",
"hash": -6448527370637989000,
"line_mean": 30.0074626866,
"line_max": 174,
"alpha_frac": 0.6137184116,
"autogenerated": false,
"ratio": 3.8014638609332114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4915182272533211,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BaseConnector', 'TCPConnector', 'ProxyConnector', 'UnixConnector']
import asyncio
import aiohttp
import functools
import http.cookies
import time
import ssl
import socket
import weakref
from .client import ClientRequest
from .errors import ServerDisconnectedError
from .errors import HttpProxyError, ProxyConnectionError
from .errors import ClientOSError, ClientTimeoutError
from .helpers import BasicAuth
class Connection(object):
    """Pairs a pooled transport/protocol with the connector that owns it."""

    def __init__(self, connector, key, request, transport, protocol, loop):
        self._key = key
        self._connector = connector
        self._request = request
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.reader = protocol.reader
        self.writer = protocol.writer
        # Close the transport if this wrapper is garbage-collected without
        # an explicit close()/release().
        self._wr = weakref.ref(self, lambda wr, tr=self._transport: tr.close())

    @property
    def loop(self):
        return self._loop

    def close(self):
        """Return the transport to the connector, forcing it closed."""
        transport = self._transport
        if transport is None:
            return
        self._connector._release(
            self._key, self._request, transport, self._protocol,
            should_close=True)
        self._transport = None
        self._wr = None

    def release(self):
        """Hand the transport back to the pool for keep-alive reuse."""
        transport = self._transport
        if not transport:
            return
        self._connector._release(
            self._key, self._request, transport, self._protocol)
        self._transport = None
        self._wr = None

    def share_cookies(self, cookies):
        if self._connector._share_cookies:  # XXX
            self._connector.update_cookies(cookies)
class BaseConnector(object):
    """Base connector class.

    Maintains a pool of keep-alive transports keyed by (host, port, ssl)
    and hands them out wrapped in :class:`Connection` objects.

    :param conn_timeout: (optional) Connect timeout.
    :param keepalive_timeout: (optional) Keep-alive timeout.
    :param bool share_cookies: Set to True to keep cookies between requests.
    :param bool force_close: Set to True to force close and do reconnect
        after each request (and between redirects).
    :param loop: Optional event loop.
    """
    def __init__(self, *, conn_timeout=None, keepalive_timeout=30,
                 share_cookies=False, force_close=False, loop=None):
        self._conns = {}
        self._conn_timeout = conn_timeout
        self._keepalive_timeout = keepalive_timeout
        self._share_cookies = share_cookies
        self._cleanup_handle = None
        self._force_close = force_close
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._factory = functools.partial(
            aiohttp.StreamProtocol, loop=loop,
            disconnect_error=ServerDisconnectedError)
        # Cookies shared across requests when share_cookies is enabled.
        self.cookies = http.cookies.SimpleCookie()
        # Close all pooled transports if the connector is garbage-collected;
        # the callback captures the pool dict so it outlives `self`.
        self._wr = weakref.ref(
            self, lambda wr, f=self._do_close, conns=self._conns: f(conns))
    def _cleanup(self):
        """Cleanup unused transports."""
        if self._cleanup_handle:
            self._cleanup_handle.cancel()
            self._cleanup_handle = None
        now = time.time()
        connections = {}
        for key, conns in self._conns.items():
            alive = []
            for transport, proto, t0 in conns:
                if transport is not None:
                    if proto and not proto.is_connected():
                        # Peer disconnected; drop the transport.
                        transport = None
                    elif (now - t0) > self._keepalive_timeout:
                        # Idle past the keep-alive window; close it.
                        transport.close()
                        transport = None
                if transport:
                    alive.append((transport, proto, t0))
            if alive:
                connections[key] = alive
        if connections:
            # Re-arm the periodic cleanup while live transports remain.
            self._cleanup_handle = self._loop.call_later(
                self._keepalive_timeout, self._cleanup)
        self._conns = connections
        # Re-point the GC finalizer at the freshly built pool dict.
        self._wr = weakref.ref(
            self, lambda wr, f=self._do_close, conns=self._conns: f(conns))
    def _start_cleanup_task(self):
        # Arm the periodic cleanup timer if it is not already running.
        if self._cleanup_handle is None:
            self._cleanup_handle = self._loop.call_later(
                self._keepalive_timeout, self._cleanup)
    def close(self):
        """Close all opened transports."""
        self._do_close(self._conns)
    @staticmethod
    def _do_close(conns):
        # Static so the weakref finalizer can run it after `self` is gone.
        for key, data in conns.items():
            for transport, proto, td in data:
                transport.close()
        conns.clear()
    def update_cookies(self, cookies):
        """Update shared cookies."""
        if isinstance(cookies, dict):
            cookies = cookies.items()
        for name, value in cookies:
            if isinstance(value, http.cookies.Morsel):
                # use dict method because SimpleCookie class modifies value
                dict.__setitem__(self.cookies, name, value)
            else:
                self.cookies[name] = value
    @asyncio.coroutine
    def connect(self, req):
        """Get from pool or create new connection."""
        key = (req.host, req.port, req.ssl)
        if self._share_cookies:
            req.update_cookies(self.cookies.items())
        transport, proto = self._get(key)
        if transport is None:
            try:
                if self._conn_timeout:
                    transport, proto = yield from asyncio.wait_for(
                        self._create_connection(req),
                        self._conn_timeout, loop=self._loop)
                else:
                    transport, proto = yield from self._create_connection(req)
            except asyncio.TimeoutError as exc:
                raise ClientTimeoutError(
                    'Connection timeout to host %s:%s ssl:%s' % key) from exc
            except OSError as exc:
                raise ClientOSError(
                    'Cannot connect to host %s:%s ssl:%s' % key) from exc
        return Connection(self, key, req, transport, proto, self._loop)
    def _get(self, key):
        # Pop pooled entries until a live, young-enough transport is found;
        # stale/dead ones are discarded along the way.
        conns = self._conns.get(key)
        while conns:
            transport, proto, t0 = conns.pop()
            if transport is not None and proto.is_connected():
                if (time.time() - t0) > self._keepalive_timeout:
                    transport.close()
                    transport = None
                else:
                    return transport, proto
        return None, None
    def _release(self, key, req, transport, protocol, *, should_close=False):
        """Return a transport to the pool, or close it.

        The transport is closed when the response demands it, force_close
        is set, or unread payload remains on the parser.
        """
        resp = req.response
        if not should_close:
            if resp is not None:
                if resp.message is None:
                    # Response never received its start line; don't reuse.
                    should_close = True
                else:
                    should_close = resp.message.should_close
        if self._force_close:
            should_close = True
        reader = protocol.reader
        if should_close or (reader.output and not reader.output.at_eof()):
            self._conns.pop(key, None)
            transport.close()
        else:
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((transport, protocol, time.time()))
            reader.unset_parser()
            self._start_cleanup_task()
    def _create_connection(self, req, *args, **kwargs):
        # Subclasses implement the actual transport/protocol creation.
        raise NotImplementedError()
# ssl.OP_NO_COMPRESSION is not available on every Python/OpenSSL build;
# fall back to 0 (a no-op flag) when missing.
_SSL_OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
# ssl.create_default_context() appeared in Python 3.4.
# NOTE(review): '_SSH_' looks like a typo for '_SSL_'.
_SSH_HAS_CREATE_DEFAULT_CONTEXT = hasattr(ssl, 'create_default_context')
class TCPConnector(BaseConnector):
    """TCP connector.

    :param bool verify_ssl: Set to True to check ssl certificates.
    :param bool resolve: Set to True to do DNS lookup for host name.
    :param family: socket address family
    :param args: see :class:`BaseConnector`
    :param kwargs: see :class:`BaseConnector`
    """
    def __init__(self, *args, verify_ssl=True,
                 resolve=False, family=socket.AF_INET, ssl_context=None,
                 **kwargs):
        # A custom ssl_context implies certificate checking, so combining
        # it with verify_ssl=False is contradictory.
        if not verify_ssl and ssl_context is not None:
            raise ValueError(
                "Either disable ssl certificate validation by "
                "verify_ssl=False or specify ssl_context, not both.")
        super().__init__(*args, **kwargs)
        self._verify_ssl = verify_ssl
        self._ssl_context = ssl_context
        self._family = family
        self._resolve = resolve
        # Cache of (host, port) -> list of resolved address-info dicts.
        self._resolved_hosts = {}
    @property
    def verify_ssl(self):
        """Do check for ssl certifications?"""
        return self._verify_ssl
    @property
    def ssl_context(self):
        """SSLContext instance for https requests.
        Lazy property, creates context on demand.
        """
        if self._ssl_context is None:
            if not self._verify_ssl:
                # Unverified context: disable legacy protocols and
                # compression, but do not require certificates.
                sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                sslcontext.options |= ssl.OP_NO_SSLv2
                sslcontext.options |= ssl.OP_NO_SSLv3
                sslcontext.options |= _SSL_OP_NO_COMPRESSION
                sslcontext.set_default_verify_paths()
            elif _SSH_HAS_CREATE_DEFAULT_CONTEXT:
                # Python 3.4+
                sslcontext = ssl.create_default_context()
            else:  # pragma: no cover
                # Fallback for Python 3.3.
                sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                sslcontext.options |= ssl.OP_NO_SSLv2
                sslcontext.options |= ssl.OP_NO_SSLv3
                sslcontext.options |= _SSL_OP_NO_COMPRESSION
                sslcontext.set_default_verify_paths()
                sslcontext.verify_mode = ssl.CERT_REQUIRED
            self._ssl_context = sslcontext
        return self._ssl_context
    @property
    def family(self):
        """Socket family like AF_INET."""
        return self._family
    @property
    def resolve(self):
        """Do DNS lookup for host name?"""
        return self._resolve
    @property
    def resolved_hosts(self):
        """The dict of (host, port) -> (ipaddr, port) pairs."""
        return dict(self._resolved_hosts)
    def clear_resolved_hosts(self, host=None, port=None):
        """Remove specified host/port or clear all resolve cache."""
        if host is not None and port is not None:
            key = (host, port)
            if key in self._resolved_hosts:
                del self._resolved_hosts[key]
        else:
            self._resolved_hosts.clear()
    @asyncio.coroutine
    def _resolve_host(self, host, port):
        """Return a list of address-info dicts for (host, port).

        With resolve=True results come from getaddrinfo and are cached;
        otherwise the host string is passed through unresolved.
        """
        if self._resolve:
            key = (host, port)
            if key not in self._resolved_hosts:
                infos = yield from self._loop.getaddrinfo(
                    host, port, type=socket.SOCK_STREAM, family=self._family)
                hosts = []
                for family, _, proto, _, address in infos:
                    hosts.append(
                        {'hostname': host,
                         'host': address[0], 'port': address[1],
                         'family': family, 'proto': proto,
                         'flags': socket.AI_NUMERICHOST})
                self._resolved_hosts[key] = hosts
            # Return a copy so callers can pop() without mutating the cache.
            return list(self._resolved_hosts[key])
        else:
            return [{'hostname': host, 'host': host, 'port': port,
                     'family': self._family, 'proto': 0, 'flags': 0}]
    def _create_connection(self, req, **kwargs):
        """Create connection.
        Has same keyword arguments as BaseEventLoop.create_connection.
        """
        if req.ssl:
            sslcontext = self.ssl_context
        else:
            sslcontext = None
        hosts = yield from self._resolve_host(req.host, req.port)
        while hosts:
            hinfo = hosts.pop()
            try:
                return (yield from self._loop.create_connection(
                    self._factory, hinfo['host'], hinfo['port'],
                    ssl=sslcontext, family=hinfo['family'],
                    proto=hinfo['proto'], flags=hinfo['flags'],
                    server_hostname=hinfo['hostname'] if sslcontext else None,
                    **kwargs))
            except OSError as exc:
                # Only raise once every resolved address has failed.
                if not hosts:
                    raise ClientOSError('Can not connect to %s:%s' %
                                        (req.host, req.port)) from exc
class ProxyConnector(TCPConnector):
    """Http Proxy connector.

    :param str proxy: Proxy URL address. Only http proxy supported.
    :param proxy_auth: (optional) Proxy HTTP Basic Auth
    :type proxy_auth: aiohttp.helpers.BasicAuth
    :param args: see :class:`TCPConnector`
    :param kwargs: see :class:`TCPConnector`

    Usage:

    >>> conn = ProxyConnector(proxy="http://some.proxy.com")
    >>> resp = yield from request('GET', 'http://python.org', connector=conn)
    """

    def __init__(self, proxy, *args, proxy_auth=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._proxy = proxy
        self._proxy_auth = proxy_auth
        assert proxy.startswith('http://'), (
            "Only http proxy supported", proxy)
        assert proxy_auth is None or isinstance(proxy_auth, BasicAuth), (
            "proxy_auth must be None or BasicAuth() tuple", proxy_auth)

    @property
    def proxy(self):
        """Proxy URL."""
        return self._proxy

    @asyncio.coroutine
    def _create_connection(self, req, **kwargs):
        # First open a plain TCP connection to the proxy itself; the
        # GET request object is only a vehicle for the proxy headers.
        proxy_req = ClientRequest(
            'GET', self._proxy,
            headers={'Host': req.host},
            auth=self._proxy_auth,
            loop=self._loop)
        try:
            transport, proto = yield from super()._create_connection(proxy_req)
        except OSError as exc:
            raise ProxyConnectionError(*exc.args) from exc

        # Plain-HTTP proxying requires the absolute URI in the request line.
        req.path = '{scheme}://{host}{path}'.format(scheme=req.scheme,
                                                    host=req.netloc,
                                                    path=req.path)
        # Move basic-auth credentials to the proxy-specific header.
        if 'AUTHORIZATION' in proxy_req.headers:
            auth = proxy_req.headers['AUTHORIZATION']
            del proxy_req.headers['AUTHORIZATION']
            req.headers['PROXY-AUTHORIZATION'] = auth

        if req.ssl:
            # For HTTPS requests over HTTP proxy
            # we must notify proxy to tunnel connection
            # so we send CONNECT command:
            #   CONNECT www.python.org:443 HTTP/1.1
            #   Host: www.python.org
            #
            # next we must do TLS handshake and so on
            # to do this we must wrap raw socket into secure one
            # asyncio handles this perfectly
            proxy_req.method = 'CONNECT'
            proxy_req.path = '{}:{}'.format(req.host, req.port)
            key = (req.host, req.port, req.ssl)
            conn = Connection(self, key, proxy_req,
                              transport, proto, self._loop)
            proxy_resp = proxy_req.send(conn.writer, conn.reader)
            try:
                resp = yield from proxy_resp.start(conn, True)
            except:
                proxy_resp.close()
                conn.close()
                raise
            else:
                if resp.status != 200:
                    raise HttpProxyError(code=resp.status, message=resp.reason)
                rawsock = transport.get_extra_info('socket', default=None)
                if rawsock is None:
                    raise RuntimeError(
                        "Transport does not expose socket instance")
                # Re-wrap the established raw socket with TLS towards the
                # final destination; pause reading first so no bytes are
                # consumed by the old (plain-text) protocol.
                transport.pause_reading()
                transport, proto = yield from self._loop.create_connection(
                    self._factory, ssl=True, sock=rawsock,
                    server_hostname=req.host, **kwargs)

        return transport, proto
class UnixConnector(BaseConnector):
    """Connector that speaks HTTP over a local Unix domain socket.

    :param str path: filesystem path of the Unix socket.
    :param args: forwarded to :class:`BaseConnector`
    :param kwargs: forwarded to :class:`BaseConnector`

    Usage:

    >>> conn = UnixConnector(path='/path/to/socket')
    >>> resp = yield from request('GET', 'http://python.org', connector=conn)
    """

    def __init__(self, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._path = path

    @property
    def path(self):
        """Filesystem path of the Unix socket."""
        return self._path

    @asyncio.coroutine
    def _create_connection(self, req, **kwargs):
        # Delegate to the event loop; returns a (transport, protocol) pair.
        connect = self._loop.create_unix_connection(
            self._factory, self._path, **kwargs)
        pair = yield from connect
        return pair
| {
"repo_name": "saghul/aiohttp",
"path": "aiohttp/connector.py",
"copies": "1",
"size": "16267",
"license": "apache-2.0",
"hash": 4128179175477974500,
"line_mean": 33.9077253219,
"line_max": 79,
"alpha_frac": 0.554066515,
"autogenerated": false,
"ratio": 4.428804791723387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5482871306723387,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BaseConnector', 'TCPConnector', 'UnixConnector',
'SocketConnector', 'UnixSocketConnector', 'ProxyConnector']
import asyncio
import aiohttp
import functools
import http.cookies
import time
import ssl
import socket
import weakref
from .errors import HttpProxyError
from .errors import ProxyConnectionError
from .client import ClientRequest
class Connection(object):
    """Handle on a pooled (transport, protocol) pair for one request.

    A Connection is either close()d (transport discarded) or release()d
    (transport returned to the connector's pool for reuse).
    """

    def __init__(self, connector, key, request, transport, protocol):
        self._key = key
        self._connector = connector
        self._request = request
        self._transport = transport
        self._protocol = protocol
        self.reader = protocol.reader
        self.writer = protocol.writer
        # GC safety net: if this Connection is collected without an explicit
        # close()/release(), the weakref callback closes the transport.  The
        # transport is bound as a default argument so the callback does not
        # keep a strong reference to self.
        self._wr = weakref.ref(self, lambda wr, tr=self._transport: tr.close())

    def close(self):
        """Discard the underlying transport (no reuse)."""
        if self._transport is not None:
            self._transport.close()
            self._transport = None
            # Drop the weakref so its callback will not close a transport
            # we no longer own.
            self._wr = None

    def release(self):
        """Hand the transport back to the connector's pool for reuse."""
        if self._transport:
            self._connector._release(
                self._key, self._request, self._transport, self._protocol)
            self._transport = None
            self._wr = None
class BaseConnector(object):
    """Base connection pool.

    Keeps idle (transport, protocol) pairs keyed by (host, port, ssl) and
    reuses them within ``reuse_timeout`` seconds.  Subclasses implement
    :meth:`_create_connection`.
    """

    def __init__(self, *, reuse_timeout=30, conn_timeout=None,
                 share_cookies=False, loop=None, **kwargs):
        # key -> list of (transport, protocol, timestamp) idle entries.
        self._conns = {}
        self._reuse_timeout = reuse_timeout
        self._conn_timeout = conn_timeout
        self._share_cookies = share_cookies
        self._cleanup_handle = None

        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._factory = functools.partial(aiohttp.StreamProtocol, loop=loop)

        self.cookies = http.cookies.SimpleCookie()
        # GC safety net: close all pooled transports when the connector is
        # collected.  Defaults bind the method/dict so the callback holds
        # no strong reference to self.
        self._wr = weakref.ref(self,
                               lambda wr, f=self._do_close, conns=self._conns:
                               f(conns))

    def _cleanup(self):
        """Cleanup unused transports."""
        if self._cleanup_handle:
            self._cleanup_handle.cancel()
            self._cleanup_handle = None

        now = time.time()

        connections = {}
        for key, conns in self._conns.items():
            alive = []
            for transport, proto, t0 in conns:
                if transport is not None:
                    if proto and not proto.is_connected():
                        # Peer already went away; drop silently.
                        transport = None
                    elif (now - t0) > self._reuse_timeout:
                        # Idle too long; close explicitly.
                        transport.close()
                        transport = None
                if transport:
                    alive.append((transport, proto, t0))
            if alive:
                connections[key] = alive

        # Reschedule only while something is still pooled.
        if connections:
            self._cleanup_handle = self._loop.call_later(
                self._reuse_timeout, self._cleanup)

        self._conns = connections
        # Re-create the weakref callback so it captures the *new* dict.
        self._wr = weakref.ref(self,
                               lambda wr, f=self._do_close, conns=self._conns:
                               f(conns))

    def _start_cleanup_task(self):
        # Idempotent: schedules the periodic cleanup once.
        if self._cleanup_handle is None:
            self._cleanup_handle = self._loop.call_later(
                self._reuse_timeout, self._cleanup)

    def close(self):
        """Close all opened transports."""
        self._do_close(self._conns)

    @staticmethod
    def _do_close(conns):
        # Static so the weakref callback can run after self is gone.
        for key, data in conns.items():
            for transport, proto, td in data:
                transport.close()
        conns.clear()

    def update_cookies(self, cookies):
        """Merge *cookies* (dict or iterable of pairs) into the shared jar."""
        if isinstance(cookies, dict):
            cookies = cookies.items()

        for name, value in cookies:
            if isinstance(value, http.cookies.Morsel):
                # use dict method because SimpleCookie class modifies value
                dict.__setitem__(self.cookies, name, value)
            else:
                self.cookies[name] = value

    @asyncio.coroutine
    def connect(self, req):
        """Return a Connection for *req*, reusing a pooled transport if any."""
        key = (req.host, req.port, req.ssl)

        if self._share_cookies:
            req.update_cookies(self.cookies.items())

        transport, proto = self._get(key)
        if transport is None:
            if self._conn_timeout:
                transport, proto = yield from asyncio.wait_for(
                    self._create_connection(req),
                    self._conn_timeout, loop=self._loop)
            else:
                transport, proto = yield from self._create_connection(req)

        return Connection(self, key, req, transport, proto)

    def _get(self, key):
        # Pop idle entries for key until a live, fresh one is found.
        conns = self._conns.get(key)
        while conns:
            transport, proto, t0 = conns.pop()
            if transport is not None and proto.is_connected():
                if (time.time() - t0) > self._reuse_timeout:
                    transport.close()
                    transport = None
                else:
                    return transport, proto
        return None, None

    def _release(self, key, req, transport, protocol):
        # Called by Connection.release(): either pool the transport for
        # reuse or close it if the response forbids keep-alive.
        resp = req.response
        should_close = False
        if resp is not None:
            if resp.message is None:
                should_close = True
            else:
                should_close = resp.message.should_close
            if self._share_cookies and resp.cookies:
                self.update_cookies(resp.cookies.items())

        reader = protocol.reader
        if should_close or (reader.output and not reader.output.at_eof()):
            # Unread body bytes would corrupt the next request; drop it.
            transport.close()
        else:
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((transport, protocol, time.time()))
            reader.unset_parser()
            self._start_cleanup_task()

    def _create_connection(self, req, *args, **kwargs):
        # Abstract hook; subclasses return a (transport, protocol) pair.
        raise NotImplementedError()
class TCPConnector(BaseConnector):
    """TCP connector with optional client-side DNS caching.

    :param bool verify_ssl: pass a real SSLContext only when verification
        is disabled; otherwise the bare truthy value of ``req.ssl`` is
        handed to the event loop (loop-default SSL handling).
    :param bool resolve: resolve host names once and cache the addresses.
    :param family: socket address family used for resolution.
    """

    def __init__(self, *args, verify_ssl=True,
                 resolve=False, family=socket.AF_INET, **kwargs):
        super().__init__(*args, **kwargs)

        self._verify_ssl = verify_ssl
        self._family = family
        self._resolve = resolve
        # (host, port) -> list of resolved address dicts.
        self._resolved_hosts = {}

    def clear_resolved_hosts(self, host=None, port=None):
        """Drop one (host, port) entry, or the whole DNS cache."""
        if host is not None and port is not None:
            key = (host, port)
            if key in self._resolved_hosts:
                del self._resolved_hosts[key]
        else:
            self._resolved_hosts.clear()

    @asyncio.coroutine
    def _resolve_host(self, host, port):
        # With resolve=True, cache getaddrinfo results; otherwise return
        # the unresolved name and let the loop resolve per connection.
        if self._resolve:
            key = (host, port)

            if key not in self._resolved_hosts:
                infos = yield from self._loop.getaddrinfo(
                    host, port, type=socket.SOCK_STREAM, family=self._family)

                hosts = []
                for family, _, proto, _, address in infos:
                    hosts.append(
                        {'hostname': host,
                         'host': address[0], 'port': address[1],
                         'family': family, 'proto': proto,
                         'flags': socket.AI_NUMERICHOST})
                self._resolved_hosts[key] = hosts

            # Copy so the caller's pop() does not drain the cache.
            return list(self._resolved_hosts[key])
        else:
            return [{'hostname': host, 'host': host, 'port': port,
                     'family': self._family, 'proto': 0, 'flags': 0}]

    def _create_connection(self, req, **kwargs):
        """Create connection. Has same keyword arguments
        as BaseEventLoop.create_connection
        """
        # NOTE(review): when verify_ssl is True, the truthy req.ssl value
        # (not an SSLContext) is passed straight through to the loop; a
        # custom context is built only for the no-verify case.  That
        # context disables SSLv2 but, unlike later versions of this code,
        # does not set OP_NO_SSLv3 -- flagging rather than changing it.
        sslcontext = req.ssl
        if req.ssl and not self._verify_ssl:
            sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            sslcontext.options |= ssl.OP_NO_SSLv2
            sslcontext.set_default_verify_paths()

        hosts = yield from self._resolve_host(req.host, req.port)

        # Try each resolved address; re-raise the last OSError only after
        # every candidate has failed.
        while hosts:
            hinfo = hosts.pop()
            try:
                return (yield from self._loop.create_connection(
                    self._factory, hinfo['host'], hinfo['port'],
                    ssl=sslcontext, family=hinfo['family'],
                    proto=hinfo['proto'], flags=hinfo['flags'],
                    server_hostname=hinfo['hostname'] if sslcontext else None,
                    **kwargs))
            except OSError:
                if not hosts:
                    raise
class ProxyConnector(TCPConnector):
    """Http Proxy connector."""

    def __init__(self, proxy, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.proxy = proxy
        assert proxy.startswith('http://'), (
            "Only http proxy supported", proxy)

    @asyncio.coroutine
    def _create_connection(self, req, **kwargs):
        # Open a plain TCP connection to the proxy itself first.
        proxy_req = ClientRequest('GET', self.proxy,
                                  headers={'Host': req.host})
        try:
            transport, proto = yield from super()._create_connection(proxy_req)
        except OSError:
            raise ProxyConnectionError()

        if req.ssl:
            # For HTTPS requests over HTTP proxy
            # we must notify proxy to tunnel connection
            # so we send CONNECT command:
            #   CONNECT www.python.org:443 HTTP/1.1
            #   Host: www.python.org
            #
            # next we must do TLS handshake and so on
            # to do this we must wrap raw socket into secure one
            # asyncio handles this perfectly
            proxy_req.method = 'CONNECT'
            proxy_req.path = '{}:{}'.format(req.host, req.port)
            key = (req.host, req.port, req.ssl)
            conn = Connection(self, key, proxy_req, transport, proto)
            proxy_resp = proxy_req.send(conn.writer, conn.reader)
            try:
                resp = yield from proxy_resp.start(conn, True)
            except:
                proxy_resp.close()
                conn.close()
                raise
            else:
                if resp.status != 200:
                    raise HttpProxyError(resp.status, resp.reason)
                rawsock = transport.get_extra_info('socket', default=None)
                if rawsock is None:
                    raise RuntimeError(
                        "Transport does not expose socket instance")
                # Re-wrap the raw socket with TLS towards the destination;
                # pause reading so the old protocol consumes no bytes.
                transport.pause_reading()
                transport, proto = yield from self._loop.create_connection(
                    self._factory, ssl=True, sock=rawsock,
                    server_hostname=req.host, **kwargs)

        return transport, proto
class UnixConnector(BaseConnector):
    """Connector that routes requests through a local Unix domain socket."""

    def __init__(self, path, *args, **kw):
        super().__init__(*args, **kw)
        # Kept as a public attribute for backwards compatibility.
        self.path = path

    @asyncio.coroutine
    def _create_connection(self, req, **kwargs):
        # The event loop yields a (transport, protocol) pair.
        connect = self._loop.create_unix_connection(
            self._factory, self.path, **kwargs)
        pair = yield from connect
        return pair
# Backwards-compatible aliases for the legacy names exported in __all__.
SocketConnector = TCPConnector
UnixSocketConnector = UnixConnector
| {
"repo_name": "jtackaberry/stagehand",
"path": "external/aiohttp/connector.py",
"copies": "1",
"size": "10888",
"license": "mit",
"hash": 6614208541410940000,
"line_mean": 32.9190031153,
"line_max": 79,
"alpha_frac": 0.537564291,
"autogenerated": false,
"ratio": 4.499173553719008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015576323987538941,
"num_lines": 321
} |
__all__ = ['BaseConstructor', 'Constructor', 'ConstructorError']
from .error import MarkedError
from .nodes import *  # NOQA
from .markedvalue import gen_marked_value
import collections
import collections.abc
import types
from functools import wraps
try:
from __builtin__ import unicode
except ImportError:
unicode = str # NOQA
def marked(func):
    """Decorator: wrap *func*'s return value in a marked value that
    carries the start mark of the node being constructed."""
    @wraps(func)
    def wrapper(self, node, *args, **kwargs):
        raw = func(self, node, *args, **kwargs)
        return gen_marked_value(raw, node.start_mark)
    return wrapper
class ConstructorError(MarkedError):
    """Raised when a node cannot be turned into a Python value."""
    pass
class BaseConstructor:
    """Turns parsed nodes into Python objects.

    ``yaml_constructors`` maps node tags to constructor callables; see
    :meth:`add_constructor`.  Multi-pass construction (for containers
    that may be referenced before they are filled) is driven through
    generator constructors collected in ``state_generators``.
    """

    yaml_constructors = {}

    def __init__(self):
        self.constructed_objects = {}
        self.state_generators = []
        self.deep_construct = False

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        data = self.construct_object(node)
        # Drain deferred generator constructors until no new ones appear.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct (and memoize) the Python object for *node*.

        :param deep: when True, generator constructors are exhausted
            immediately instead of being deferred.
        :raises ConstructorError: if no constructor is registered for
            the node's tag.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]

        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True

        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            raise ConstructorError(None, None, 'no constructor for tag %s' % node.tag)

        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)

        if isinstance(data, types.GeneratorType):
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)

        self.constructed_objects[node] = data
        if deep:
            self.deep_construct = old_deep
        return data

    @marked
    def construct_scalar(self, node):
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                                   "expected a scalar node, but found %s" % node.id,
                                   node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                                   "expected a sequence node, but found %s" % node.id,
                                   node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    @marked
    def construct_mapping(self, node, deep=False):
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                                   "expected a mapping node, but found %s" % node.id,
                                   node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # Bad keys are reported via echoerr and skipped rather than
            # aborting the whole document.
            # FIX: collections.Hashable was removed in Python 3.10; the
            # ABC lives in collections.abc.
            if not isinstance(key, collections.abc.Hashable):
                self.echoerr('While constructing a mapping', node.start_mark,
                             'found unhashable key', key_node.start_mark)
                continue
            elif type(key.value) != unicode:
                self.echoerr('Error while constructing a mapping', node.start_mark,
                             'found key that is not a string', key_node.start_mark)
                continue
            elif key in mapping:
                self.echoerr('Error while constructing a mapping', node.start_mark,
                             'found duplicate key', key_node.start_mark)
                continue
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for *tag* on this class only (the
        inherited registry is copied on first write)."""
        if 'yaml_constructors' not in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor
class Constructor(BaseConstructor):
    """Concrete constructor for the core YAML/JSON tags."""

    def construct_scalar(self, node):
        # Resolve the '=' (value) key of a mapping to its scalar.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)

    def flatten_mapping(self, node):
        """Expand '<<' merge keys in place, per the YAML merge spec."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                                   node.start_mark,
                                                   "expected a mapping for merging, but found %s"
                                                   % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # Later mappings in the sequence have lower precedence.
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                                           "expected a mapping or list of mappings for merging, but found %s"
                                           % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            # Merged pairs go first so explicit keys override them.
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)

    @marked
    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None

    @marked
    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node).value
        return bool(value)

    @marked
    def construct_yaml_int(self, node):
        value = self.construct_scalar(node).value
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        else:
            return sign * int(value)

    @marked
    def construct_yaml_float(self, node):
        value = self.construct_scalar(node).value
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        # FIX: this return must be unconditional.  Previously it sat in an
        # `else:` branch of the sign-stripping test, so any signed literal
        # ("-3.14", "+1.0") fell through and the function returned None.
        return sign * float(value)

    def construct_yaml_str(self, node):
        return self.construct_scalar(node)

    def construct_yaml_seq(self, node):
        # Generator constructor: yield the (empty) container first so
        # references to it can be resolved, then fill it.
        data = gen_marked_value([], node.start_mark)
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        data = gen_marked_value({}, node.start_mark)
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_undefined(self, node):
        raise ConstructorError(None, None,
                               "could not determine a constructor for the tag %r" % node.tag,
                               node.start_mark)
# Register the default tag -> constructor mapping on Constructor.  The
# final registration (tag None) is the fallback for unknown tags.
Constructor.add_constructor(
    'tag:yaml.org,2002:null',
    Constructor.construct_yaml_null)

Constructor.add_constructor(
    'tag:yaml.org,2002:bool',
    Constructor.construct_yaml_bool)

Constructor.add_constructor(
    'tag:yaml.org,2002:int',
    Constructor.construct_yaml_int)

Constructor.add_constructor(
    'tag:yaml.org,2002:float',
    Constructor.construct_yaml_float)

Constructor.add_constructor(
    'tag:yaml.org,2002:str',
    Constructor.construct_yaml_str)

Constructor.add_constructor(
    'tag:yaml.org,2002:seq',
    Constructor.construct_yaml_seq)

Constructor.add_constructor(
    'tag:yaml.org,2002:map',
    Constructor.construct_yaml_map)

Constructor.add_constructor(None,
                            Constructor.construct_undefined)
| {
"repo_name": "keelerm84/powerline",
"path": "powerline/lint/markedjson/constructor.py",
"copies": "3",
"size": "7570",
"license": "mit",
"hash": 5039975007949271000,
"line_mean": 26.6277372263,
"line_max": 77,
"alpha_frac": 0.701321004,
"autogenerated": false,
"ratio": 3.2035548032162504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.540487580721625,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BaseFeatClass', 'load_featclass']
from argparse import ArgumentParser
import logging
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from ycsettings import Settings
from ..classifiers import load_classifier
from ..featurizers import load_featurizer
from uriutils import uri_open
from ..utils import get_class_from_module_path, chunked_iterator
logger = logging.getLogger(__name__)
class BaseFeatClass(BaseEstimator, ClassifierMixin):
    """Pipeline that chains a persisted featurizer with a persisted
    classifier, loading both from URIs at construction time.

    :param featurizer_uri: URI of the pickled featurizer.
    :param classifier_uri: URI of the pickled classifier.
    :param transform_args: extra kwargs for ``featurizer.transform``.
    :param predict_args: extra kwargs for the classifier's predict calls.
    :raises TypeError: when an optional ``featurizer_uuid`` /
        ``classifier_uuid`` kwarg does not match the loaded artifact.
    """

    # FIX: the `={}` defaults were replaced by a None sentinel so a single
    # shared dict is not used across instances (mutable-default pitfall).
    def __init__(self, featurizer_uri, classifier_uri, *, transform_args=None, predict_args=None, **kwargs):
        super(BaseFeatClass, self).__init__(**kwargs)

        self.featurizer_uri = featurizer_uri
        self.classifier_uri = classifier_uri
        self.transform_args = {} if transform_args is None else transform_args
        self.predict_args = {} if predict_args is None else predict_args

        with uri_open(featurizer_uri, mode='rb') as f:
            self.featurizer = load_featurizer(f)
        with uri_open(classifier_uri, mode='rb') as f:
            self.classifier = load_classifier(f)

        # Optional UUID cross-checks guard against loading mismatched artifacts.
        if 'featurizer_uuid' in kwargs and self.featurizer.uuid != kwargs['featurizer_uuid']:
            raise TypeError('Featurizer UUID mismatch: {} (this) != {} (<{}>)'.format(kwargs['featurizer_uuid'], self.featurizer.uuid, featurizer_uri))
        elif 'classifier_uuid' in kwargs and self.classifier.uuid != kwargs['classifier_uuid']:
            raise TypeError('Classifier UUID mismatch: {} (this) != {} (<{}>)'.format(kwargs['classifier_uuid'], self.classifier.uuid, classifier_uri))
    #end def

    def fit(self, *args, **kwargs):
        raise NotImplementedError('BaseFeatClass does not support the `fit` method.')

    def transform(self, X, **kwargs):
        """Alias for :meth:`predict_proba` (sklearn transformer protocol)."""
        return self.predict_proba(X, **kwargs)
    #end def

    def predict(self, X, **kwargs):
        """Featurize *X* and return hard predictions."""
        predict_args = self.predict_args.copy()
        predict_args.update(kwargs)

        X_featurized = self.featurizer.transform(X, **self.transform_args)
        return self.classifier.predict(X_featurized, **predict_args)
    #end def

    def predict_proba(self, X, **kwargs):
        """Featurize *X* and return class probabilities."""
        predict_args = self.predict_args.copy()
        predict_args.update(kwargs)

        X_featurized = self.featurizer.transform(X, **self.transform_args)
        return self.classifier.predict_proba(X_featurized, **predict_args)
    #end def

    def predict_and_proba(self, X, **kwargs):
        """Featurize *X* and return (probabilities, predictions) in one pass."""
        predict_args = self.predict_args.copy()
        predict_args.update(kwargs)

        X_featurized = self.featurizer.transform(X, **self.transform_args)
        return self.classifier.predict_and_proba(X_featurized, **predict_args)
    #end def

    def predictions_generator(self, instances_generator, *, chunk_size=100000, include_proba=True, unbinarized=True):
        """Yield per-instance prediction tuples, processing the input
        stream in chunks of *chunk_size* instances."""
        for chunk in chunked_iterator(instances_generator, chunk_size):
            # FIX: np.object was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `object` is the supported spelling.
            X = np.array(chunk, dtype=object)

            if include_proba:
                Y_proba, Y_predict = self.predict_and_proba(X)
                Y_proba_dicts = self.classifier.unbinarize_labels(Y_proba, to_dict=True)
            else:
                Y_predict = self.predict(X)
                Y_proba_dicts = None
            #end if

            Y_predict_list = self.classifier.unbinarize_labels(Y_predict, to_dict=False)

            if include_proba:
                if unbinarized:
                    yield from ((X[i], Y_predict_list[i], Y_proba_dicts[i]) for i in range(X.shape[0]))
                else:
                    yield from ((X[i], Y_predict[i], Y_proba[i]) for i in range(X.shape[0]))
            else:
                if unbinarized:
                    yield from ((X[i], Y_predict_list[i]) for i in range(X.shape[0]))
                else:
                    yield from ((X[i], Y_predict[i]) for i in range(X.shape[0]))
            #end if
        #end for
    #end def

    def decision_function(self, X, **kwargs):
        """Alias for :meth:`predict_proba` (sklearn classifier protocol)."""
        return self.predict_proba(X, **kwargs)

    @property
    def classes_(self): return self.classifier.classes_

    def __str__(self):
        return 'BaseFeatClass(featurizer_uri={}, classifier_uri={})'.format(self.featurizer_uri, self.classifier_uri)
    #end def
#end class
def load_featclass(*, settings=None, uri=None, check_environment=True):
    """Instantiate a featclass described by a settings source.

    :param settings: an existing :class:`Settings`, or any value accepted
        by the ``Settings`` constructor; built from *uri* when None.
    :param uri: settings URI, also used in the log message.
    :param check_environment: only affects the wording of the log message.
    :return: the constructed featclass instance.
    """
    if settings is None:
        settings = Settings(uri)
    if not isinstance(settings, Settings):
        settings = Settings(settings, uri)

    featclass_type = settings.get('featclass_type', raise_exception=True)
    featclass_parameters = settings.getdict('featclass_parameters', default={})

    featclass_class = get_class_from_module_path(featclass_type)
    featclass = featclass_class(**featclass_parameters)
    # Lazy %-style logging args: the message is only rendered when INFO
    # is enabled (same output as the previous .format() call).
    logger.info('Loaded %s from <%s>.', featclass,
                uri if uri else ('env/settings' if check_environment else 'settings'))

    return featclass
#end def
def main():
    """Development entry point: load a featclass from a settings URI and
    run a smoke-test prediction on one hard-coded instance."""
    parser = ArgumentParser(description='Loads a featurizer-classifier. (DEVELOPMENT SCRIPT)')
    parser.add_argument('featclass_uri', type=str, metavar='<featclass_uri>', help='Featclass settings file to load.')
    A = parser.parse_args()

    logging.basicConfig(format=u'%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)

    featclass = load_featclass(uri=A.featclass_uri)
    print(featclass.transform(np.array([dict(content='this is a tort case. responsibility')])))
    print(featclass.predict_proba(np.array([dict(content='this is a tort case. responsibility')])))
#end def


if __name__ == '__main__': main()
| {
"repo_name": "skylander86/ycml",
"path": "ycml/featclass/base.py",
"copies": "1",
"size": "5494",
"license": "apache-2.0",
"hash": -1861338971105844500,
"line_mean": 35.6266666667,
"line_max": 151,
"alpha_frac": 0.6441572625,
"autogenerated": false,
"ratio": 3.6553559547571526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9758366981989264,
"avg_score": 0.008229247053577726,
"num_lines": 150
} |
__all__ = ['BaseModel']
class BaseModel(object):
    """Subclasses of this class are fed into the :meth:`~cosmic.api.API.model`
    decorator to attach models to an API. The API object doesn't care about
    how you implement this class, it just copies the necessary properties and
    leaves the class alone.

    Every handler below is an abstract stub: subclasses override the
    subset named in :data:`methods`.
    """

    #: A list of properties which, along with
    #: :data:`~cosmic.models.BaseModel.links` below, will be used for the
    #: model's representation and patch, defined in the same way Teleport
    #: Struct fields are defined:
    #:
    #: .. code:: python
    #:
    #:     properties = [
    #:         required('name', String),
    #:         optional('age', Integer),
    #:     ]
    #:
    #: See :class:`~cosmic.types.Representation` and
    #: :class:`~cosmic.types.Patch`.
    properties = []
    #: A list of methods that this model supports. Possible values are
    #: ``'get_by_id'``, ``'create'``, ``'update'``, ``'delete'`` and
    #: ``'get_list'``.
    methods = []
    #: A list of properties for the :meth:`get_list` handler. They are defined
    #: in the same way as :data:`properties` above.
    query_fields = []
    #: A list of properties that can be returned along with the usual response
    #: for :meth:`~cosmic.models.BaseModel.get_list`. These can be used for
    #: things like pagination.
    list_metadata = []
    #: Similar to properties, but encodes a relationship between this model
    #: and another. In database terms this would be a foreign key. Use
    #: :func:`~cosmic.types.required_link` and
    #: :func:`~cosmic.types.optional_link` to specify them.
    links = []

    @classmethod
    def get_by_id(cls, id):
        """
        :param id:
        :return: Model representation
        :raises cosmic.exceptions.NotFound:
        """
        raise NotImplementedError()

    @classmethod
    def get_list(cls, **kwargs):
        """
        :param kwargs: Defined by \
            :data:`~cosmic.models.BaseModel.query_fields`
        :return: If model does not define \
            :data:`~cosmic.models.BaseModel.list_metadata`, returns a list of
            tuples of models ids and representations. Otherwise returns a
            tuple where the first element is the above list, and the second is
            a dict as specified by \
            :data:`~cosmic.models.BaseModel.list_metadata`.
        """
        raise NotImplementedError()

    @classmethod
    def create(cls, **valid_patch):
        """
        :param validated_patch: The model patch.
        :return: A tuple of model id and model representation.
        """
        raise NotImplementedError()

    @classmethod
    def update(cls, id, **valid_patch):
        """
        :param id:
        :param validated_patch:
        :return: The model representation after patch has been applied.
        :raises cosmic.exceptions.NotFound:
        """
        raise NotImplementedError()

    @classmethod
    def delete(cls, id):
        """
        :param id:
        :raises cosmic.exceptions.NotFound:
        """
        raise NotImplementedError()

    @classmethod
    def validate_patch(cls, patch):
        """
        :param patch: The model patch
        :raises cosmic.exceptions.ValidationError:

        Run before any :meth:`~cosmic.models.BaseModel.create` or
        :meth:`~cosmic.models.BaseModel.update` call to validate the patch.
        All fields are made optional for the patch, so this method is a chance
        to ensure that the expected values were indeed passed in.
        """
        # Default is a no-op: accepting every patch unless overridden.
        pass
| {
"repo_name": "cosmic-api/cosmic.py",
"path": "cosmic/models.py",
"copies": "1",
"size": "3565",
"license": "mit",
"hash": 6862500211697329000,
"line_mean": 33.6116504854,
"line_max": 78,
"alpha_frac": 0.6084151473,
"autogenerated": false,
"ratio": 4.326456310679611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434871457979611,
"avg_score": null,
"num_lines": null
} |
# All basic usage of numpy
import numpy as np

# prints the version of numpy
print(np.__version__)

# Whether element is a numpy array
el = np.array(list())
# Below command resolves to true
if isinstance(el, np.ndarray):
    print("Element is a numpy array.")

array1 = np.array([1, 2, 3, 4])
# It performs element wise operation on whole collection
sum_of_array = array1 + array1
# Returns [2, 4, 6, 8]
print(sum_of_array)

# We can access individual numpy array elements, use square brackets
# Prints 2. i.e. value of array with index 1
print(array1[1])

# Returns array of boolean values which are greater than 2
array2 = array1 > 2
# Prints [False, False, True, True]
print(array2)

# We can print selective items of array by passing array of boolean values
# (boolean-mask indexing)
array3 = array1[array2]
print(array3)

two_d_list = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
two_d_array = np.array(two_d_list)
# Shape is 3 rows, 4 columns
print(two_d_array.shape)
# To get all the 3rd columns of
print(two_d_array[:,2])
# To get values [6, 7]
print(two_d_array[1,1:3])

# Reshape
org_array = [1, 2, 3, 4, 5, 6, 7, 8]
conv_np_array = np.array(org_array)
# Returns (8,) as shape
print('Shape ', conv_np_array.shape)

# Converting into shape of (2,4)
conv_2_4_array = np.reshape(conv_np_array, [2, 4])
# Returns (2, 4) as shape
print('Shape ', conv_2_4_array.shape)
# Prints [[1 2 3 4], [5 6 7 8]]
print('Element of conv_2_4_array', conv_2_4_array)

# Converting into shape of (2, 2, 2)
conv_2_2_2_array = np.reshape(conv_np_array, [2, 2, 2])
# Returns (2, 2, 2) as shape
print('Shape ', conv_2_2_2_array.shape)
# Prints
"""
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]
"""
print('Element of conv_2_2_2_array', conv_2_2_2_array)

# The shape will convert [row1, row2, ....rown-1, col]
# If we want to have single row with elements just 1 or -1 to the top of the shape list.
# Converting into shape of (1, 2, 2, 2)
conv_1_2_2_2_array = np.reshape(conv_np_array, [-1, 2, 2, 2])
# Returns (1, 2, 2, 2) as shape, even if we gave first dimension of shape as -1
# (-1 tells numpy to infer that dimension from the remaining sizes)
print('Shape ', conv_1_2_2_2_array.shape)
"""
[[[[1 2]
[3 4]]
[[5 6]
[7 8]]]]
"""
print('Element of conv_1_2_2_2_array', conv_1_2_2_2_array)

# Perform an operation on each element of array
"""
import math
import numpy as np
def calcSigmoid(z):
return 1 / ( 1 + math.exp(-z) )
def sigm(X):
sm = np.vectorize(calcSigmoid)
return sm(X)
sigm([1,0])
array([0.73105858, 0.5 ])
"""

# Adding columns
first = np.array([[1,3], [2,4]])
'''
array([[1, 2],
[3, 4]])
'''
sec = np.array([[5],[6]])
'''
array([[5],
[6]])
'''
# on column concat (axis=1 stacks side by side)
third = np.concatenate((first, sec), axis=1)
'''
array([[1, 2, 5],
[3, 4, 6]])
'''
# Get the scalar value of (1,1) matrix
in_vec = np.array([[5]])
'''
array([[5]])
'''
in_vec.shape # (1, 1)
in_scalar = np.asscalar(in_vec) # 5
# identity matrix
eye = np.eye(3)
'''
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
'''
# Reading csv file
'''
from numpy import genfromtxt
my_data = genfromtxt('my_file.csv', delimiter=',')
'''
'''
X = np.arange(1,7)
#array([1, 2, 3, 4, 5, 6])
X = X.reshape(3, 2)
array([[1, 2],
[3, 4],
[5, 6]])
If we see the elements arranged in filling first rows by rows
If we want to fill by columns first, i.e
array([[1, 4],
[2, 5],
[3, 6]])
We can do in two ways
1. First take the order in reverse then transpose
X = np.arange(1,7).reshape(2,3) # Observe 2,3 rather thn 3,2
X = X.T
2. Chaining with swapaxes
X = np.arange(1,7).reshape(2,3).swapaxes(0,1)
'''
'''
Indexing from particular index to end
mm = np.arange(1,5).reshape(2,2)
mm
array([[1, 2],
[3, 4]])
nn = mm.flatten().reshape((4,1))
array([[1],
[2],
[3],
[4]])
# From index 2 to end
nn[2:,]
array([[3],
[4]]) | {
"repo_name": "pk-python/basics",
"path": "numpy/num_py.py",
"copies": "1",
"size": "3791",
"license": "mit",
"hash": -8492246197718143000,
"line_mean": 19.8351648352,
"line_max": 88,
"alpha_frac": 0.6027433395,
"autogenerated": false,
"ratio": 2.5614864864864866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3664229825986487,
"avg_score": null,
"num_lines": null
} |
""" All battle information in a turn.
"""
class BattleCard:
    """ Information for a card in battle field.
    """
    def __init__(self, card, free):
        # card: underlying card object; must expose use(context) and fighting_value
        # free: whether this card was played for free this battle
        self._card = card
        self._free = free
        self.used = False        # card effect already triggered this turn
        self.doubled = False     # fighting value is doubled
        self.destroyed = False   # destroyed cards contribute no fighting value
    def use(self, context):
        """Trigger the underlying card's effect once; later calls are no-ops."""
        if not self.used:
            # BUG FIX: original referenced self.card, but the attribute is
            # self._card, so every call raised AttributeError.
            self._card.use(context)
            self.used = True
    @property
    def free(self):
        return self._free
    @property
    def fighting_value(self):
        """Effective fighting value, honoring destroyed/doubled state."""
        if self.destroyed:
            return 0
        value = self._card.fighting_value
        if self.doubled:
            value *= 2
        return value
class BattleField:
    """ Battle information in a turn.
    """
    def __init__(self, enemy=None, step=0):
        self._enemy = enemy
        self._free_cards = []
        self._additional_cards = []
        self._additional_limit = 0
        self._step = step
    @property
    def free_limit(self):
        # The enemy dictates how many free cards may be played
        return self._enemy.free_cards_num()
    @property
    def additional_limit(self):
        return self._additional_limit
    @property
    def step(self):
        return self._step
    @step.setter
    def step(self, value):
        # Negative steps are silently ignored
        if value >= 0:
            self._step = value
    def new_battle(self, enemy=None):
        """Reset per-battle state and switch to a new enemy."""
        if self._enemy is not None:
            self._free_cards = []
            self._additional_cards = []
            self._additional_limit = 0
        self._enemy = enemy
    def add_free(self, card):
        """Add a free card; returns False once the enemy's limit is reached."""
        if len(self._free_cards) < self.free_limit:
            self._free_cards.append(BattleCard(card, True))
            return True
        return False
    def get_free(self):
        return list(self._free_cards)
    def add_additional(self, card):
        """Add an additional card; returns False once over the limit."""
        if len(self._additional_cards) < self.additional_limit:
            self._additional_cards.append(card)
            return True
        return False
    def get_additional(self):
        return list(self._additional_cards)
    def add_additional_limit(self, num):
        self._additional_limit += num
    @property
    def fighting_value(self):
        """Total fighting value of every card currently on the field."""
        total = sum(card.fighting_value for card in self._free_cards)
        total += sum(card.fighting_value for card in self._additional_cards)
        # TODO deal with highest zero
        # TODO deal with pirate modifier.
        return total
    def won(self):
        return self.health_lose() == 0
    def health_lose(self):
        """Health lost this turn: hazard value not covered by fighting value."""
        deficit = self._enemy.hazard_value(self._step) - self.fighting_value
        return max(0, deficit)
| {
"repo_name": "cwahbong/tgif-py",
"path": "tgif/battle_field.py",
"copies": "1",
"size": "2550",
"license": "mit",
"hash": 8338499693287279000,
"line_mean": 24.5,
"line_max": 81,
"alpha_frac": 0.5721568627,
"autogenerated": false,
"ratio": 3.8519637462235647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49241206089235645,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BinaryLabelsClassifier', 'MultiLabelsClassifier', 'MulticlassLabelsClassifier', 'filter_labels']
import logging
import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer
from .base import BaseClassifier
logger = logging.getLogger(__name__)
class LabelsClassifierMixin(BaseClassifier):
    """Shared logic for classifiers whose targets are sets of string labels.

    Wires label binarization into the generic fit flow and converts
    probability matrices back into label lists or {label: prob} dicts.
    Subclasses must implement binarize_labels/fit_binarized.
    """
    def __init__(self, include=(), exclude=(), **kwargs):
        super(LabelsClassifierMixin, self).__init__(**kwargs)
        # Immutable tuple defaults: the original used mutable [] defaults,
        # which are shared across calls. Fresh sets are built per instance.
        self.exclude = set(exclude)
        self.include = set(include)
    #end def

    def _fit(self, X, Y_labels, *, validation_data=None, binarize_args=None, fit_args=None, **kwargs):
        """Binarize labels (and validation labels) then delegate to fit_binarized."""
        binarize_args = {} if binarize_args is None else binarize_args
        fit_args = {} if fit_args is None else fit_args
        Y_binarized = self.binarize_labels(Y_labels, **binarize_args, **kwargs)
        if validation_data is not None:
            X_validation, Y_validation = validation_data
            validation_data = (X_validation, self.binarize_labels(Y_validation, **binarize_args, **kwargs))
            logger.debug('Binarized validation labels.')
        #end if

        return self.fit_binarized(X, Y_binarized, validation_data=validation_data, **fit_args, **kwargs)
    #end def

    def fit_binarized(self, X_featurized, Y_binarized, **kwargs): raise NotImplementedError('fit_binarized is not implemented.')

    def binarize_labels(self, Y_labels, **kwargs): raise NotImplementedError('binarize_labels is not implemented.')

    def binarize_dicts(self, Y_dicts, *, default=0.0, **kwargs): raise NotImplementedError('binarize_dicts is not implemented.')

    def unbinarize_labels(self, Y_proba, *, epsilon=1e-5, to_dict=False, astype=float, **kwargs):
        """Convert an (n_samples, n_classes) probability matrix back to labels.

        Probabilities <= epsilon are dropped. Returns an object array holding
        either label lists or {label: astype(prob)} dicts (when to_dict=True).
        """
        assert len(self.classes_) == Y_proba.shape[1]

        # BUG FIX: np.object was removed in NumPy 1.24; builtin object is the
        # documented replacement.
        unbinarized = np.empty(Y_proba.shape[0], dtype=object)
        for i in range(Y_proba.shape[0]):
            if to_dict: unbinarized[i] = dict((self.classes_[j], astype(Y_proba[i, j])) for j in range(Y_proba.shape[1]) if Y_proba[i, j] > epsilon)
            else: unbinarized[i] = [self.classes_[j] for j in range(Y_proba.shape[1]) if Y_proba[i, j] > epsilon]
        #end for

        return unbinarized
    #end def
#end class
class BinaryLabelsClassifier(LabelsClassifierMixin):
    """Two-class classifier: pos_label vs. its complement (column 1 vs. 0)."""
    def __init__(self, pos_label, *, not_pos_label=None, **kwargs):
        super(BinaryLabelsClassifier, self).__init__(**kwargs)
        self.pos_label = pos_label
        if not_pos_label is None:
            not_pos_label = 'not ' + pos_label
        self.not_pos_label = not_pos_label
    #end def

    def binarize_labels(self, Y_labels, **kwargs):
        """One-hot rows: column 1 when pos_label is present, else column 0."""
        Y_binarized = np.zeros((Y_labels.shape[0], 2))
        for row, labels in enumerate(Y_labels):
            col = 1 if self.pos_label in labels else 0
            Y_binarized[row, col] = 1
        #end for
        return Y_binarized
    #end def

    def binarize_dicts(self, Y_dicts, *, default=0.0, **kwargs):
        """Column 1 gets the pos_label probability; column 0 its complement."""
        Y_binarized = np.zeros((Y_dicts.shape[0], 2))
        for row, d in enumerate(Y_dicts):
            Y_binarized[row, 1] = d.get(self.pos_label, default)
        Y_binarized[:, 0] = 1.0 - Y_binarized[:, 1]
        return Y_binarized
    #end def

    @property
    def classes_(self):
        return [self.not_pos_label, self.pos_label]
    #end def
class MultiLabelsClassifier(LabelsClassifierMixin):
    """Multi-label classifier: each instance may carry any number of labels."""
    def _fit(self, X, Y_labels, **kwargs):
        """Fit the label binarizer on filtered labels, then the underlying model."""
        Y_labels_filtered = filter_labels(Y_labels, include=self.include, exclude=self.exclude)
        self.label_binarizer_ = MultiLabelBinarizer(sparse_output=False).fit(Y_labels_filtered)
        logger.info('{} labels found in training instances.'.format(len(self.classes_)))
        if not len(self.classes_): raise ValueError('There are no labels available for fitting model.')

        return super(MultiLabelsClassifier, self)._fit(X, Y_labels_filtered, **kwargs)
    #end def

    def binarize_labels(self, Y_labels, **kwargs):
        """Binarize label lists, silently dropping labels unseen during fit."""
        classes_ = set(self.classes_)
        return self.label_binarizer_.transform((filter(classes_.__contains__, labels) for labels in Y_labels))
    #end def

    def binarize_dicts(self, Y_dicts, *, default=0.0, **kwargs):
        """Binarize {label: probability} dicts into an (n, n_classes) matrix."""
        # BUG FIX: np.fill does not exist; np.full is the filled-array
        # constructor.  np.float was removed in NumPy 1.24 (builtin float).
        binarized = np.full((Y_dicts.shape[0], len(self.classes_)), default, dtype=float)
        classes_map = dict((c, i) for i, c in enumerate(self.classes_))
        for i in range(Y_dicts.shape[0]):
            d = Y_dicts[i]
            for k, p in d.items():
                # BUG FIX: a label missing from classes_map raises KeyError
                # (dict lookup), not IndexError; unknown labels are ignored.
                try: binarized[i, classes_map[k]] = p
                except KeyError: pass
            #end for
        #end for

        return binarized
    #end def

    @property
    def classes_(self):
        return self.label_binarizer_.classes_
    #end def
#end class
class MulticlassLabelsClassifier(MultiLabelsClassifier):
    """Single-label (multiclass) classifier built on the multilabel machinery.

    Each instance keeps at most one label; empty label sets are mapped to a
    synthetic `<none>` class.
    """
    def _fit(self, X, Y_labels, **kwargs):
        Y_labels_filtered = self._filter_and_check_labels(Y_labels)
        if any(len(Y_labels_filtered[i]) == 0 for i in range(Y_labels_filtered.shape[0])):
            logger.warning('Some of Y_labels have no labels after filtering but this is a multiclass classifier. A `<none>` label will be created.')
            for i in range(Y_labels_filtered.shape[0]):
                if not Y_labels_filtered[i]:
                    Y_labels_filtered[i] = ['<none>']
        #end if

        return super(MulticlassLabelsClassifier, self)._fit(X, Y_labels_filtered, **kwargs)
    #end def

    def predict(self, X_featurized, **kwargs):
        """One-hot prediction: select each row's argmax probability."""
        Y_proba = self.predict_proba(X_featurized, **kwargs)
        return self._binarize_argmax(X_featurized, Y_proba)
    #end def

    def predict_and_proba(self, X_featurized, **kwargs):
        """Return (probability matrix, one-hot predictions)."""
        Y_proba = super(MulticlassLabelsClassifier, self).predict_proba(X_featurized, **kwargs)
        return Y_proba, self._binarize_argmax(X_featurized, Y_proba)
    #end def

    def _binarize_argmax(self, X_featurized, Y_proba):
        """Shared helper: one-hot matrix marking each row's argmax column."""
        Y_predict_binarized = np.zeros(Y_proba.shape)
        for i in range(X_featurized.shape[0]):
            j = np.argmax(Y_proba[i, :])
            Y_predict_binarized[i, j] = 1
        #end for
        return Y_predict_binarized
    #end def

    def binarize_labels(self, Y_labels, **kwargs):
        Y_labels_filtered = self._filter_and_check_labels(Y_labels)
        return super(MulticlassLabelsClassifier, self).binarize_labels(Y_labels_filtered, **kwargs)
    #end def

    def multiclassify_labels(self, Y_labels, **kwargs):
        """Labels -> class-index vector (via binarization)."""
        Y_binarized = self.binarize_labels(Y_labels, **kwargs)
        return MulticlassLabelsClassifier.multilabel_to_multiclass(Y_binarized)
    #end def

    @classmethod
    def multilabel_to_multiclass(cls, Y_multilabel):
        """One-hot / multilabel matrix -> vector of class indices."""
        Y_multiclass = np.zeros(Y_multilabel.shape[0])
        for j in range(Y_multilabel.shape[1]):
            Y_multiclass[Y_multilabel[:, j] > 0] = j
        return Y_multiclass
    #end def

    @classmethod
    def multiclass_to_multilabel(cls, Y_multiclass, n_classes=None):
        """Inverse of multilabel_to_multiclass: class indices -> one-hot matrix."""
        # BUG FIX: n_classes must be (max index + 1), and the one-hot matrix
        # must be returned; the original returned the input vector unchanged.
        if n_classes is None: n_classes = int(np.max(Y_multiclass)) + 1
        Y_multilabel = np.zeros((Y_multiclass.shape[0], n_classes))
        for j in range(Y_multilabel.shape[1]):
            Y_multilabel[Y_multiclass == j, j] = 1
        return Y_multilabel
    #end def

    def _filter_and_check_labels(self, Y_labels):
        Y_labels_filtered = filter_labels(Y_labels, include=self.include, exclude=self.exclude)
        # NOTE(review): the >1 check inspects the UNfiltered labels — confirm
        # whether the warning is meant to fire on pre-filter multiplicity.
        if any(len(Y_labels[i]) > 1 for i in range(Y_labels.shape[0])):
            logger.warning('Some of Y_labels contain more than 1 labels but this is a multiclass classifier. Only the first labels will be used.')
        # Keep only the first label of each instance (empty list when none)
        return np.array([[Y_labels_filtered[i][0]] if Y_labels_filtered[i] else [] for i in range(Y_labels_filtered.shape[0])])
    #end def

    @property
    def classes_(self):
        return self.label_binarizer_.classes_
    #end def
#end class
def filter_labels(Y_labels, *, include=(), exclude=()):
    """Filter each instance's label list by include/exclude sets.

    Y_labels: object array of per-instance label lists.
    include: when non-empty, only labels in it survive.
    exclude: labels always removed.
    Returns Y_labels unchanged (same object) when no filtering is requested.
    """
    if not exclude and not include: return Y_labels

    if include: logger.debug('Included labels: {}'.format(', '.join(sorted(include))))
    if exclude: logger.debug('Excluded labels: {}'.format(', '.join(sorted(exclude))))

    Y_labels_filtered = []
    removed_labels = 0
    for i in range(Y_labels.shape[0]):
        kept = [label for label in Y_labels[i] if (label in include or not include) and label not in exclude]
        Y_labels_filtered.append(kept)
        removed_labels += len(Y_labels[i]) - len(kept)
    #end for
    # BUG FIX: dtype=np.object was removed in NumPy 1.24; builtin object
    # is the documented replacement.
    Y_labels_filtered = np.array(Y_labels_filtered, dtype=object)

    if removed_labels: logger.info('{} label-instances removed from the data.'.format(removed_labels))

    return Y_labels_filtered
#end def
| {
"repo_name": "skylander86/ycml",
"path": "ycml/classifiers/labels.py",
"copies": "1",
"size": "8486",
"license": "apache-2.0",
"hash": 2514642399568660500,
"line_mean": 37.0538116592,
"line_max": 148,
"alpha_frac": 0.6361065284,
"autogenerated": false,
"ratio": 3.4121431443506234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45482496727506233,
"avg_score": null,
"num_lines": null
} |
__all__ = ['binder', 'TOP', 'ANY_NAMESPACE', 'REMOVE_RULE',
'PY_REPLACE_PAT', 'RESERVED_NAMES']
import re
import sets
import keyword
import warnings
import cStringIO
import bisect
import xml.sax
from xml.dom import Node
from Ft.Xml import InputSource, Sax, SplitQName
from Ft.Xml import XML_NAMESPACE as XML_NS
from Ft.Xml.Domlette import NonvalidatingReader, XmlStrStrip
from Ft.Xml.Xslt import PatternList, OutputParameters, XmlWriter
from amara import domtools
from amara import saxtools
#FIXME: Set up to use actual PyXML if available
from amara.pyxml_standins import *
from amara.binderyxpath import *
ANY_NAMESPACE = 'http://uche.ogbuji.net/tech/4suite/amara/reserved/any-namespace'
TOP = -1
REMOVE_RULE = True
#FIXME: Use 4Suite L10N
def _(t):
    """Identity translation stub (placeholder until 4Suite L10N is wired in)."""
    return t
g_namespaces = {}
class default_naming_rule:
    '''
    Represents naming and conversion rules for Python and XML IDs
    '''
    def __init__(self):
        #Memoizes (local, ns) -> python identifier conversions
        self.cache = {}
        return
    def xml_to_python(self, local, ns=None, check_clashes=None):
        """Convert an XML local name to a safe Python identifier.

        check_clashes - optional collection of names already taken; the
        result is suffixed with "_" until it no longer clashes (the cached
        base name is left untouched)."""
        key = (local, ns)
        python_id = self.cache.get(key)
        if not python_id:
            python_id = PY_REPLACE_PAT.sub('_', local)
            if python_id in RESERVED_NAMES:
                python_id += '_'
            self.cache[key] = python_id
        if check_clashes:
            while python_id in check_clashes:
                python_id += '_'
        return python_id
    def python_to_xml(self, python_id):
        #XML NMTOKENS are a superset of Python IDs
        return python_id
class namespace:
    """Groups the binding classes generated for a single XML namespace."""
    def __init__(self, nsuri, common_prefix=None):
        self.nsuri = nsuri
        self.common_prefix = common_prefix
        #Maps generated Python class name -> binding class for this namespace
        self.binding_classes = {}
        return
    def __unicode__(self):
        #BUG FIX: original signature omitted self, so unicode(ns) raised
        #(and self.nsuri was an unbound name inside the method)
        return self.nsuri
class binder(LexicalHandler, object):
    """SAX content/lexical handler that builds a tree of binding objects.

    Each SAX callback is repackaged as an event tuple and dispatched through
    a prioritized list of rules (one list per event type); the default rules
    construct element/text/PI/comment bindings on self.binding_stack.
    """
    def __init__(self, prefixes=None):
        #prefixes: optional {prefix: namespace-uri} mapping seeded by the user
        self.prefixes = prefixes or {}
        self.binding = None
        self.binding_stack = []
        self.event = None
        #One list of rules for each DOM node type
        self.rules = {
            saxtools.START_ELEMENT: [(0, default_element_rule().apply)],
            saxtools.END_ELEMENT: [],
            saxtools.CHARACTER_DATA: [(0, default_text_rule().apply)],
            saxtools.PI: [(0, default_pi_rule().apply)],
            saxtools.COMMENT: [(0, default_comment_rule().apply)],
            saxtools.START_DOCUMENT: [(0, default_root_rule().apply)],
            saxtools.END_DOCUMENT: [],
            }
        #Preferences
        self.preserve_comments = True
        self.state_machine = None
        self.xpatterns = []
        self.naming_rule = default_naming_rule()
        #Rule mutations requested while dispatching the same event type are
        #deferred so apply_rules' loop over self.rules is not disturbed
        self._rules_to_add = []
        self._rules_to_remove = []
        self.event_type = None
        self.first_startelement = False
        #self.unicodecache = {}
        self.pi_classes = {}
        return
    #Overridden ContentHandler methods
    def startPrefixMapping(self, prefix, uri):
        if not self.first_startelement:
            #Add prefixes discovered during parse of the top-level element
            #FIXME: What if the user does not want their manual mappings
            #Overriden by those in document? (Probably a rare consideration)
            self.prefixes[prefix] = uri
        return
    def startDocument(self):
        if self.state_machine: self.state_machine.event(1, None, None)
        #Path components representing XPath steps to current element
        self.steps = [u'']
        self.event = (saxtools.START_DOCUMENT,)
        self.apply_rules()
        #self.binding_stack.append(self.apply_rules(saxtools.DOCUMENT_NODE))
        return
    def endDocument(self):
        if self.state_machine: self.state_machine.event(0, None, None)
        self.event = (saxtools.END_DOCUMENT,)
        self.apply_rules()
        return
    #Overridden DocumentHandler methods
    def startElementNS(self, name, qname, attribs):
        (ns, local) = name
        if self.state_machine: self.state_machine.event(1, ns, local)
        self.event = (saxtools.START_ELEMENT, qname, ns, local, attribs)
        self.apply_rules()
        if not self.first_startelement: self.first_startelement = True
        return
    def endElementNS(self, name, qname):
        (ns, local) = name
        if self.state_machine: self.state_machine.event(0, ns, local)
        self.event = (saxtools.END_ELEMENT, qname, ns, local)
        self.apply_rules()
        #self.binding_stack.pop()
        return
    def characters(self, text):
        #if len(text) < 24:
        #    text = self.unicodecache.setdefault(text, text)
        self.event = (saxtools.CHARACTER_DATA, text)
        self.apply_rules()
        return
    def processingInstruction(self, target, data):
        self.event = (saxtools.PI, target, data)
        self.apply_rules()
        return
    #Overridden LexicalHandler methods
    def comment(self, data):
        #print "COMMENT", data
        if self.preserve_comments:
            self.event = (saxtools.COMMENT, data)
            self.apply_rules()
        return
    def startDTD(self, name, public_id, system_id):
        #DTD events are deliberately ignored
        #print "START DTD", name, public_id, system_id
        return
    #Bindery methods
    def _add_rule(self, event_type, priority, rule):
        """Insert a rule (bare callable, or object exposing .apply) into the
        event type's list, keeping it sorted by priority."""
        if callable(rule):
            bisect.insort(self.rules[event_type], (priority, rule))
        else:
            bisect.insort(self.rules[event_type], (priority, rule.apply))
        try:
            rule.add_rule_hook(self)
        except AttributeError:
            #No hook defined
            pass
        return
    def add_rule(self, rule, event_type=None):
        """
        Add a rule for an event type.
        You can also manipulate self.rules directly, but consider details
        such as priority and add_rule hooks
        rule - the rule object.  Can be a function or an instance that
               defines an apply() function
        if you do not specify the event type, it must be set as an attribute
        on the rule
        """
        if not event_type: event_type = rule.event_type
        priority = getattr(rule, 'priority', 0)
        if event_type == self.event_type:
            #We're mid-dispatch for this event type: defer the insertion
            self._rules_to_add.append((priority, rule))
        else:
            self._add_rule(event_type, priority, rule)
        return
    def remove_rule(self, rule_callback, event_type):
        """
        Remove a rule for a given event.  Do so smartly.
        If we're within an apply_rules, don't screw up the loop
        rule - rule callback (function or "apply" method)
        """
        priority = getattr(rule_callback, 'priority', 0)
        if event_type == self.event_type:
            #Mid-dispatch for this event type: defer the removal
            self._rules_to_remove.append((priority, rule_callback))
        else:
            self.rules[event_type].remove((priority, rule_callback))
        return
    def apply_rules(self):
        """Dispatch self.event to every rule registered for its event type,
        then apply rule additions/removals queued during dispatch."""
        self.event_completely_handled = False
        self.event_type = self.event[0]
        for priority, rule_callback in self.rules[self.event_type]:
            rule_callback(self)
        for (priority, rule) in self._rules_to_add:
            self._add_rule(self.event_type, priority, rule)
        for (priority, rule) in self._rules_to_remove:
            self.rules[self.event_type].remove((priority, rule))
        self._rules_to_add = []
        self._rules_to_remove = []
        self.event_type = None
        return
    def set_binding_class(self, nsuri, local, class_):
        """
        Map a Python class to an element type so that each occurrence of the
        element results in an instance of the class
        """
        ns_class = g_namespaces.setdefault(nsuri, namespace(nsuri))
        ns_class.binding_classes[local] = class_
        return
    def set_pi_binding_class(self, target, class_):
        """
        Map a Python class to a processing instruction target so that each
        occurrence of a PI with that target
        results in an instance of the class
        """
        #BUG FIX: was "self.pi_classes = [target] = class_", which replaced
        #the entire mapping (and tried to unpack class_) instead of adding
        #an entry for this target
        self.pi_classes[target] = class_
        return
    def read_xml(self, input_source, validate=False):
        """Parse input_source and return the root binding object, with the
        accumulated prefix and namespace maps attached to it."""
        if self.xpatterns:
            self.state_machine = saxtools.xpattern_state_manager(
                self.xpatterns, self.prefixes)
        parser = Sax.CreateParser()
        parser.setProperty(
            "http://xml.org/sax/properties/lexical-handler",
            self
            )
        if validate:
            parser.setFeature(xml.sax.handler.feature_validation, True)
        else:
            parser.setFeature(xml.sax.handler.feature_external_pes, False)
        parser.setContentHandler(self)
        #parser.setFeature(sax.handler.feature_namespaces, 1)
        parser.parse(input_source)
        root = self.binding_stack[0]
        if u'xml' not in self.prefixes:
            self.prefixes.update({u'xml': XML_NS})
        root.xmlns_prefixes = self.prefixes
        root.xml_namespaces = g_namespaces
        return root
#Characters not legal in Python identifiers; each is replaced with "_"
#when an XML name is converted to a Python name (see default_naming_rule)
PY_REPLACE_PAT = re.compile(u'[^a-zA-Z0-9_]')
#Only need to list IDs that do not start with "xml", "XML", etc.
RESERVED_NAMES = [
    '__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__',
    '__getitem__', '__hash__', '__init__', '__iter__', '__module__',
    '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__',
    '__str__', '__unicode__', '__weakref__', '_childNodes', '_docIndex',
    '_localName', '_namespaceURI', '_parentNode', '_prefix', '_rootNode',
    'childNodes', 'docIndex',
    'localName', 'namespaceURI', 'next_elem', 'nodeName', 'nodeType',
    'ownerDocument', 'parentNode', 'prefix', 'rootNode', 'locals',
    'None'
]
#Python keywords can never be generated identifiers either
RESERVED_NAMES.extend(keyword.kwlist)
#Frozen for immutable membership tests (Python 2 "sets" module)
RESERVED_NAMES = sets.ImmutableSet(RESERVED_NAMES)
#Phases to which rules should be added
#Usually there should only be one MAKE_INSTANCE phase rule, and this is
#Usually the default rule
#PRE_INSTANCE rules are usually for preventing certain events from creating objects
#POST_INSTANCE rules are usually for decorating or modifying created objects
PRE_INSTANCE, MAKE_INSTANCE, POST_INSTANCE = 1, 2, 3
#All local names in this class must be prefixed with "xml" so that they
#Don't conflict with the generated class name for the element binding object
def create_element(xmlnaming_rule, xmlqname, xmlns=None, xmlename=None):
    """Create a binding object for one element type, generating and caching
    the element's Python class on first use.

    xmlnaming_rule - converts XML local names to Python identifiers
    xmlqname - the element's qualified name (e.g. u'ns:local')
    xmlns - the element's namespace URI, if any
    xmlename - precomputed Python class name; derived from xmlqname via the
               naming rule when omitted

    All locals are prefixed "xml" so they cannot collide with the generated
    class name inside the exec below.
    """
    xmlprefix, xmllocal = SplitQName(xmlqname)
    if not xmlename: xmlename = xmlnaming_rule.xml_to_python(xmllocal, xmlns)
    #One namespace object (holding its own class cache) per namespace URI
    xmlns_class = g_namespaces.setdefault(xmlns, namespace(xmlns, xmlprefix))
    if xmlns_class.binding_classes.has_key(xmlename):
        xmlclass = xmlns_class.binding_classes[xmlename]
    else:
        #Python 2 exec statement: define a class named after the element,
        #subclassing element_base, then retrieve it from locals()
        exec "class %s(element_base): pass"%xmlename in globals(), locals()
        xmlclass = locals()[xmlename]
        xmlns_class.binding_classes[xmlename] = xmlclass
    if not hasattr(xmlclass, "xml_naming_rule"):
        xmlclass.xml_naming_rule = xmlnaming_rule
    xmlinstance = xmlclass()
    xmlinstance.nodeName = xmlqname
    if xmlns:
        #Namespace details are only recorded for namespaced elements
        xmlinstance.xmlnsUri = xmlns
        xmlinstance.xmlnsPrefix = xmlprefix
        xmlinstance.xmlnsLocalName = xmllocal
    return xmlinstance
def bind_attributes(instance, parent, ename, binder):
    """Turn each XML attribute of the current start-element event into a
    plain text data member on *instance*, recording the original qname and
    namespace in instance.xml_attributes."""
    (dummy, qname, ns, local, attributes) = binder.event
    if not attributes:
        return
    instance.xml_attributes = {}
    #AttributesNS does not support plain "for name in attributes"
    #iteration, so go through items() instead
    for (attr_ns, attr_local), attr_value in attributes.items():
        attr_qname = attributes.getQNameByName((attr_ns, attr_local))
        attr_pyname = binder.naming_rule.xml_to_python(
            attr_local, attr_ns, check_clashes=dir(instance))
        #Bypass __setattr__
        instance.__dict__[attr_pyname] = attr_value
        instance.xml_attributes[attr_pyname] = (attr_qname, attr_ns)
    return
def is_element(obj):
    """Crude element test: anything exposing localName counts as an element.
    (Inherited open question: is there a better test?)"""
    return hasattr(obj, "localName")
def bind_instance(instance, parent, index=-1):
    """
    Takes an instance that is fully constructed, and binds it as a child of
    a given parent instance, according to a given index within the children
    list.  For elements, also maintains the per-name next_elem sibling chain
    and the parent attribute that points at the chain's head.
    """
    assert instance is not parent
    instance.xml_parent = parent
    if index == -1:
        parent.xml_children.append(instance)
    else:
        #BUG FIX: slice assignment requires a sequence; assigning the bare
        #instance would try to iterate it (via __getitem__) instead of
        #inserting it.  Wrap in a list, as xml_insert_after/before do.
        parent.xml_children[index:index] = [instance]
    if not is_element(instance):
        return instance
    #We now know that it's an element
    #qname = instance.nodeName
    local = instance.localName
    ns = instance.namespaceURI
    ename = parent.xml_naming_rule.xml_to_python(local)
    if hasattr(parent, ename):
        obj = getattr(parent, ename)
        if not hasattr(obj, "next_elem") or ns != obj.namespaceURI:
            #Name clash with a non-sibling attribute: derive a fresh name
            ename = parent.xml_naming_rule.xml_to_python(local, check_clashes=dir(parent))
            #Bypass __setattr__
            parent.__dict__[ename] = instance
        else:
            if index == 0:
                #Instance becomes the new head of the sibling chain
                instance.__dict__['next_elem'] = obj
                #Bypass __setattr__
                parent.__dict__[ename] = instance
            else:
                #Walk the next_elem chain to splice instance in document order
                prev, curr = None, obj
                while curr.next_elem is not None:
                    if curr is curr.next_elem:
                        raise BinderException(INSTANCE_ALREADY_BOUND)
                    if index != -1 and curr.xml_parent.xml_children.index(curr) >= index:
                        #We've found where instance goes relative to other
                        #elements of the same name.  Interpolate it into the
                        #linked list
                        if prev:
                            prev.__dict__['next_elem'] = instance
                        else:
                            #Precede all other siblings with the same name
                            parent.__dict__[ename] = instance
                        instance.__dict__['next_elem'] = curr
                        break
                    prev, curr = curr, curr.next_elem
                else:
                    #Ran off the end of the chain: instance is the new tail
                    curr.__dict__['next_elem'] = instance
                    instance.__dict__['next_elem'] = None
    else:
        #First element with this name under parent
        #Bypass __setattr__
        parent.__dict__[ename] = instance
        instance.__dict__['next_elem'] = None
    return instance
class default_element_rule(object):
    """Default start-element rule: build a binding object for the element,
    bind its attributes, attach it under its parent, and push it on the
    binding stack.  A one-shot end-element rule pops it back off."""
    def apply(self, binder):
        if binder.event_completely_handled:
            #Then another rule has already created an instance
            return
        parent = binder.binding_stack[TOP]
        if not parent: return
        (dummy, qname, ns, local, attributes) = binder.event
        #NOTE(review): prefix is computed but never used below
        prefix = qname[:qname.rindex(local)][:-1]
        ename = binder.naming_rule.xml_to_python(local, ns)
        instance = create_element(binder.naming_rule, qname, ns, ename)
        bind_attributes(instance, parent, ename, binder)
        instance = bind_instance(instance, parent)
        binder.binding_stack.append(instance)
        #Inset a trigger for handling the matching end element
        def handle_end(binder):
            #One-shot closure: pops this element's binding and removes itself
            if binder.event_completely_handled:
                return
            #if binder.binding_stack[-1] is instance:
            binder.binding_stack.pop()
            binder.event_completely_handled = True
            binder.remove_rule(handle_end, saxtools.END_ELEMENT)
            return
        #High priority so the pop runs before other end-element rules
        handle_end.priority = 10
        binder.add_rule(handle_end, saxtools.END_ELEMENT)
        return
class default_root_rule(object):
    """Rule for the start-document event: push the root binding object."""
    def apply(self, binder):
        root = root_base()
        root.xml_naming_rule = binder.naming_rule
        binder.binding_stack.append(root)
        return
class default_pi_rule(object):
    """Default handling for processing-instruction events: wrap the PI in a
    (possibly user-registered) binding class under the current parent."""
    def apply(self, binder):
        if binder.event_completely_handled:
            #Another rule already produced an instance for this event
            return
        parent = binder.binding_stack[TOP]
        if not parent: return
        (dummy, target, data) = binder.event
        #Honor any class the user registered for this PI target
        factory = binder.pi_classes.get(target, pi_base)
        instance = factory()
        instance.target = target
        instance.data = data
        instance.xml_parent = parent
        parent.xml_children.append(instance)
        return
class default_comment_rule(object):
    """Default handling for comment events: record the comment text in a
    comment_base binding under the current parent."""
    def apply(self, binder):
        if binder.event_completely_handled:
            #Another rule already produced an instance for this event
            return
        parent = binder.binding_stack[TOP]
        if not parent: return
        (dummy, data) = binder.event
        instance = comment_base()
        instance.data = data
        instance.xml_parent = parent
        parent.xml_children.append(instance)
        return
class default_text_rule(object):
    """Default handling for character-data events: the text string itself
    becomes a child of the current binding object (no wrapper)."""
    def apply(self, binder):
        if binder.event_completely_handled:
            #Another rule already produced an instance for this event
            return
        parent = binder.binding_stack[TOP]
        if not parent: return
        dummy, text = binder.event
        #No actual binding object mapped to this node
        parent.xml_children.append(text)
        return
class default_container_node(object):
def is_attribute(self, pyname):
#Only elements can have attributes. element_base overrides to check
return False
#Mutation
def xml_clear(self):
"Remove all children"
#Tempting to do
#for c in self.xml_children: self.xml_remove_child(c)
#But that would just be a pig
self.xml_children = []
delete_attrs = []
for attr in self.__dict__:
if not (attr in self.xml_ignore_members or attr.startswith('xml')):
if getattr(self.__dict__[attr], 'next_elem', None):
delete_attrs.append(attr)
for attr in delete_attrs:
#Does not unlink all the way down the next_elem chain,
#But GC should take care of them once the first is unlinked
del self.__dict__[attr]
return
def xml_append(self, new):
"""
Append element or text
Returns the added child
"""
if isinstance(new, unicode):
self.xml_children.append(new)
elif new.nodeType == Node.ELEMENT_NODE:
bind_instance(new, self)
elif hasattr(new, 'nodeType'):
new.xml_parent = self
self.xml_children.append(new)
return new
def xml_insert_after(self, ref, new):
"""
Insert an object (element or text) after another child
ref - the existing child that marks the position for insert
new - the new child to be inserted
Returns the added child
"""
index = self.xml_children.index(ref)+1
if isinstance(new, unicode):
self.xml_children[index:index] = [new]
elif new.nodeType == Node.ELEMENT_NODE:
bind_instance(new, self, index)
elif hasattr(new, 'nodeType'):
new.xml_parent = self
self.xml_children[index:index] = [new]
return new
def xml_insert_before(self, ref, new):
"""
Insert an object (element or text) before another child
ref - the existing child that marks the position for insert
new - the new child to be inserted
Returns the added child
"""
index = self.xml_children.index(ref)
if isinstance(new, unicode):
self.xml_children[index:index] = [new]
elif new.nodeType == Node.ELEMENT_NODE:
bind_instance(new, self, index)
elif hasattr(new, 'nodeType'):
new.xml_parent = self
self.xml_children[index:index] = [new]
return new
    def xml_append_fragment(self, text, encoding=None):
        """
        Append chunk of literal XML to the children of the node instance
        text - string (not Unicode, since this is a parse operation) fragment
               of literal XML to be parsed.  This XML fragment must be
               a well-formed external parsed entity.  Basically, multiple
               root elements are allowed, but they must be properly balanced,
               special characters escaped, and doctype declaration is
               prohibited.  According to XML rules, the encoded string is
               assumed to be UTF-8 or UTF-16, but you can override this with
               an XML text declaration ('<?xml version="1.0" encoding="ENC"?>')
               or by passing in an encoding parameter to this function.
        encoding - optional string with the encoding to be used in parsing the
               XML fragment.  Default to the standard XML rules (i.e. UTF-8,
               UTF-16, or any encoding specified in text declaration).  If this
               parameter is specified, it overrrides any text declaration in
               the XML fragment.
        """
        from Ft.Xml.Domlette import EntityReader, GetAllNs, ParseFragment
        from Ft.Xml import Sax, InputSource
        #if encoding:
            #text = '<?xml version="1.0" encoding="%s"?>'%(encoding) + text
        #Dummy URI: only used as a base identifier for the fragment source
        isrc = InputSource.DefaultFactory.fromString(text, 'urn:x-amara:amara-xml-template')
        if encoding:
            isrc.encoding = encoding
        #Seed the fragment parse with every namespace in scope at this node
        nss = self.rootNode.xmlns_prefixes
        nss.update(GetAllNs(self))
        docfrag = ParseFragment(isrc, nss)
        def walk_domlette(node, target):
            #Recursively copy the parsed Domlette tree into binding objects
            #appended under target
            if node.nodeType == Node.ELEMENT_NODE:
                attrs = {}
                for attr in node.xpathAttributes:
                    attrs[(attr.nodeName, attr.namespaceURI)] = attr.value
                elem = target.xml_create_element(qname=node.nodeName, ns=node.namespaceURI, attributes=attrs)
                target.xml_append(elem)
                for child in node.childNodes:
                    walk_domlette(child, elem)
            elif node.nodeType == Node.TEXT_NODE:
                target.xml_append(node.data)
            else:
                #Document/fragment level: recurse straight into children
                for child in node.childNodes:
                    walk_domlette(child, target)
            return
        walk_domlette(docfrag, self)
        return
def xml_create_element(self, qname, ns=None, content=None, attributes=None):
"Create a new, empty element (no attrs)"
instance = create_element(self.xml_naming_rule, qname, ns)
if content:
if not isinstance(content, list):
content = [ content ]
instance.xml_children = content
if attributes:
instance.xml_attributes = {}
for aname in attributes:
if isinstance(aname, tuple):
aqname, ans = aname
else:
aqname, ans = aname, None
avalue = attributes[aname]
apyname = self.xml_naming_rule.xml_to_python(
SplitQName(aqname)[1], ans,
check_clashes=dir(instance))
#Bypass __setattr__
instance.__dict__[apyname] = avalue
instance.xml_attributes[apyname] = (aqname, ans)
return instance
def xml_remove_child(self, obj):
"""
Remove the child equal to the given object
obj - any object valid as a bound element child
returns the removed child
"""
index = self.xml_children.index(obj)
return self.xml_remove_child_at(index)
    def xml_remove_child_at(self, index=-1):
        """
        Remove child object at a given index
        index - optional, 0-based index of child to remove (defaults to the last child)
        returns the removed child
        """
        obj = self.xml_children[index]
        if isinstance(obj, unicode):
            #Text children have no bound attribute references to clean up
            removed = self.xml_children[index]
            del self.xml_children[index]
        else:
            #Remove references to the object
            #Probably a slow way to go about this
            for attr, val in self.__dict__.items():
                if not (attr.startswith('xml') or attr in self.xml_ignore_members):
                    next = getattr(val, 'next_elem', None)
                    if val == obj:
                        #obj is the chain head: promote its successor (if any)
                        del self.__dict__[attr]
                        if next: self.__dict__[attr] = next
                        break
                    #Otherwise walk the next_elem chain looking for obj
                    #NOTE(review): matching uses == (not is) — confirm
                    #element equality semantics are identity-like here
                    while next:
                        prev, val = val, next
                        next = getattr(val, 'next_elem', None)
                        if val == obj:
                            prev.next_elem = next
                            break
            removed = self.xml_children[index]
            del self.xml_children[index]
        return removed
@property
def xml_properties(self):
"""
Return a dictionary whose keys are Python properties on this
object that represent XML attributes and elements, and whose vaues
are the corresponding objects (a subset of __dict__)
"""
properties = {}
for attr in self.__dict__:
if (not (attr.startswith('xml')
or attr in self.xml_ignore_members)):
properties[attr] = self.__dict__[attr]
return properties
@property
def xml_child_elements(self):
child_elements = {}
for attr, val in self.xml_properties.items():
if is_element(val):
child_elements[attr] = val
return child_elements
    def xml_doc(self):
        """
        Return a human-readable unicode summary of the object references
        created on this element from XML attributes and child elements.
        """
        msg = []
        xml_attrs = []
        if hasattr(self, 'xml_attributes'):
            msg.append('Object references based on XML attributes:')
            for apyname in self.xml_attributes:
                #NOTE(review): xml_attributes stores (qname, ns) pairs, so
                #'local' here is actually the full QName, not the local part
                local, ns = self.xml_attributes[apyname]
                if ns:
                    source_phrase = " based on '{%s}%s' in XML"%(ns, local)
                else:
                    source_phrase = " based on '%s' in XML"%(local)
                msg.append(apyname+source_phrase)
                xml_attrs.append(apyname)
        msg.append('Object references based on XML child elements:')
        for attr, val in self.__dict__.items():
            if not (attr.startswith('xml') or attr in self.xml_ignore_members):
                if attr not in xml_attrs:
                    #Count this element plus its same-named siblings
                    count = len(list(getattr(self, attr)))
                    if count == 1:
                        count_phrase = " (%s element)"%count
                    else:
                        count_phrase = " (%s elements)"%count
                    local, ns = val.localName, val.namespaceURI
                    if ns:
                        source_phrase = " based on '{%s}%s' in XML"%(ns, local)
                    else:
                        source_phrase = " based on '%s' in XML"%(local)
                    msg.append(attr+count_phrase+source_phrase)
        return u'\n'.join(msg)
    def __getitem__(self, key):
        """
        Index lookup on a bound element.
        key may be:
          * an int - positional index into the chain of same-named siblings
          * a string/unicode local name (namespace assumed None)
          * a (ns, local-name) tuple
          * a (node-type, ns, local-name) triple, where node-type restricts
            the match to Node.ATTRIBUTE_NODE or Node.ELEMENT_NODE
        Raises KeyError when no attribute or child element matches.
        """
        if isinstance(key, int):
            result = list(self)[key]
        else:
            force_type = None
            if isinstance(key, tuple):
                if len(key) == 3:
                    force_type, key = key[0], key[1:]
            elif isinstance(key, basestring):
                key = (None, key)
            else:
                raise TypeError('Inappropriate key (%s)'%(key))
            #for/else: the else clause raises if no break occurred
            for attr, obj in self.xml_properties.items():
                if self.is_attribute(attr):
                    qname, ns = self.xml_attributes.get(attr)
                    if (force_type in (None, Node.ATTRIBUTE_NODE)
                        and key == (ns, SplitQName(qname)[1])):
                        #The key references an XML attribute
                        #Bypass __setattr__
                        result = obj
                        break
                elif is_element(obj):
                    if (force_type in (None, Node.ELEMENT_NODE)
                        and key == (obj.namespaceURI, obj.localName)):
                        result = obj
                        break
            else:
                raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
        return result
    def __setitem__(self, key, value):
        """
        Index assignment on a bound element.
        key has the same forms as in __getitem__ (int, name string,
        (ns, local) tuple, or (node-type, ns, local) triple).
        For an element match the target child's content is replaced by
        [value]; for an attribute match the value is stored directly.
        Raises KeyError when no attribute or child element matches.
        """
        if isinstance(key, int):
            #Replace the content of the key-th same-named sibling
            child = self.__getitem__(key)
            child.xml_clear()
            child.xml_children = [value]
        else:
            force_type = None
            if isinstance(key, tuple):
                if len(key) == 3:
                    force_type, key = key[0], key[1:]
            elif isinstance(key, basestring):
                key = (None, key)
            else:
                raise TypeError('Inappropriate key (%s)'%(key))
            #for/else: the else clause raises if no break occurred
            for attr, obj in self.xml_properties.items():
                if self.is_attribute(attr):
                    qname, ns = self.xml_attributes.get(attr)
                    if (force_type in (None, Node.ATTRIBUTE_NODE)
                        and key == (ns, SplitQName(qname)[1])):
                        #The key references an XML attribute
                        #Bypass __setattr__
                        self.__dict__[attr] = value
                        break
                elif is_element(obj):
                    if (force_type in (None, Node.ELEMENT_NODE)
                        and key == (obj.namespaceURI, obj.localName)):
                        obj.xml_clear()
                        obj.xml_children = [value]
                        break
            else:
                raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
        return
    def __delitem__(self, key):
        """
        Index deletion on a bound element.
        key has the same forms as in __getitem__.  An int removes the
        key-th child from the parent; otherwise the matching XML
        attribute or child element is removed.
        Raises KeyError when no attribute or child element matches.
        """
        if isinstance(key, int):
            #child = self.__getitem__(key)
            #index = self.xml_parent.xml_children.index(child)
            #NOTE(review): the int index is applied to the parent's full
            #child list, not the same-named sibling chain used by
            #__getitem__ - confirm this asymmetry is intended
            self.xml_parent.xml_remove_child_at(key)
        else:
            force_type = None
            if isinstance(key, tuple):
                if len(key) == 3:
                    force_type, key = key[0], key[1:]
            elif isinstance(key, basestring):
                key = (None, key)
            else:
                raise TypeError('Inappropriate key (%s)'%(key))
            #for/else: the else clause raises if no break occurred
            for attr, obj in self.xml_properties.items():
                if self.is_attribute(attr):
                    qname, ns = self.xml_attributes.get(attr)
                    if (force_type in (None, Node.ATTRIBUTE_NODE)
                        and key == (ns, SplitQName(qname)[1])):
                        #The key references an XML attribute
                        del self.xml_attributes[attr]
                        #Bypass __delattr__
                        del self.__dict__[attr]
                        break
                elif is_element(obj):
                    if (force_type in (None, Node.ELEMENT_NODE)
                        and key == (obj.namespaceURI, obj.localName)):
                        self.xml_remove_child(obj)
                        break
            else:
                raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
        return
    def xml_xslt(self, transform, params=None, output=None):
        """
        Apply an XSLT transform directly to the bindery object.
        This function is quite limited, and will only handle the
        simplest transforms. If you find your transform does not work with it,
        serialize using xml() then use Ft.Xml.Xslt.transform, which is fully
        XSLT compliant.
        transform - the stylesheet to attach to the processor
        output - optional file-like object to which output is written
                 (incrementally, as processed);
                 if given, output goes to it and the return value is None,
                 otherwise the transform result is returned as a Python
                 string (not Unicode)
        params - optional dictionary of stylesheet parameters, the keys of
                 which may be given as unicode objects if they have no
                 namespace, or as (uri, localname) tuples if they do
        """
        from Ft.Xml.Xslt import Processor, _AttachStylesheetToProcessor
        #NOTE(review): InputSource, Uri, Uuid and IsXml are imported below
        #but never used in this method
        from Ft.Xml import InputSource
        from Ft.Lib import Uri, Uuid
        from Ft.Xml.Lib.XmlString import IsXml
        params = params or {}
        processor = Processor.Processor()
        _AttachStylesheetToProcessor(transform, processor)
        return processor.runNode(self, topLevelParams=params,
                                 outputStream=output)
class root_base(default_container_node, xpath_wrapper_container_mixin):
    """
    Base class for root nodes (similar to DOM documents
    and document fragments)
    """
    nodeType = Node.DOCUMENT_NODE
    xml_ignore_members = RESERVED_NAMES
    def __init__(self, naming_rule=None, doctype_name=None,
                 pubid=None, sysid=None, xmlns_prefixes=None):
        #naming_rule - maps XML names to Python identifiers
        #doctype_name/pubid/sysid - optional DOCTYPE information
        #xmlns_prefixes - prefix -> namespace URI mapping used at serialization
        if not naming_rule: naming_rule = default_naming_rule()
        self.xml_children = []
        self.nodeName = u'#document'
        self.xml_naming_rule = naming_rule
        self.xmlns_prefixes = xmlns_prefixes or {}
        #Doctype attributes are only set when a doctype name was given;
        #other code tests hasattr(self, 'xml_sysid') to detect this
        if doctype_name:
            self.xml_pubid = pubid
            self.xml_sysid = sysid
            self.xml_doctype_name = doctype_name
        return
    def xml(self, stream=None, writer=None, force_nsdecls=None, **wargs):
        """
        Serialize the document back to XML.

        If stream is given, output goes to the stream and the return value
        is None.  Output parameters (among others):
            encoding - output encoding (default UTF-8)
            omitXmlDeclaration - u'yes' to omit the XML decl (default u'no')
            cdataSectionElements - a list of element (namespace, local-name)
                pairs; all matching elements are output as CDATA sections
            indent - u'yes' to pretty-print the XML (default u'no')
        Other output parameters are supported, based on XSLT 1.0's
        xsl:output instruction, but use of the others is encouraged only
        for very advanced users.

        You can also pass in your own writer instance (useful e.g. to
        output a SAX stream or DOM nodes); encoding can be set on the
        writer.  If neither a stream nor a writer is given, the output
        text is returned as a Python string (not Unicode) encoded as UTF-8.

        force_nsdecls - optional dictionary of unicode prefix -> unicode
        namespace URI; forces those namespace declarations onto the
        top-level elements (you can, for example, pass the xmlns_prefixes
        dictionary from any root node).
        """
        temp_stream = None
        if not writer:
            #As a convenience, allow cdata section element defs to be simple QName
            if wargs.get('cdataSectionElements'):
                cdses = wargs['cdataSectionElements']
                cdses = [ isinstance(e, tuple) and e or (None, e)
                          for e in cdses ]
                wargs['cdataSectionElements'] = cdses
            #Doctype info is only present if it was set via __init__
            if hasattr(self, "xml_sysid"):
                sysid, pubid = self.xml_sysid, self.xml_pubid
            else:
                sysid, pubid = None, None
            if stream:
                writer = create_writer(stream, wargs, pubid=pubid,
                                       sysid=sysid)
            else:
                #No sink given: capture output in an in-memory buffer
                temp_stream = cStringIO.StringIO()
                writer = create_writer(temp_stream, wargs,
                                       pubid=pubid,
                                       sysid=sysid)
        writer.startDocument()
        for child in self.xml_children:
            if isinstance(child, unicode):
                writer.text(child)
            else:
                child.xml(writer=writer, force_nsdecls=force_nsdecls)
        writer.endDocument()
        #Only non-None when we created the in-memory buffer above
        return temp_stream and temp_stream.getvalue()
    #Needed for Print and PrettyPrint
    #But should we support these, since we have xml(),
    #which can take a writer with indent="yes?"
    def _doctype(self):
        #Adapter exposing DOM-style doctype attributes (name/publicId/systemId)
        class doctype_wrapper(object):
            def __init__(self, name, pubid, sysid):
                self.name = name
                self.publicId = pubid
                self.systemId = sysid
                return
        if hasattr(self, "xml_sysid"):
            return doctype_wrapper(self.xml_doctype_name, self.xml_pubid,
                                   self.xml_sysid)
        else:
            return None
    doctype = property(_doctype)
class pi_base(dummy_node_wrapper):
    """Bindery node wrapping an XML processing instruction (target + data)."""
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    def __init__(self, target=None, data=None):
        self.target = target
        self.data = data
    def xml(self, stream=None, writer=None, **wargs):
        """Serialize this PI; if writer is None, stream cannot be None."""
        if not writer:
            writer = create_writer(stream)
        writer.processingInstruction(self.target, self.data)
    def __cmp__(self, other):
        #Compare/order by object identity (Python 2 comparison protocol)
        return cmp(id(self), id(other))
class comment_base(dummy_node_wrapper):
    """Bindery node wrapping an XML comment."""
    nodeType = Node.COMMENT_NODE
    def __init__(self, data=None):
        self.data = data
    def xml(self, stream=None, writer=None, **wargs):
        """Serialize this comment; if writer is None, stream cannot be None."""
        if not writer:
            writer = create_writer(stream)
        writer.comment(self.data)
    def __cmp__(self, other):
        #Compare/order by object identity (Python 2 comparison protocol)
        return cmp(id(self), id(other))
class element_iterator:
def __init__(self, start):
self.curr = start
return
def __iter__(self):
return self
def next(self):
if not self.curr:
raise StopIteration()
result = self.curr
if self.curr.next_elem is not None:
self.curr = self.curr.next_elem
else:
self.curr = None
return result
class element_base(default_container_node, xpath_wrapper_container_mixin):
    """
    Base class for bound XML elements.  Child content lives in
    xml_children; XML attributes are tracked in xml_attributes (a dict
    of python-name -> (qname, ns)); next_elem links to the next sibling
    element bound to the same property name.
    """
    nodeType = Node.ELEMENT_NODE
    #xml_ignore_members = ['nodeName']
    xml_ignore_members = RESERVED_NAMES
    def __init__(self):
        self.xml_children = []
        self.next_elem = None
        return
    def __iter__(self):
        #Iterate over this element and its same-named siblings
        return element_iterator(self)
    def __len__(self):
        #Count this element plus all linked same-named siblings
        count = 1
        curr = self
        while curr.next_elem is not None:
            count += 1
            curr = curr.next_elem
        return count
    def __delattr__(self, key):
        """
        Delete a bound property: a child element is unlinked via
        xml_remove_child; a unicode value (an XML attribute) is dropped
        from both the instance dict and xml_attributes.  Names starting
        with 'xml' and reserved names are deleted directly.
        """
        if key.startswith('xml') or key in RESERVED_NAMES:
            del self.__dict__[key]
            return
        ref = getattr(self, key)
        if is_element(ref):
            self.xml_remove_child(ref)
        elif isinstance(ref, unicode):
            #Unicode-valued property: it represents an XML attribute
            del self.__dict__[key]
            del self.xml_attributes[key]
        return
    def __setattr__(self, key, value):
        """
        Set a bound property.  Names starting with 'xml' and reserved
        names are stored directly.  Assigning over an existing element
        property replaces that child's content with [value].  Assigning
        a new unicode value creates an XML attribute entry.  Any other
        new assignment raises ValueError.
        """
        if key.startswith('xml') or key in RESERVED_NAMES:
            self.__dict__[key] = value
            return
        if hasattr(self, key):
            ref = getattr(self, key)
            if is_element(ref):
                #Replace the existing child element's content
                ref.xml_clear()
                ref.xml_children = [value]
            #elif isinstance(ref, unicode):
            else:
                self.__dict__[key] = value
            return
        elif isinstance(value, unicode):
            #New unicode-valued property: register it as an XML attribute
            self.__dict__[key] = value
            if not hasattr(self, 'xml_attributes'):
                self.xml_attributes = {}
            self.xml_attributes[key] = (key.decode('iso-8859-1'), None)
        else:
            raise ValueError('Inappropriate set attribute request: key (%s), value (%s)'%(key, value))
        return
    def xml_set_attribute(self, aname, avalue):
        "Set (or create) an attribute on an element; returns the Python name used"
        if isinstance(aname, tuple):
            aqname, ans = aname
        else:
            aqname, ans = aname, None
        prefix, local = SplitQName(aqname)
        if prefix == u'xml':
            ans = XML_NS
        elif ans and not prefix and ans in self.rootNode.xmlns_prefixes.values():
            #If the user specified a namespace URI and not a prefix, they would
            #Usually get an ugly generated prefix. Check the document for a nicer
            #Alternative
            #Note: this could cause spurious namespace declarations if the document is not sane
            prefix = [ p for p, u in self.rootNode.xmlns_prefixes.items() if u == ans ][0]
            aqname = prefix + u':' + local
        apyname = self.xml_naming_rule.xml_to_python(
            local, ans,
            check_clashes=dir(self))
        #Bypass __setattr__
        self.__dict__[apyname] = avalue
        if not hasattr(self, 'xml_attributes'):
            self.xml_attributes = {}
        self.xml_attributes[apyname] = (aqname, ans)
        return apyname
    #def __setattr__(self, attr, value):
        #Equivalent to creating a bound attribute
    #    self.__dict__[attr] = value
    #    return
    #def count(self):
    #    return len(list(self))
    def is_attribute(self, pyname):
        #Test a Python property (specified by name) to see whether it comes
        #from an XML attribute
        return (hasattr(self, 'xml_attributes') and self.xml_attributes.has_key(pyname))
    @property
    def xml_index_on_parent(self):
        #0-based position of this element within its parent's child list
        try:
            index = self.xml_parent.xml_children.index(self)
        except ValueError: #Not found
            raise
        return index
    @property
    def xml_child_text(self):
        #Concatenation of the direct unicode (text) children only
        return u''.join([ ch for ch in self.xml_children
                          if isinstance(ch, unicode)])
    @property
    def xml_text_content(self):
        warnings.warn('This property will be eliminated soon. Please use the unicode conversion function instead')
        return self.xml_child_text
    def __unicode__(self):
        '''
        Returns a Unicode object with the text contents of this node and
        its descendants, if any.
        Equivalent to DOM .textContent or XPath string() conversion
        '''
        return u''.join([ unicode(ch) for ch in self.xml_children
                          if not isinstance(ch, pi_base) and not isinstance(ch, comment_base)])
    def __str__(self):
        #Should we make the encoding configurable? (self.defencoding?)
        return unicode(self).encode('utf-8')
    def xml(self, stream=None, writer=None, force_nsdecls=None, **wargs):
        """
        Serialize this element (and its subtree) back to XML.

        If stream is given, output goes to the stream and the return value
        is None.  Output parameters (among others):
            encoding - output encoding (default u'UTF-8')
            omitXmlDeclaration - u'no' to include an XML decl (default u'yes')
            cdataSectionElements - a list of element (namespace, local-name)
                pairs; all matching elements are output as CDATA sections
            indent - u'yes' to pretty-print the XML (default u'no')
        Other output parameters are supported, based on XSLT 1.0's
        xsl:output instruction, but use of the others is encouraged only
        for very advanced users.

        You can also pass in your own writer instance (useful e.g. to
        output a SAX stream or DOM nodes); encoding can be set on the
        writer.  If neither a stream nor a writer is given, the output
        text is returned as a Python string (not Unicode) encoded as UTF-8.

        force_nsdecls - optional dictionary of unicode prefix -> unicode
        namespace URI; forces those namespace declarations onto the
        top-level element (you can, for example, pass the xmlns_prefixes
        dictionary from any root node).
        """
        temp_stream = None
        close_document = 0
        if not writer:
            #Change the default to *not* generating an XML decl
            if not wargs.get('omitXmlDeclaration'):
                wargs['omitXmlDeclaration'] = u'yes'
            if stream:
                writer = create_writer(stream, wargs)
            else:
                #No sink given: capture output in an in-memory buffer and
                #open/close the document ourselves
                temp_stream = cStringIO.StringIO()
                writer = create_writer(temp_stream, wargs)
                writer.startDocument()
                close_document = 1
        writer.startElement(self.nodeName, self.namespaceURI,
                            extraNss=force_nsdecls)
        if hasattr(self, 'xml_attributes'):
            for apyname in self.xml_attributes:
                aqname, ans = self.xml_attributes[apyname]
                val = self.__dict__[apyname]
                writer.attribute(aqname, val, ans)
        for child in self.xml_children:
            if isinstance(child, unicode):
                writer.text(child)
            else:
                child.xml(writer=writer)
        writer.endElement(self.nodeName, self.namespaceURI)
        if close_document:
            writer.endDocument()
        #Only non-None when we created the in-memory buffer above
        return temp_stream and temp_stream.getvalue()
def create_writer(stream, wargs=None, pubid=None, sysid=None, encoding="UTF-8"):
    """
    Build a CDATA-section-aware XML writer over stream.
    wargs - optional dict of XSLT-style output parameters, applied as
            attributes of an OutputParameters instance
    pubid/sysid - optional doctype public/system ids
    Raises BinderException if no stream is given.
    """
    if not stream:
        raise BinderException(NO_STREAM_GIVEN_FOR_UNBIND)
    params = OutputParameters.OutputParameters()
    for name, value in (wargs or {}).items():
        setattr(params, name, value)
    #Doctype info in the document object override any given explicitly
    if sysid:
        params.doctypeSystem = sysid
    if pubid:
        params.doctypePublic = pubid
    #writer = XmlWriter.XmlWriter(params, stream)
    return XmlWriter.CdataSectionXmlWriter(params, stream)
class BinderException(Exception):
    """Raised for bindery errors (see the message constants below)."""
    pass
#CLASH_BETWEEN_SCALAR_AND_SUBELEM = _('Bindery does not yet handle a name clash between an attribute and a child element')
#Localized (gettext '_') message strings used with BinderException
NO_STREAM_GIVEN_FOR_UNBIND = _('You must provide a stream for the output of the xml method (serialization)')
INSTANCE_ALREADY_BOUND = _('Instance already bound to parent')
| {
"repo_name": "AmericanResearchInstitute/poweru-server",
"path": "cmis_storage/amara/bindery.py",
"copies": "1",
"size": "46359",
"license": "bsd-3-clause",
"hash": -2548189449304707600,
"line_mean": 37.5682196339,
"line_max": 122,
"alpha_frac": 0.5741064303,
"autogenerated": false,
"ratio": 4.259762933014794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005188805576633574,
"num_lines": 1202
} |
__all__ = ["Bind"]
from log import VLOG
from browser.status import *
""" The Bind class accepts 2 parameters: a callable function and a list in which
the variable parameters are packed.
There are 2 ways of binding: you can use "Bind(func, [argument0, argument1, ...])"
for early binding, or "Bind(func)" for dynamic binding; in the latter case you
must call Update([argument0, argument1, ...]) before actually running it.
Note that the execute function must be callable; initializing Bind with an
uncallable execute function will result in an exception """
class Bind(object):
    """Pack a callable together with a positional-argument list.

    Two usage styles:
      * early binding:  Bind(func, [arg0, arg1, ...])
      * late binding:   Bind(func), then Update([arg0, ...]) before Run().

    Raises Exception from __init__ when the runner is not callable
    (including the default runner=None).
    """
    # default execute function doing nothing
    @staticmethod
    def _RunNothing():
        return Status(kOk)
    def __init__(self, runner=None, args=None):
        """runner - the callable to execute; args - optional argument list.

        BUG FIX: 'args=[]' was a shared mutable default, so every Bind
        created without explicit args aliased the same list; a None
        sentinel is used instead.  The original try/except around the
        plain assignments was dead code and has been removed.
        """
        if not callable(runner):
            VLOG(0, "execute function isn't callable")
            raise Exception("execute function isn't callable")
        self.runner = runner
        self.args = [] if args is None else args
    def Update(self, args=None):
        """Replace the packed argument list used by Run()."""
        self.args = [] if args is None else args
    def Run(self):
        """Invoke the bound callable with the packed arguments."""
        return self.runner(*self.args)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "base/bind.py",
"copies": "1",
"size": "1075",
"license": "bsd-3-clause",
"hash": 3209541880793187300,
"line_mean": 27.2894736842,
"line_max": 89,
"alpha_frac": 0.6855813953,
"autogenerated": false,
"ratio": 3.8392857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9702534094610805,
"avg_score": 0.0644666029949819,
"num_lines": 38
} |
__all__ = ['BioCCollection']
from meta import _MetaInfons, _MetaIter
from compat import _Py2Next
class BioCCollection(_Py2Next, _MetaInfons, _MetaIter):
    """In-memory representation of a BioC <collection> element.

    Holds collection-level metadata (source, date, key, infons) and the
    list of BioCDocument objects it contains.
    """
    def __init__(self, collection=None):
        """Create an empty collection, or copy fields from *collection*.

        NOTE(review): when *collection* is given, the infons dict and the
        documents list are shared with the source object, not copied.
        """
        self.infons = dict()
        self.source = ''
        self.date = ''
        self.key = ''
        self.documents = list()
        if collection is not None:
            self.infons = collection.infons
            self.source = collection.source
            self.date = collection.date
            self.key = collection.key
            self.documents = collection.documents
    def __str__(self):
        s = 'source: ' + self.source + '\n'
        s += 'date: ' + self.date + '\n'
        s += 'key: ' + self.key + '\n'
        s += str(self.infons) + '\n'
        s += str(self.documents) + '\n'
        return s
    def _iterdata(self):
        # Hook for _MetaIter: iterate over the documents.
        return self.documents
    def clear_documents(self):
        """Drop all documents."""
        self.documents = list()
    def get_document(self, doc_idx):
        """Return the document at index doc_idx."""
        return self.documents[doc_idx]
    def add_document(self, document):
        """Append a BioCDocument to the collection."""
        self.documents.append(document)
    def remove_document(self, document):
        """Remove a document, given either its index (int) or the object."""
        if type(document) is int:
            # BUG FIX: was 'self.dcouments' (typo), which raised
            # AttributeError whenever an int index was passed.
            self.documents.remove(self.documents[document])
        else:
            self.documents.remove(document)
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/bioc_collection.py",
"copies": "1",
"size": "1302",
"license": "bsd-2-clause",
"hash": -3348451241101115000,
"line_mean": 25.5714285714,
"line_max": 59,
"alpha_frac": 0.5637480799,
"autogenerated": false,
"ratio": 3.7413793103448274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9805127390244828,
"avg_score": 0,
"num_lines": 49
} |
__all__ = ['BioCDocument']
from compat import _Py2Next
from meta import _MetaId, _MetaInfons, _MetaRelations, _MetaIter
class BioCDocument(_MetaId, _MetaInfons, _MetaRelations, _MetaIter,
                   _Py2Next):
    """In-memory representation of a BioC <document> element."""
    def __init__(self, document=None):
        """Create an empty document, or copy fields from *document*.

        NOTE(review): when *document* is given, the contained lists and
        the infons dict are shared with the source object, not copied.
        """
        self.id = ''
        self.infons = dict()
        self.relations = list()
        self.passages = list()
        if document is not None:
            self.id = document.id
            self.infons = document.infons
            self.relations = document.relations
            self.passages = document.passages
    def __str__(self):
        s = 'id: ' + self.id + '\n'
        s += 'infon: ' + str(self.infons) + '\n'
        s += str(self.passages) + '\n'
        s += 'relation: ' + str(self.relations) + '\n'
        return s
    def _iterdata(self):
        # Hook for _MetaIter: iterate over the passages.
        return self.passages
    def get_size(self):
        """Return the number of passages in this document."""
        # BUG FIX: Python lists have no .size() method (carried over from
        # the Java BioC API); the original always raised AttributeError.
        return len(self.passages)
    def clear_passages(self):
        """Drop all passages."""
        self.passages = list()
    def add_passage(self, passage):
        """Append a BioCPassage to the document."""
        self.passages.append(passage)
    def remove_passage(self, passage):
        """Remove a passage, given either its index (int) or the object."""
        if type(passage) is int:
            self.passages.remove(self.passages[passage])
        else:
            self.passages.remove(passage)
| {
"repo_name": "telukir/PubMed2Go",
"path": "BioC_export/bioc/bioc_document.py",
"copies": "2",
"size": "1262",
"license": "isc",
"hash": 7176245181038424000,
"line_mean": 26.4347826087,
"line_max": 67,
"alpha_frac": 0.5649762282,
"autogenerated": false,
"ratio": 3.4575342465753423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001175168134886805,
"num_lines": 46
} |
__all__ = ['BioCDocument']
from compat import _Py2Next
from meta import _MetaId, _MetaInfons, _MetaRelations, _MetaIter
class BioCDocument(_MetaId, _MetaInfons, _MetaRelations, _MetaIter, _Py2Next):
    """In-memory representation of a BioC <document> element."""
    def __init__(self, document=None):
        """Create an empty document, or copy fields from *document*.

        NOTE(review): when *document* is given, the contained lists and
        the infons dict are shared with the source object, not copied.
        """
        self.id = ''
        self.infons = dict()
        self.relations = list()
        self.passages = list()
        if document is not None:
            self.id = document.id
            self.infons = document.infons
            self.relations = document.relations
            self.passages = document.passages
    def __str__(self):
        s = 'id: ' + self.id + '\n'
        s += 'infon: ' + str(self.infons) + '\n'
        s += str(self.passages) + '\n'
        s += 'relation: ' + str(self.relations) + '\n'
        return s
    def _iterdata(self):
        # Hook for _MetaIter: iterate over the passages.
        return self.passages
    def get_size(self):
        """Return the number of passages in this document."""
        # BUG FIX: Python lists have no .size() method (carried over from
        # the Java BioC API); the original always raised AttributeError.
        return len(self.passages)
    def clear_passages(self):
        """Drop all passages."""
        self.passages = list()
    def add_passage(self, passage):
        """Append a BioCPassage to the document."""
        self.passages.append(passage)
    def remove_passage(self, passage):
        """Remove a passage, given either its index (int) or the object."""
        if type(passage) is int:
            self.passages.remove(self.passages[passage])
        else:
            self.passages.remove(passage)
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/bioc_document.py",
"copies": "1",
"size": "1246",
"license": "bsd-2-clause",
"hash": 6741394778555290000,
"line_mean": 26.0869565217,
"line_max": 78,
"alpha_frac": 0.5722311396,
"autogenerated": false,
"ratio": 3.4043715846994536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476602724299454,
"avg_score": 0,
"num_lines": 46
} |
__all__ = ['BioCPassage']
from meta import _MetaAnnotations, _MetaInfons, _MetaOffset, _MetaRelations, \
_MetaText
class BioCPassage(_MetaAnnotations, _MetaOffset, _MetaText, _MetaRelations,
                  _MetaInfons):
    """In-memory representation of a BioC <passage> element.

    A passage carries an offset, optional text, infons, and either
    sentences or annotations/relations.
    """
    def __init__(self, passage=None):
        """Create an empty passage, or copy fields from *passage*.

        NOTE(review): when *passage* is given, the contained lists and
        the infons dict are shared with the source object, not copied.
        """
        self.offset = '-1'
        self.text = ''
        self.infons = dict()
        self.sentences = list()
        self.annotations = list()
        self.relations = list()
        if passage is not None:
            self.offset = passage.offset
            self.text = passage.text
            self.infons = passage.infons
            self.sentences = passage.sentences
            self.annotations = passage.annotations
            self.relations = passage.relations
    def size(self):
        """Return the number of sentences."""
        return len(self.sentences)
    def has_sentences(self):
        """Return True if the passage holds at least one sentence."""
        # Returns an explicit bool; the original returned None instead of
        # False, which is equivalent in boolean context.
        return len(self.sentences) > 0
    def add_sentence(self, sentence):
        """Append a BioCSentence to the passage."""
        self.sentences.append(sentence)
    def sentences_iterator(self):
        """Return an iterator over the sentences."""
        # BUG FIX: Python lists have no .iterator() method (Java-ism);
        # the original always raised AttributeError.
        return iter(self.sentences)
    def clear_sentences(self):
        """Drop all sentences."""
        # BUG FIX: the original reset self.relations here (copy/paste
        # error), leaving the sentence list untouched.
        self.sentences = list()
    def remove_sentence(self, sentence):  # int or obj
        """Remove a sentence, given either its index (int) or the object."""
        if type(sentence) is int:
            self.sentences.remove(self.sentences[sentence])
        else:
            self.sentences.remove(sentence)
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/bioc_passage.py",
"copies": "1",
"size": "1324",
"license": "bsd-2-clause",
"hash": 1403453558138028500,
"line_mean": 27.170212766,
"line_max": 78,
"alpha_frac": 0.5974320242,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50974320242,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BioCPassage']
from meta import _MetaAnnotations, _MetaInfons, _MetaOffset, \
_MetaRelations, _MetaText
class BioCPassage(_MetaAnnotations, _MetaOffset, _MetaText,
                  _MetaRelations, _MetaInfons):
    """In-memory representation of a BioC <passage> element.

    A passage carries an offset, optional text, infons, and either
    sentences or annotations/relations.
    """
    def __init__(self, passage=None):
        """Create an empty passage, or copy fields from *passage*.

        NOTE(review): when *passage* is given, the contained lists and
        the infons dict are shared with the source object, not copied.
        """
        self.offset = '-1'
        self.text = ''
        self.infons = dict()
        self.sentences = list()
        self.annotations = list()
        self.relations = list()
        if passage is not None:
            self.offset = passage.offset
            self.text = passage.text
            self.infons = passage.infons
            self.sentences = passage.sentences
            self.annotations = passage.annotations
            self.relations = passage.relations
    def size(self):
        """Return the number of sentences."""
        return len(self.sentences)
    def has_sentences(self):
        """Return True if the passage holds at least one sentence."""
        # Returns an explicit bool; the original returned None instead of
        # False, which is equivalent in boolean context.
        return len(self.sentences) > 0
    def add_sentence(self, sentence):
        """Append a BioCSentence to the passage."""
        self.sentences.append(sentence)
    def sentences_iterator(self):
        """Return an iterator over the sentences."""
        # BUG FIX: Python lists have no .iterator() method (Java-ism);
        # the original always raised AttributeError.
        return iter(self.sentences)
    def clear_sentences(self):
        """Drop all sentences."""
        # BUG FIX: the original reset self.relations here (copy/paste
        # error), leaving the sentence list untouched.
        self.sentences = list()
    def remove_sentence(self, sentence):  # int or obj
        """Remove a sentence, given either its index (int) or the object."""
        if type(sentence) is int:
            self.sentences.remove(self.sentences[sentence])
        else:
            self.sentences.remove(sentence)
| {
"repo_name": "telukir/PubMedPortable",
"path": "BioC_export/bioc/bioc_passage.py",
"copies": "2",
"size": "1342",
"license": "isc",
"hash": 7842576895664286000,
"line_mean": 28.1739130435,
"line_max": 62,
"alpha_frac": 0.5894187779,
"autogenerated": false,
"ratio": 4.079027355623101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5668446133523101,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BioCReader']
import StringIO
from lxml import etree
from bioc_annotation import BioCAnnotation
from bioc_collection import BioCCollection
from bioc_document import BioCDocument
from bioc_location import BioCLocation
from bioc_passage import BioCPassage
from bioc_sentence import BioCSentence
from bioc_node import BioCNode
from bioc_relation import BioCRelation
class BioCReader:
    """
    This class can be used to store BioC XML files in PyBioC objects,
    for further manipulation.
    """
    def __init__(self, source, dtd_valid_file=None):
        """
        source: File path to a BioC XML input document.
        dtd_valid_file: File path to a BioC.dtd file. Using this
                        optional argument ensures DTD validation.
        Raises an Exception carrying the first DTD validation error
        when validation fails.
        """
        self.source = source
        self.collection = BioCCollection()
        self.xml_tree = etree.parse(source)
        if dtd_valid_file is not None:
            dtd = etree.DTD(dtd_valid_file)
            if dtd.validate(self.xml_tree) is False:
                raise(Exception(dtd.error_log.filter_from_errors()[0]))
    def read(self):
        """
        Invoke this method in order to read in the file provided by
        the source class variable. Only after this method has been
        called the BioCReader object gets populated.
        """
        self._read_collection()
    def _read_collection(self):
        #Read the top-level <collection>: source/date/key, infons, documents
        collection_elem = self.xml_tree.xpath('/collection')[0]
        self.collection.source = collection_elem.xpath('source')[0].text
        self.collection.date = collection_elem.xpath('date')[0].text
        self.collection.key = collection_elem.xpath('key')[0].text
        infon_elem_list = collection_elem.xpath('infon')
        document_elem_list = collection_elem.xpath('document')
        self._read_infons(infon_elem_list, self.collection)
        self._read_documents(document_elem_list)
    def _read_infons(self, infon_elem_list, infons_parent_elem):
        #Copy <infon key="...">text</infon> pairs onto the target object
        for infon_elem in infon_elem_list:
            infons_parent_elem.put_infon(self._get_infon_key(infon_elem),
                                         infon_elem.text)
    def _read_documents(self, document_elem_list):
        #Build a BioCDocument per <document> and attach to the collection
        for document_elem in document_elem_list:
            document = BioCDocument()
            document.id = document_elem.xpath('id')[0].text
            self._read_infons(document_elem.xpath('infon'), document)
            self._read_passages(document_elem.xpath('passage'),
                                document)
            self._read_relations(document_elem.xpath('relation'),
                                 document)
            self.collection.add_document(document)
    def _read_passages(self, passage_elem_list, document_parent_elem):
        #Build a BioCPassage per <passage>; a passage holds either
        #sentences or (optional) text plus annotations
        for passage_elem in passage_elem_list:
            passage = BioCPassage()
            self._read_infons(passage_elem.xpath('infon'), passage)
            passage.offset = passage_elem.xpath('offset')[0].text
            # Is this BioC document with <sentence>?
            if len(passage_elem.xpath('sentence')) > 0:
                self._read_sentences(passage_elem.xpath('sentence'), passage)
            else:
                # Is the (optional) text element available?
                try:
                    passage.text = passage_elem.xpath('text')[0].text
                except:
                    pass
                self._read_annotations(passage_elem.xpath('annotation'),
                                       passage)
            self._read_relations(passage_elem.xpath('relation'), passage)
            document_parent_elem.add_passage(passage)
    def _read_sentences(self, sentence_elem_list, passage_parent_elem):
        #Build a BioCSentence per <sentence> with its annotations/relations
        for sentence_elem in sentence_elem_list:
            sentence = BioCSentence()
            self._read_infons(sentence_elem.xpath('infon'), sentence)
            sentence.offset = sentence_elem.xpath('offset')[0].text
            sentence.text = sentence_elem.xpath('text')[0].text
            self._read_annotations(sentence_elem.xpath('annotation'), sentence)
            self._read_relations(sentence_elem.xpath('relation'), sentence)
            passage_parent_elem.add_sentence(sentence)
    def _read_annotations(self, annotation_elem_list, annotations_parent_elem):
        #Build a BioCAnnotation per <annotation> (id, infons, locations, text)
        for annotation_elem in annotation_elem_list:
            annotation = BioCAnnotation()
            # Attribute id is just #IMPLIED, not #REQUIRED
            if 'id' in annotation_elem.attrib:
                annotation.id = annotation_elem.attrib['id']
            self._read_infons(annotation_elem.xpath('infon'), annotation)
            for location_elem in annotation_elem.xpath('location'):
                location = BioCLocation()
                location.offset = location_elem.attrib['offset']
                location.length = location_elem.attrib['length']
                annotation.add_location(location)
            annotation.text = annotation_elem.xpath('text')[0].text
            annotations_parent_elem.add_annotation(annotation)
    def _read_relations(self, relation_elem_list, relations_parent_elem):
        #Build a BioCRelation per <relation> (id, infons, nodes)
        for relation_elem in relation_elem_list:
            relation = BioCRelation()
            # Attribute id is just #IMPLIED, not #REQUIRED
            if 'id' in relation_elem.attrib:
                relation.id = relation_elem.attrib['id']
            self._read_infons(relation_elem.xpath('infon'), relation)
            for node_elem in relation_elem.xpath('node'):
                node = BioCNode()
                node.refid = node_elem.attrib['refid']
                node.role = node_elem.attrib['role']
                relation.add_node(node)
            relations_parent_elem.add_relation(relation)
    def _get_infon_key(self, elem):
        #Extract the 'key' attribute of an <infon> element
        return elem.attrib['key']
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/bioc_reader.py",
"copies": "1",
"size": "5830",
"license": "bsd-2-clause",
"hash": -1406121962696513500,
"line_mean": 38.6598639456,
"line_max": 79,
"alpha_frac": 0.6065180103,
"autogenerated": false,
"ratio": 4.042995839112344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149513849412344,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BioCWriter']
from lxml.builder import E
from lxml.etree import tostring
class BioCWriter:
def __init__(self, filename=None, collection=None):
self.root_tree = None
self.collection = None
self.doctype = '''<?xml version='1.0' encoding='UTF-8'?>'''
self.doctype += '''<!DOCTYPE collection SYSTEM 'BioC.dtd'>'''
self.filename = filename
if collection is not None:
self.collection = collection
if filename is not None:
self.filename = filename
def __str__(self):
""" A BioCWriter object can be printed as string.
"""
self._check_for_data()
self.build()
s = tostring(self.root_tree, pretty_print=True, doctype=self.doctype)
return s
def _check_for_data(self):
if self.collection is None:
raise(Exception('No data available.'))
def write(self, filename=None):
""" Use this method to write the data in the PyBioC objects
to disk.
filename: Output file path (optional argument; filename
provided by __init__ used otherwise.)
"""
if filename is not None:
self.filename = filename
if self.filename is None:
raise(Exception('No output file path provided.'))
f = open(self.filename, 'w')
f.write(self.__str__())
def build(self):
self._build_collection()
def _build_collection(self):
self.root_tree = E('collection', E('source'), E('date'), E('key'))
self.root_tree.xpath('source')[0].text = self.collection.source
self.root_tree.xpath('date')[0].text = self.collection.date
self.root_tree.xpath('key')[0].text = self.collection.key
collection_elem = self.root_tree.xpath('/collection')[0]
# infon*
self._build_infons(self.collection.infons, collection_elem)
# document+
self._build_documents(self.collection.documents, collection_elem)
def _build_infons(self, infons_dict, infons_parent_elem):
for infon_key, infon_val in infons_dict.items():
infons_parent_elem.append(E('infon'))
infon_elem = infons_parent_elem.xpath('infon')[-1]
infon_elem.attrib['key'] = infon_key
infon_elem.text = infon_val
def _build_documents(self, documents_list, collection_parent_elem):
for document in documents_list:
collection_parent_elem.append(E('document', E('id')))
document_elem = collection_parent_elem.xpath('document')[-1]
# id
id_elem = document_elem.xpath('id')[0]
id_elem.text = document.id
# infon*
self._build_infons(document.infons, document_elem)
# passage+
self._build_passages(document.passages, document_elem)
# relation*
self._build_relations(document.relations, document_elem)
def _build_passages(self, passages_list, document_parent_elem):
for passage in passages_list:
document_parent_elem.append(E('passage'))
passage_elem = document_parent_elem.xpath('passage')[-1]
# infon*
self._build_infons(passage.infons, passage_elem)
# offset
passage_elem.append(E('offset'))
passage_elem.xpath('offset')[0].text = passage.offset
if passage.has_sentences():
# sentence*
self._build_sentences(passage.sentences, passage_elem)
else:
# text?, annotation*
passage_elem.append(E('text'))
passage_elem.xpath('text')[0].text = passage.text
self._build_annotations(passage.annotations,
passage_elem)
# relation*
self._build_relations(passage.relations, passage_elem)
def _build_relations(self, relations_list, relations_parent_elem):
for relation in relations_list:
relations_parent_elem.append(E('relation'))
relation_elem = relations_parent_elem.xpath('relation')[-1]
# infon*
self._build_infons(relation.infons, relation_elem)
# node*
for node in relation.nodes:
relation_elem.append(E('node'))
node_elem = relation_elem.xpath('node')[-1]
node_elem.attrib['refid'] = node.refid
node_elem.attrib['role'] = node.role
# id (just #IMPLIED)
if len(relation.id) > 0:
relation_elem.attrib['id'] = relation.id
def _build_annotations(self, annotations_list, annotations_parent_elem):
for annotation in annotations_list:
annotations_parent_elem.append(E('annotation'))
annotation_elem = \
annotations_parent_elem.xpath('annotation')[-1]
# infon*
self._build_infons(annotation.infons, annotation_elem)
# location*
for location in annotation.locations:
annotation_elem.append(E('location'))
location_elem = annotation_elem.xpath('location')[-1]
location_elem.attrib['offset'] = location.offset
location_elem.attrib['length'] = location.length
# text
annotation_elem.append(E('text'))
text_elem = annotation_elem.xpath('text')[0]
text_elem.text = annotation.text
# id (just #IMPLIED)
if len(annotation.id) > 0:
annotation_elem.attrib['id'] = annotation.id
def _build_sentences(self, sentences_list, passage_parent_elem):
for sentence in sentences_list:
passage_parent_elem.append(E('sentence'))
sentence_elem = passage_parent_elem.xpath('sentence')[-1]
# infon*
self._build_infons(sentence.infons, sentence_elem)
# offset
sentence_elem.append(E('offset'))
offset_elem = sentence_elem.xpath('offset')[0]
offset_elem.text = sentence.offset
# text?
if len(sentence.text) > 0:
sentence_elem.append(E('text'))
text_elem = sentence_elem.xpath('text')[0]
text_elem.text = sentence.text
# annotation*
self._build_annotations(sentence.annotations, sentence_elem)
# relation*
self._build_relations(sentence.relations, sentence_elem)
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/bioc_writer.py",
"copies": "1",
"size": "6551",
"license": "bsd-2-clause",
"hash": 1495877329484040700,
"line_mean": 38.4638554217,
"line_max": 77,
"alpha_frac": 0.5692260724,
"autogenerated": false,
"ratio": 4.107210031347963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5176436103747962,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Blackboard']


class Blackboard(object):
    """Key/value store with three nesting scopes: global, per-tree and
    per-node, as used by behavior-tree executors."""

    def __init__(self):
        # Global memory plus one lazily created dict per tree scope.
        self._base_memory = {}
        self._tree_memory = {}

    def _get_tree_memory(self, tree_scope):
        """Return (creating on demand) the memory dict for a tree."""
        tree_mem = self._tree_memory.get(tree_scope)
        if tree_mem is None:
            tree_mem = {'node_memory': {}, 'open_nodes': []}
            self._tree_memory[tree_scope] = tree_mem
        return tree_mem

    def _get_node_memory(self, tree_memory, node_scope):
        """Return (creating on demand) a node's dict inside a tree."""
        return tree_memory['node_memory'].setdefault(node_scope, {})

    def _get_memory(self, tree_scope, node_scope):
        """Resolve the dict addressed by the given scopes.

        NOTE(review): as in the original, passing node_scope without
        tree_scope raises KeyError -- node memory only exists inside
        a tree scope.
        """
        memory = self._base_memory
        if tree_scope is not None:
            memory = self._get_tree_memory(tree_scope)
        if node_scope is not None:
            memory = self._get_node_memory(memory, node_scope)
        return memory

    def set(self, key, value, tree_scope=None, node_scope=None):
        """Store *value* under *key* in the addressed scope."""
        self._get_memory(tree_scope, node_scope)[key] = value

    def get(self, key, tree_scope=None, node_scope=None):
        """Return the stored value, or None when absent."""
        return self._get_memory(tree_scope, node_scope).get(key)
"repo_name": "renatopp/behavior3py",
"path": "b3/core/blackboard.py",
"copies": "2",
"size": "1231",
"license": "mit",
"hash": 6621854830400299000,
"line_mean": 28.3333333333,
"line_max": 66,
"alpha_frac": 0.5613322502,
"autogenerated": false,
"ratio": 3.6528189910979227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017094017094017092,
"num_lines": 42
} |
"""All blast commands used by aTRAM."""
import glob
import json
import os
import re
import sys
from os.path import basename, dirname, join
from shutil import which
from . import util
def create_db(log, temp_dir, fasta_file, shard):
    """Create a blast database."""
    command = 'makeblastdb -dbtype nucl -in {} -out {}'.format(
        fasta_file, shard)
    log.subcommand(command, temp_dir)
def against_sra(args, log, state, hits_file, shard):
    """Blast the query sequences against an SRA blast database."""
    # A protein query only applies on the first iteration; later
    # iterations query with assembled nucleotides.
    if args['protein'] and state['iteration'] == 1:
        parts = ['tblastn',
                 '-db_gencode {}'.format(args['blast_db_gencode'])]
    else:
        parts = ['blastn',
                 '-evalue {}'.format(args['blast_evalue'])]
    parts.append('-outfmt 15')
    parts.append('-max_target_seqs {}'.format(
        args['blast_max_target_seqs']))
    parts.append('-out {}'.format(hits_file))
    parts.append('-db {}'.format(shard))
    parts.append('-query {}'.format(state['query_file']))
    if args['blast_word_size']:
        parts.append('-word_size {}'.format(args['blast_word_size']))
    log.subcommand(' '.join(parts), args['temp_dir'],
                   timeout=args['timeout'])
def against_contigs(log, blast_db, query_file, hits_file, **kwargs):
    """Blast the query sequence against the contigs.

    The blast output will have the scores for later processing.
    """
    if kwargs['protein']:
        parts = ['tblastn',
                 '-db_gencode {}'.format(kwargs['blast_db_gencode'])]
    else:
        parts = ['blastn']
    parts += ['-db {}'.format(blast_db),
              '-query {}'.format(query_file),
              '-out {}'.format(hits_file),
              '-outfmt 15']
    log.subcommand(' '.join(parts), kwargs['temp_dir'],
                   timeout=kwargs['timeout'])
def all_shard_paths(log, blast_db):
    """Get all of the BLAST shard names built by the preprocessor."""
    pattern = '{}.*.blast.nhr'.format(blast_db)
    paths = glob.glob(pattern)
    if not paths:
        log.fatal(
            'No blast shards found. Looking for "{}"\n'
            'Verify the --work-dir and --file-prefix options.'.format(
                pattern[:-4]))
    # Strip the ".nhr" suffix to get the shard base names.
    return sorted(path[:-4] for path in paths)
def output_file_name(temp_dir, shrd_path):
    """Create a file name for blast results."""
    return join(temp_dir, '{}.results.json'.format(basename(shrd_path)))
def temp_db_name(temp_dir, blast_db):
    """Generate a name for the temp DB used to filter the contigs."""
    return join(temp_dir, basename(blast_db))
def get_raw_hits(log, json_file):
    """Extract the raw blast hits from the blast json output file."""
    with open(json_file) as blast_file:
        raw = blast_file.read()

    # Allow empty results
    if not raw:
        return []

    # Do not allow bad json
    try:
        parsed = json.loads(raw)
    except json.decoder.JSONDecodeError:
        log.fatal('Blast output is not in JSON format. '
                  'You may need to upgrade blast.')

    return parsed['BlastOutput2'][0]['report']['results']['search'].get(
        'hits', [])
def hits(log, json_file):
    """Extract the blast hits from the blast json output file."""
    extracted = []
    for raw in get_raw_hits(log, json_file):
        # Each description shares the hit length and pairs with the
        # HSP at the same index.
        for i, desc in enumerate(raw['description']):
            hit = dict(desc)
            hit['len'] = raw['len']
            hit.update(raw['hsps'][i])
            extracted.append(hit)
    return extracted
def command_line_args(parser):
    """Add optional blast arguments to the command-line parser.

    Each option has a long form prefixed with "blast-" and a short
    alias; values land in the args dict under the "blast_*" keys.
    """
    group = parser.add_argument_group('optional blast arguments')
    group.add_argument('--blast-db-gencode', '--db-gencode', type=int,
                       default=1, metavar='CODE',
                       help="""The genetic code to use during blast runs.
                            (default %(default)s)""")
    group.add_argument('--blast-evalue', '--evalue', type=float, default=1e-10,
                       help="""(default %(default)s)""")
    group.add_argument('--blast-word-size', '--word-size', type=int,
                       help="""Word size for wordfinder algorithm.
                            'Must be >= 2.""")
    group.add_argument('--blast-max-target-seqs', '--max-target-seqs',
                       type=int, default=100000000, metavar='MAX',
                       help="""Maximum hit sequences per shard.
                            Default is calculated based on the available
                            memory and the number of shards.""")
    group.add_argument('--blast-batch-size', '--batch-size', type=int,
                       help="""Use this option to control blast memory usage
                            and the concatenation of queries. Setting this
                            value too low can degrade performance.""")
def check_args(args):
    """Validate blast arguments."""
    word_size = args['blast_word_size']
    if word_size and word_size < 2:
        sys.exit('--word-size must be >= 2.')
def default_max_target_seqs(log, max_target_seqs, blast_db, max_memory):
    """Calculate the default max_target_seqs per shard."""
    if max_target_seqs:
        return max_target_seqs
    # Split the memory budget (in MB units) across the shards.
    shard_count = len(all_shard_paths(log, blast_db))
    return int(2 * max_memory / shard_count) * 1e6
def default_shard_count(args, sra_files):
    """Calculate the default number of shards."""
    shard_count = args['shard_count']
    if not shard_count:
        # Roughly one shard per 250 MB of fasta data, minimum one.
        total_fasta_size = sum(
            util.shard_file_size(args, file_name) for file_name in sra_files)
        shard_count = max(int(total_fasta_size / 2.5e8), 1)
    return shard_count
def make_blast_output_dir(blast_db):
    """Make blast DB output directory."""
    output_dir = dirname(blast_db)
    # Nothing to create for bare file names or relative dot paths.
    if output_dir and output_dir not in ('.', '..'):
        os.makedirs(output_dir, exist_ok=True)
def touchup_blast_db_names(blast_dbs):
    """Allow users to enter blast DB names with various suffixes."""
    suffix_re = re.compile(
        r'^ (.*?)'
        r'( \.atram(_preprocessor)?\.log'
        r' | \.blast_\d{3}\.(nhr|nin|nsq)'
        r' | \.sqlite\.db )?$',
        re.I | re.X)
    # Keep only the base name captured before any known suffix.
    return [re.sub(suffix_re, r'\1', blast_db) for blast_db in blast_dbs]
def find_program(program):
    """Make sure we can find the needed blast program.

    Exits the process with an error message when the blast suite is
    not on the PATH.

    NOTE(review): the check ignores the `program` argument and always
    tests for all three blast executables; only the error message uses
    `program`. Confirm whether per-program checking was intended.
    """
    if not (which('makeblastdb') and which('tblastn') and which('blastn')):
        err = ('We could not find the programs "{}". You either need to '
               'install it or you need adjust the PATH environment variable '
               'with the "--path" option so that aTRAM can '
               'find it.').format(program)
        sys.exit(err)
def parse_fasta_title(title, ends, seq_end_clamp):
    """Try to get the sequence name & which end it is from the fasta title."""
    words = title.split()
    first = words[0] if words else ''

    match = re.match(r'(.+)[./_]([12])$', first)
    if match:
        # End marker fused onto the first word, e.g. "read/1" or "read_2".
        seq_name = first if ends == 'single_ends' else match.group(1)
        seq_end = match.group(2) if ends == 'mixed_ends' else seq_end_clamp
    elif len(words) > 1 and re.match(r'[12]$', words[1]):
        # End marker given as a separate second word.
        seq_name = ' '.join(words[:2]) if ends == 'single_ends' else first
        seq_end = words[1] if ends == 'mixed_ends' else seq_end_clamp
    else:
        # No recognizable end marker; fall back to the clamp value.
        seq_name = first
        seq_end = seq_end_clamp
    return seq_name, seq_end
def parse_blast_title(title, is_single_end):
    """Try to get the sequence name & which end it is from the blast title."""
    match = re.match(r'(.+)[\s./_]([12])$', title)
    if match and not is_single_end:
        return match.group(1), match.group(2)
    return title, ''
def set_blast_batch_size(batch_size):
    """Use this to control blast memory usage & query concatenation."""
    # A falsy value means "leave blast's default alone".
    if not batch_size:
        return
    os.environ['BATCH_SIZE'] = str(batch_size)
| {
"repo_name": "juliema/aTRAM",
"path": "lib/blast.py",
"copies": "1",
"size": "8360",
"license": "bsd-3-clause",
"hash": 2570817105458704400,
"line_mean": 32.44,
"line_max": 80,
"alpha_frac": 0.5886363636,
"autogenerated": false,
"ratio": 3.5096557514693534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45982921150693534,
"avg_score": null,
"num_lines": null
} |
"""All blast commands used by aTRAM."""
import sys
import os
from os.path import basename, dirname, join
import re
import glob
import json
from shutil import which
from . import log
from . import util
def create_db(temp_dir, fasta_file, shard):
    """Create a blast database."""
    command = 'makeblastdb -dbtype nucl -in {} -out {}'.format(
        fasta_file, shard)
    log.subcommand(command, temp_dir)
def against_sra(args, state, hits_file, shard):
    """Blast the query sequences against an SRA blast database."""
    # A protein query only applies on the first iteration; later
    # iterations query with assembled nucleotides.
    if args['protein'] and state['iteration'] == 1:
        parts = ['tblastn', '-db_gencode {}'.format(args['db_gencode'])]
    else:
        parts = ['blastn', '-evalue {}'.format(args['evalue'])]
    parts.append('-outfmt 15')
    parts.append('-max_target_seqs {}'.format(args['max_target_seqs']))
    parts.append('-out {}'.format(hits_file))
    parts.append('-db {}'.format(shard))
    parts.append('-query {}'.format(state['query_file']))
    if args['word_size']:
        parts.append('-word_size {}'.format(args['word_size']))
    log.subcommand(' '.join(parts), args['temp_dir'],
                   timeout=args['timeout'])
def against_contigs(blast_db, query_file, hits_file, **kwargs):
    """Blast the query sequence against the contigs.

    The blast output will have the scores for later processing.
    """
    if kwargs['protein']:
        parts = ['tblastn', '-db_gencode {}'.format(kwargs['db_gencode'])]
    else:
        parts = ['blastn']
    parts += ['-db {}'.format(blast_db),
              '-query {}'.format(query_file),
              '-out {}'.format(hits_file),
              '-outfmt 15']
    log.subcommand(' '.join(parts), kwargs['temp_dir'],
                   timeout=kwargs['timeout'])
def all_shard_paths(blast_db):
    """Get all of the BLAST shard names built by the preprocessor."""
    pattern = '{}.*.blast.nhr'.format(blast_db)
    paths = glob.glob(pattern)
    if not paths:
        log.fatal(
            'No blast shards found. Looking for "{}"\n'
            'Verify the --work-dir and --file-prefix options.'.format(
                pattern[:-4]))
    # Strip the ".nhr" suffix to get the shard base names.
    return sorted(path[:-4] for path in paths)
def output_file_name(temp_dir, shrd_path):
    """Create a file name for blast results."""
    return join(temp_dir, '{}.results.json'.format(basename(shrd_path)))
def temp_db_name(temp_dir, blast_db):
    """Generate a name for the temp DB used to filter the contigs."""
    return join(temp_dir, basename(blast_db))
def get_raw_hits(json_file):
    """Extract the raw blast hits from the blast json output file."""
    with open(json_file) as blast_file:
        raw = blast_file.read()

    # Allow empty results
    if not raw:
        return []

    # Do not allow bad json
    try:
        parsed = json.loads(raw)
    except json.decoder.JSONDecodeError:
        log.fatal('Blast output is not in JSON format. '
                  'You may need to upgrade blast.')

    return parsed['BlastOutput2'][0]['report']['results']['search'].get(
        'hits', [])
def hits(json_file):
    """Extract the blast hits from the blast json output file."""
    extracted = []
    for raw in get_raw_hits(json_file):
        # Each description shares the hit length and pairs with the
        # HSP at the same index.
        for i, desc in enumerate(raw['description']):
            hit = dict(desc)
            hit['len'] = raw['len']
            hit.update(raw['hsps'][i])
            extracted.append(hit)
    return extracted
def command_line_args(parser):
    """Add optional blast arguments to the command-line parser.

    Values land in the args dict under "db_gencode", "evalue",
    "word_size", "max_target_seqs" and "batch_size".
    """
    group = parser.add_argument_group('optional blast arguments')
    group.add_argument('--db-gencode', type=int, default=1,
                       metavar='CODE',
                       help="""The genetic code to use during blast runs.
                            The default is "1".""")
    group.add_argument('--evalue', type=float, default=1e-10,
                       help="""The default evalue is "1e-10".""")
    group.add_argument('--word-size', type=int,
                       help="""Word size for wordfinder algorithm.
                            'Must be >= 2.""")
    group.add_argument('--max-target-seqs', type=int, default=100000000,
                       metavar='MAX',
                       help="""Maximum hit sequences per shard.
                            Default is calculated based on the available
                            memory and the number of shards.""")
    group.add_argument('--batch-size', type=int,
                       help="""Use this option to control blast memory usage
                            and the concatenation of queries. Setting this
                            value too low can degrade performance.""")
def check_args(args):
    """Validate blast arguments."""
    word_size = args['word_size']
    if word_size and word_size < 2:
        sys.exit('--word-size must be >= 2.')
def default_max_target_seqs(max_target_seqs, blast_db, max_memory):
    """Calculate the default max_target_seqs per shard."""
    if max_target_seqs:
        return max_target_seqs
    # Split the memory budget (in MB units) across the shards.
    shard_count = len(all_shard_paths(blast_db))
    return int(2 * max_memory / shard_count) * 1e6
def default_shard_count(args, sra_files):
    """Calculate the default number of shards."""
    shard_count = args['shard_count']
    if not shard_count:
        # Roughly one shard per 250 MB of fasta data, minimum one.
        total_fasta_size = sum(
            util.shard_file_size(args, file_name) for file_name in sra_files)
        shard_count = max(int(total_fasta_size / 2.5e8), 1)
    return shard_count
def make_blast_output_dir(blast_db):
    """Make blast DB output directory."""
    output_dir = dirname(blast_db)
    # Nothing to create for bare file names or relative dot paths.
    if output_dir and output_dir not in ('.', '..'):
        os.makedirs(output_dir, exist_ok=True)
def touchup_blast_db_names(blast_dbs):
    """Allow users to enter blast DB names with various suffixes."""
    suffix_re = re.compile(
        r'^ (.*?)'
        r'( \.atram(_preprocessor)?\.log'
        r' | \.blast_\d{3}\.(nhr|nin|nsq)'
        r' | \.sqlite\.db )?$',
        re.I | re.X)
    # Keep only the base name captured before any known suffix.
    return [re.sub(suffix_re, r'\1', blast_db) for blast_db in blast_dbs]
def find_program(program):
    """Make sure we can find the needed blast program.

    Exits the process with an error message when the blast suite is
    not on the PATH.

    NOTE(review): the check ignores the `program` argument and always
    tests for all three blast executables; only the error message uses
    `program`. Confirm whether per-program checking was intended.
    """
    if not (which('makeblastdb') and which('tblastn') and which('blastn')):
        err = ('We could not find the programs "{}". You either need to '
               'install it or you need adjust the PATH environment variable '
               'with the "--path" option so that aTRAM can '
               'find it.').format(program)
        sys.exit(err)
def parse_fasta_title(title, ends, seq_end_clamp):
    """Try to get the sequence name & which end it is from the fasta title."""
    words = title.split()
    first = words[0] if words else ''

    match = re.match(r'(.+)[./_]([12])$', first)
    if match:
        # End marker fused onto the first word, e.g. "read/1" or "read_2".
        seq_name = first if ends == 'single_ends' else match.group(1)
        seq_end = match.group(2) if ends == 'mixed_ends' else seq_end_clamp
    elif len(words) > 1 and re.match(r'[12]$', words[1]):
        # End marker given as a separate second word.
        seq_name = ' '.join(words[:2]) if ends == 'single_ends' else first
        seq_end = words[1] if ends == 'mixed_ends' else seq_end_clamp
    else:
        # No recognizable end marker; fall back to the clamp value.
        seq_name = first
        seq_end = seq_end_clamp
    return seq_name, seq_end
def parse_blast_title(title, is_single_end):
    """Try to get the sequence name & which end it is from the blast title."""
    match = re.match(r'(.+)[\s./_]([12])$', title)
    if match and not is_single_end:
        return match.group(1), match.group(2)
    return title, ''
| {
"repo_name": "AntonelliLab/seqcap_processor",
"path": "bin/aTRAM-master/lib/blast.py",
"copies": "1",
"size": "8019",
"license": "mit",
"hash": -9126472458638096000,
"line_mean": 31.5975609756,
"line_max": 78,
"alpha_frac": 0.5838633246,
"autogenerated": false,
"ratio": 3.554521276595745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638384601195744,
"avg_score": 0,
"num_lines": 246
} |
# Public exception names re-exported for pre-3.3 interpreters.
# NOTE(review): ProcessLookupError is defined below but missing from
# this list -- confirm whether that omission is intentional.
__all__ = ['BlockingIOError', 'BrokenPipeError', 'ChildProcessError',
           'ConnectionRefusedError', 'ConnectionResetError',
           'InterruptedError', 'ConnectionAbortedError', 'PermissionError',
           'FileNotFoundError',
           ]
import errno
import select
import socket
import sys
try:
import ssl
except ImportError:
ssl = None
from .compat import PY33
if PY33:
    # Python >= 3.3: the PEP 3151 exception hierarchy exists natively;
    # just re-export the builtin classes under module-level names.
    import builtins
    BlockingIOError = builtins.BlockingIOError
    BrokenPipeError = builtins.BrokenPipeError
    ChildProcessError = builtins.ChildProcessError
    ConnectionRefusedError = builtins.ConnectionRefusedError
    ConnectionResetError = builtins.ConnectionResetError
    InterruptedError = builtins.InterruptedError
    ConnectionAbortedError = builtins.ConnectionAbortedError
    PermissionError = builtins.PermissionError
    FileNotFoundError = builtins.FileNotFoundError
    ProcessLookupError = builtins.ProcessLookupError
else:
    # Python < 3.3: provide minimal OSError subclasses so callers can
    # catch the PEP 3151 names on old interpreters.
    class BlockingIOError(OSError):
        pass

    class BrokenPipeError(OSError):
        pass

    class ChildProcessError(OSError):
        pass

    class ConnectionRefusedError(OSError):
        pass

    class InterruptedError(OSError):
        pass

    class ConnectionResetError(OSError):
        pass

    class ConnectionAbortedError(OSError):
        pass

    class PermissionError(OSError):
        pass

    class FileNotFoundError(OSError):
        pass

    class ProcessLookupError(OSError):
        pass
# errno value -> specialized exception class (the PEP 3151 mapping).
_MAP_ERRNO = {
    errno.EACCES: PermissionError,
    errno.EAGAIN: BlockingIOError,
    errno.EALREADY: BlockingIOError,
    errno.ECHILD: ChildProcessError,
    errno.ECONNABORTED: ConnectionAbortedError,
    errno.ECONNREFUSED: ConnectionRefusedError,
    errno.ECONNRESET: ConnectionResetError,
    errno.EINPROGRESS: BlockingIOError,
    errno.EINTR: InterruptedError,
    errno.ENOENT: FileNotFoundError,
    errno.EPERM: PermissionError,
    errno.EPIPE: BrokenPipeError,
    errno.ESHUTDOWN: BrokenPipeError,
    errno.EWOULDBLOCK: BlockingIOError,
    errno.ESRCH: ProcessLookupError,
}

if sys.platform == 'win32':
    # Windows overlapped-I/O error codes map onto the same hierarchy.
    from trollius import _overlapped
    _MAP_ERRNO.update({
        _overlapped.ERROR_CONNECTION_REFUSED: ConnectionRefusedError,
        _overlapped.ERROR_CONNECTION_ABORTED: ConnectionAbortedError,
        _overlapped.ERROR_NETNAME_DELETED: ConnectionResetError,
    })
def get_error_class(key, default):
    """Return the exception class mapped to errno *key*, or *default*."""
    try:
        return _MAP_ERRNO[key]
    except KeyError:
        return default
if sys.version_info >= (3,):
    def reraise(tp, value, tb=None):
        """Re-raise *value* with traceback *tb* (Python 3 form)."""
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    # The three-argument Python 2 raise statement is a syntax error on
    # Python 3, so it must be hidden inside exec().
    exec("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
def _wrap_error(exc, mapping, key):
    """Re-raise *exc* as the specialized class mapping[key].

    Returns silently when *key* has no mapping so callers can fall
    through to their own bare `raise`.
    """
    if key not in mapping:
        return
    new_err_cls = mapping[key]
    new_err = new_err_cls(*exc.args)

    # raise a new exception with the original traceback
    if hasattr(exc, '__traceback__'):
        traceback = exc.__traceback__
    else:
        traceback = sys.exc_info()[2]
    reraise(new_err_cls, new_err, traceback)
if not PY33:
    def wrap_error(func, *args, **kw):
        """
        Wrap socket.error, IOError, OSError, select.error to raise new specialized
        exceptions of Python 3.3 like InterruptedError (PEP 3151).
        """
        try:
            return func(*args, **kw)
        except (socket.error, IOError, OSError) as exc:
            # SSL errors subclass socket.error but must not be remapped.
            if ssl is not None and isinstance(exc, ssl.SSLError):
                raise
            if hasattr(exc, 'winerror'):
                _wrap_error(exc, _MAP_ERRNO, exc.winerror)
                # _MAP_ERRNO does not contain all Windows errors.
                # For some errors like "file not found", exc.errno should
                # be used (ex: ENOENT).
            _wrap_error(exc, _MAP_ERRNO, exc.errno)
            raise
        except select.error as exc:
            if exc.args:
                _wrap_error(exc, _MAP_ERRNO, exc.args[0])
            raise
else:
    # Native Python >= 3.3 already raises the specialized classes.
    def wrap_error(func, *args, **kw):
        return func(*args, **kw)
| {
"repo_name": "overcastcloud/trollius",
"path": "trollius/py33_exceptions.py",
"copies": "1",
"size": "4083",
"license": "apache-2.0",
"hash": -1427602448648978000,
"line_mean": 27.3541666667,
"line_max": 82,
"alpha_frac": 0.6571148665,
"autogenerated": false,
"ratio": 4.046580773042616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008366800535475235,
"num_lines": 144
} |
ALL = ['Block_Mapper']  # NOTE(review): probably meant `__all__`; as
                        # written this name has no special effect.


class Block_Mapper:
    """
    process data after receiving a block of records
    """

    def __init__(self, blk_sz):
        self.blk_sz = blk_sz  # flush threshold (may be float('inf'))
        self.data = []        # parsed values buffered so far
        self.sz = 0           # number of buffered values

    def __call__(self, records):
        """Stream (key, value) records through parse/process/close."""
        for record in records:
            self.data.append(self.parse(record[1]))
            self.sz += 1
            if self.sz >= self.blk_sz:
                # Block is full: emit its results and start fresh.
                for key, value in self.process():
                    yield key, value
                self.data = []
                self.sz = 0
        if self.sz > 0:
            # Emit the final, partially filled block.
            for key, value in self.process():
                yield key, value
        for key, value in self.close():
            yield key, value

    def parse(self, row):
        """Hook: transform one record value (identity by default)."""
        return row

    def process(self):
        """Hook: yield (key, value) pairs for a buffered block."""
        return iter([])

    def close(self):
        """Hook: yield trailing (key, value) pairs after all input."""
        return iter([])


class Block_Dumper(Block_Mapper):
    """
    collect all data and send
    """

    def __init__(self):
        # Infinite block size: buffer everything until close().
        Block_Mapper.__init__(self, float('inf'))

    def parse(self, row):
        return row[1]

    def close(self):
        yield 0, self.data
| {
"repo_name": "chocjy/randomized-quantile-regression-solvers",
"path": "hadoop/src/utils.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": -1643533356265847000,
"line_mean": 23.6222222222,
"line_max": 51,
"alpha_frac": 0.4864620939,
"autogenerated": false,
"ratio": 4.058608058608058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045070152508059,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the file-backed block storage class only.
__all__ = ('BlockStorageFile',)
import os
import struct
import logging
import errno
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class default_filesystem(object):
    """Filesystem shim mirroring the builtins; lets tests inject a
    fake filesystem via the `_filesystem` keyword arguments below."""
    open = open
    remove = os.remove
    stat = os.stat
class BlockStorageFile(BlockStorageInterface):
"""
A class implementing the block storage interface
using a local file.
"""
_index_struct_string = "!LLL?"
_index_offset = struct.calcsize(_index_struct_string)
def __init__(self,
storage_name,
threadpool_size=None,
ignore_lock=False,
_filesystem=default_filesystem):
self._bytes_sent = 0
self._bytes_received = 0
self._filesystem = _filesystem
self._ignore_lock = ignore_lock
self._f = None
self._pool = None
self._close_pool = True
self._async_write = None
self._storage_name = storage_name
self._f = self._filesystem.open(self.storage_name, "r+b")
self._f.seek(0)
self._block_size, self._block_count, user_header_size, locked = \
struct.unpack(
BlockStorageFile._index_struct_string,
self._f.read(BlockStorageFile._index_offset))
if locked and (not self._ignore_lock):
self._f.close()
self._f = None
raise IOError(
"Can not open block storage device because it is "
"locked by another process. To ignore this check, "
"initialize this class with the keyword 'ignore_lock' "
"set to True.")
self._user_header_data = bytes()
if user_header_size > 0:
self._user_header_data = \
self._f.read(user_header_size)
self._header_offset = BlockStorageFile._index_offset + \
len(self._user_header_data)
# TODO: Figure out why this is required for Python3
# in order to prevent issues with the
# TopCachedEncryptedHeapStorage class. The
# problem has something to do with bufferedio,
# but it makes no sense why this fixes it (all
# we've done is read above these lines). As
# part of this, investigate whethor or not we
# need the call to flush after write_block(s),
# or if its simply connected to some Python3
# bug in bufferedio.
self._f.flush()
if not self._ignore_lock:
# turn on the locked flag
self._f.seek(0)
self._f.write(
struct.pack(BlockStorageFile._index_struct_string,
self.block_size,
self.block_count,
len(self._user_header_data),
True))
self._f.flush()
if threadpool_size != 0:
self._pool = ThreadPool(threadpool_size)
def _check_async(self):
if self._async_write is not None:
self._async_write.get()
self._async_write = None
# TODO: Figure out why tests fail on Python3 without this
if six.PY3:
if self._f is None:
return
self._f.flush()
def _schedule_async_write(self, args, callback=None):
assert self._async_write is None
if self._pool is not None:
self._async_write = \
self._pool.apply_async(self._writev, (args, callback))
else:
self._writev(args, callback)
# This method is usually executed in another thread, so
# do not attempt to handle exceptions because it will
# not work.
def _writev(self, chunks, callback):
for i, block in chunks:
self._f.seek(self._header_offset + i * self.block_size)
self._f.write(block)
if callback is not None:
callback(i)
def _prep_for_close(self):
self._check_async()
if self._close_pool and (self._pool is not None):
self._pool.close()
self._pool.join()
self._pool = None
if self._f is not None:
if not self._ignore_lock:
# turn off the locked flag
self._f.seek(0)
self._f.write(
struct.pack(BlockStorageFile._index_struct_string,
self.block_size,
self.block_count,
len(self._user_header_data),
False))
self._f.flush()
#
# Define BlockStorageInterface Methods
#
def clone_device(self):
f = BlockStorageFile(self.storage_name,
threadpool_size=0,
ignore_lock=True)
f._pool = self._pool
f._close_pool = False
return f
@classmethod
def compute_storage_size(cls,
block_size,
block_count,
header_data=None,
ignore_header=False):
assert (block_size > 0) and (block_size == int(block_size))
assert (block_count > 0) and (block_count == int(block_count))
if header_data is None:
header_data = bytes()
if ignore_header:
return block_size * block_count
else:
return BlockStorageFile._index_offset + \
len(header_data) + \
block_size * block_count
@classmethod
def setup(cls,
storage_name,
block_size,
block_count,
initialize=None,
header_data=None,
ignore_existing=False,
threadpool_size=None,
_filesystem=default_filesystem):
if (not ignore_existing):
_exists = True
try:
_filesystem.stat(storage_name)
except OSError as e:
if e.errno == errno.ENOENT:
_exists = False
if _exists:
raise IOError(
"Storage location already exists: %s"
% (storage_name))
if (block_size <= 0) or (block_size != int(block_size)):
raise ValueError(
"Block size (bytes) must be a positive integer: %s"
% (block_size))
if (block_count <= 0) or (block_count != int(block_count)):
raise ValueError(
"Block count must be a positive integer: %s"
% (block_count))
if (header_data is not None) and \
(type(header_data) is not bytes):
raise TypeError(
"'header_data' must be of type bytes. "
"Invalid type: %s" % (type(header_data)))
if initialize is None:
zeros = bytes(bytearray(block_size))
initialize = lambda i: zeros
try:
with _filesystem.open(storage_name, "wb") as f:
# create_index
if header_data is None:
f.write(struct.pack(BlockStorageFile._index_struct_string,
block_size,
block_count,
0,
False))
else:
f.write(struct.pack(BlockStorageFile._index_struct_string,
block_size,
block_count,
len(header_data),
False))
f.write(header_data)
with tqdm.tqdm(total=block_count*block_size,
desc="Initializing File Block Storage Space",
unit="B",
unit_scale=True,
disable=not pyoram.config.SHOW_PROGRESS_BAR) as progress_bar:
for i in xrange(block_count):
block = initialize(i)
assert len(block) == block_size, \
("%s != %s" % (len(block), block_size))
f.write(block)
progress_bar.update(n=block_size)
except: # pragma: no cover
_filesystem.remove(storage_name) # pragma: no cover
raise # pragma: no cover
return BlockStorageFile(storage_name,
threadpool_size=threadpool_size,
_filesystem=_filesystem)
@property
def header_data(self):
return self._user_header_data
@property
def block_count(self):
return self._block_count
@property
def block_size(self):
return self._block_size
@property
def storage_name(self):
return self._storage_name
def update_header_data(self, new_header_data):
self._check_async()
if len(new_header_data) != len(self.header_data):
raise ValueError(
"The size of header data can not change.\n"
"Original bytes: %s\n"
"New bytes: %s" % (len(self.header_data),
len(new_header_data)))
self._user_header_data = bytes(new_header_data)
self._f.seek(BlockStorageFile._index_offset)
self._f.write(self._user_header_data)
def close(self):
self._prep_for_close()
if self._f is not None:
try:
self._f.close()
except OSError: # pragma: no cover
pass # pragma: no cover
self._f = None
def read_blocks(self, indices):
self._check_async()
blocks = []
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
blocks.append(self._f.read(self.block_size))
return blocks
def yield_blocks(self, indices):
self._check_async()
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
yield self._f.read(self.block_size)
def read_block(self, i):
self._check_async()
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
return self._f.read(self.block_size)
def write_blocks(self, indices, blocks, callback=None):
self._check_async()
chunks = []
for i, block in zip(indices, blocks):
assert 0 <= i < self.block_count
assert len(block) == self.block_size, \
("%s != %s" % (len(block), self.block_size))
self._bytes_sent += self.block_size
chunks.append((i, block))
self._schedule_async_write(chunks, callback=callback)
def write_block(self, i, block):
    """Queue a single-block write at index *i*."""
    self._check_async()
    assert 0 <= i < self.block_count
    assert len(block) == self.block_size
    self._bytes_sent += self.block_size
    payload = ((i, block),)
    self._schedule_async_write(payload)
@property
def bytes_sent(self):
    """Return the total number of payload bytes written so far."""
    return self._bytes_sent
@property
def bytes_received(self):
    """Return the total number of payload bytes read so far."""
    return self._bytes_received
BlockStorageTypeFactory.register_device("file", BlockStorageFile)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/block_storage_file.py",
"copies": "1",
"size": "12045",
"license": "mit",
"hash": 5361524853358815000,
"line_mean": 35.0628742515,
"line_max": 92,
"alpha_frac": 0.502864259,
"autogenerated": false,
"ratio": 4.43646408839779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.543932834739779,
"avg_score": null,
"num_lines": null
} |
__all__ = ('BlockStorageMMap',)
import logging
import mmap
from pyoram.storage.block_storage import \
BlockStorageTypeFactory
from pyoram.storage.block_storage_file import \
BlockStorageFile
log = logging.getLogger("pyoram")
class _BlockStorageMemoryImpl(object):
"""
This class implementents the BlockStorageInterface read/write
methods for classes with a private attribute _f that can be
accessed using __getslice__/__setslice__ notation.
"""
def read_blocks(self, indices):
blocks = []
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
pos_start = self._header_offset + i * self.block_size
pos_stop = pos_start + self.block_size
blocks.append(self._f[pos_start:pos_stop])
return blocks
def yield_blocks(self, indices):
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
pos_start = self._header_offset + i * self.block_size
pos_stop = pos_start + self.block_size
yield self._f[pos_start:pos_stop]
def read_block(self, i):
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
pos_start = self._header_offset + i * self.block_size
pos_stop = pos_start + self.block_size
return self._f[pos_start:pos_stop]
def write_blocks(self, indices, blocks, callback=None):
for i, block in zip(indices, blocks):
assert 0 <= i < self.block_count
self._bytes_sent += self.block_size
pos_start = self._header_offset + i * self.block_size
pos_stop = pos_start + self.block_size
self._f[pos_start:pos_stop] = block
if callback is not None:
callback(i)
def write_block(self, i, block):
assert 0 <= i < self.block_count
self._bytes_sent += self.block_size
pos_start = self._header_offset + i * self.block_size
pos_stop = pos_start + self.block_size
self._f[pos_start:pos_stop] = block
class BlockStorageMMap(_BlockStorageMemoryImpl,
                       BlockStorageFile):
    """
    A class implementing the block storage interface by creating a
    memory map over a local file. This class uses the same storage
    format as BlockStorageFile. Thus, a block storage space can be
    created using this class and then, after saving the raw storage
    data to disk, reopened with any other class compatible with
    BlockStorageFile (and vice versa).
    """

    def __init__(self, *args, **kwds):
        # 'mm' optionally supplies an already-created memory map to
        # share (used by clone_device); otherwise we map the file that
        # BlockStorageFile.__init__ opens.
        mm = kwds.pop('mm', None)
        self._mmap_owned = True
        super(BlockStorageMMap, self).__init__(*args, **kwds)
        if mm is None:
            # Map the entire file (length 0 means "whole file").
            self._f.flush()
            mm = mmap.mmap(self._f.fileno(), 0)
        else:
            # The map belongs to another instance; close() must not
            # tear it down.
            self._mmap_owned = False
        # Replace the file handle with the map: the mixin's slice-based
        # read/write methods operate directly on self._f.
        self._f.close()
        self._f = mm

    #
    # Define BlockStorageInterface Methods
    # (override what is defined on BlockStorageFile)
    #

    #@classmethod
    #def compute_storage_size(...)

    def clone_device(self):
        """Return a device sharing this one's memory map and thread pool."""
        f = BlockStorageMMap(self.storage_name,
                             threadpool_size=0,
                             mm=self._f,
                             ignore_lock=True)
        f._pool = self._pool
        f._close_pool = False
        return f

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              **kwds):
        """Create new storage via BlockStorageFile.setup, then reopen it
        memory-mapped."""
        f = BlockStorageFile.setup(storage_name,
                                   block_size,
                                   block_count,
                                   **kwds)
        f.close()
        return BlockStorageMMap(storage_name)

    #def update_header_data(...)

    def close(self):
        """Close the device; unmap the memory only if this instance owns it."""
        self._prep_for_close()
        if self._f is not None:
            if self._mmap_owned:
                try:
                    self._f.close()
                except OSError:                        # pragma: no cover
                    pass                               # pragma: no cover
            self._f = None

    #def read_blocks(...)
    #def yield_blocks(...)
    #def read_block(...)
    #def write_blocks(...)
    #def write_block(...)
    #@property
    #def bytes_sent(...)
    #@property
    #def bytes_received(...)
BlockStorageTypeFactory.register_device("mmap", BlockStorageMMap)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/block_storage_mmap.py",
"copies": "1",
"size": "4515",
"license": "mit",
"hash": 8462087366224320000,
"line_mean": 30.5734265734,
"line_max": 73,
"alpha_frac": 0.5530454042,
"autogenerated": false,
"ratio": 4.138405132905591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004162778285121458,
"num_lines": 143
} |
__all__ = ('BlockStorageRAM',)
import os
import struct
import logging
import errno
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
from pyoram.storage.block_storage_mmap import \
(BlockStorageMMap,
_BlockStorageMemoryImpl)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class BlockStorageRAM(_BlockStorageMemoryImpl,
                      BlockStorageInterface):
    """
    A class implementing the block storage interface where all data is
    kept in RAM. This class uses the same storage format as
    BlockStorageFile. Thus, a block storage space can be created using
    this class and then, after saving the raw storage data to disk,
    reopened with any other class compatible with BlockStorageFile
    (and vice versa).
    """

    # Reuse the on-disk index layout so RAM/file/mmap devices stay
    # interchangeable.
    _index_struct_string = BlockStorageMMap._index_struct_string
    _index_offset = struct.calcsize(_index_struct_string)

    def __init__(self,
                 storage_data,
                 threadpool_size=None,
                 ignore_lock=False):
        """Wrap an existing bytearray laid out in BlockStorageFile format.

        Args:
            storage_data (bytearray): raw storage image (index record,
                optional user header, then the block data).
            threadpool_size: thread pool size; 0 disables pool creation,
                None uses the ThreadPool default.
            ignore_lock: when True, do not check or modify the locked
                flag in the index record.

        Raises:
            TypeError: if storage_data is not a bytearray.
            IOError: if the storage is locked by another process and
                ignore_lock is False.
        """
        self._bytes_sent = 0
        self._bytes_received = 0
        self._ignore_lock = ignore_lock
        self._f = None
        self._pool = None
        self._close_pool = True
        if type(storage_data) is not bytearray:
            raise TypeError(
                "BlockStorageRAM requires input argument of type "
                "'bytearray'. Invalid input type: %s"
                % (type(storage_data)))
        self._f = storage_data
        self._block_size, self._block_count, user_header_size, locked = \
            struct.unpack(
                BlockStorageRAM._index_struct_string,
                self._f[:BlockStorageRAM._index_offset])
        if locked and (not self._ignore_lock):
            raise IOError(
                "Can not open block storage device because it is "
                "locked by another process. To ignore this check, "
                "initialize this class with the keyword 'ignore_lock' "
                "set to True.")
        self._user_header_data = bytes()
        if user_header_size > 0:
            self._user_header_data = \
                bytes(self._f[BlockStorageRAM._index_offset:
                              (BlockStorageRAM._index_offset+user_header_size)])
        assert len(self._user_header_data) == user_header_size
        self._header_offset = BlockStorageRAM._index_offset + \
                              len(self._user_header_data)
        if not self._ignore_lock:
            # turn on the locked flag
            self._f[:BlockStorageRAM._index_offset] = \
                struct.pack(BlockStorageRAM._index_struct_string,
                            self.block_size,
                            self.block_count,
                            len(self._user_header_data),
                            True)
        # Although this class never uses the threadpool itself, create
        # it anyway in case a clone (which shares the pool) needs it.
        if threadpool_size != 0:
            self._pool = ThreadPool(threadpool_size)

    #
    # Add some methods specific to BlockStorageRAM
    #

    @staticmethod
    def fromfile(file_,
                 threadpool_size=None,
                 ignore_lock=False):
        """
        Instantiate BlockStorageRAM device from a file saved in block
        storage format. The file_ argument can be a file object or a
        string that represents a filename. If called with a file
        object, it should be opened in binary mode, and the caller is
        responsible for closing the file.

        This method returns a BlockStorageRAM instance.
        """
        close_file = False
        if not hasattr(file_, 'read'):
            file_ = open(file_, 'rb')
            close_file = True
        try:
            header_data = file_.read(BlockStorageRAM._index_offset)
            block_size, block_count, user_header_size, locked = \
                struct.unpack(
                    BlockStorageRAM._index_struct_string,
                    header_data)
            if locked and (not ignore_lock):
                raise IOError(
                    "Can not open block storage device because it is "
                    "locked by another process. To ignore this check, "
                    "call this method with the keyword 'ignore_lock' "
                    "set to True.")
            # Pre-size the buffer, then fill in the header and the
            # block data from the file.
            header_offset = len(header_data) + \
                            user_header_size
            f = bytearray(header_offset + \
                          (block_size * block_count))
            f[:header_offset] = header_data + file_.read(user_header_size)
            f[header_offset:] = file_.read(block_size * block_count)
        finally:
            if close_file:
                file_.close()
        return BlockStorageRAM(f,
                               threadpool_size=threadpool_size,
                               ignore_lock=ignore_lock)

    def tofile(self, file_):
        """
        Dump all storage data to a file. The file_ argument can be a
        file object or a string that represents a filename. If called
        with a file object, it should be opened in binary mode, and
        the caller is responsible for closing the file.

        The method should only be called after the storage device has
        been closed to ensure that the locked flag has been set to
        False.
        """
        close_file = False
        if not hasattr(file_, 'write'):
            file_ = open(file_, 'wb')
            close_file = True
        file_.write(self._f)
        if close_file:
            file_.close()

    @property
    def data(self):
        """Access the raw bytearray"""
        return self._f

    #
    # Define BlockStorageInterface Methods
    #

    def clone_device(self):
        """Return a device sharing this one's bytearray and thread pool."""
        f = BlockStorageRAM(self._f,
                            threadpool_size=0,
                            ignore_lock=True)
        f._pool = self._pool
        f._close_pool = False
        return f

    @classmethod
    def compute_storage_size(cls, *args, **kwds):
        """Delegate to BlockStorageMMap (identical storage layout)."""
        return BlockStorageMMap.compute_storage_size(*args, **kwds)

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              initialize=None,
              header_data=None,
              ignore_existing=False,
              threadpool_size=None):
        """Create and return a new, initialized BlockStorageRAM device.

        Raises:
            ValueError: if block_size or block_count is not a positive
                integer.
            TypeError: if header_data is given and is not bytes.
        """
        # We ignore the 'storage_name' argument
        # We ignore the 'ignore_existing' flag
        if (block_size <= 0) or (block_size != int(block_size)):
            raise ValueError(
                "Block size (bytes) must be a positive integer: %s"
                % (block_size))
        if (block_count <= 0) or (block_count != int(block_count)):
            raise ValueError(
                "Block count must be a positive integer: %s"
                % (block_count))
        if (header_data is not None) and \
           (type(header_data) is not bytes):
            raise TypeError(
                "'header_data' must be of type bytes. "
                "Invalid type: %s" % (type(header_data)))
        if initialize is None:
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros
        # create_index
        index_data = None
        if header_data is None:
            index_data = struct.pack(BlockStorageRAM._index_struct_string,
                                     block_size,
                                     block_count,
                                     0,
                                     False)
            header_data = bytes()
        else:
            index_data = struct.pack(BlockStorageRAM._index_struct_string,
                                     block_size,
                                     block_count,
                                     len(header_data),
                                     False)
        header_offset = len(index_data) + len(header_data)
        f = bytearray(header_offset + \
                      (block_size * block_count))
        f[:header_offset] = index_data + header_data
        progress_bar = tqdm.tqdm(total=block_count*block_size,
                                 desc="Initializing File Block Storage Space",
                                 unit="B",
                                 unit_scale=True,
                                 disable=not pyoram.config.SHOW_PROGRESS_BAR)
        for i in xrange(block_count):
            block = initialize(i)
            assert len(block) == block_size, \
                ("%s != %s" % (len(block), block_size))
            # fix: the original assigned pos_start twice in a row
            pos_start = header_offset + i * block_size
            pos_stop = pos_start + block_size
            f[pos_start:pos_stop] = block[:]
            progress_bar.update(n=block_size)
        progress_bar.close()
        return BlockStorageRAM(f, threadpool_size=threadpool_size)

    @property
    def header_data(self):
        """Return the user-defined header data (bytes)."""
        return self._user_header_data

    @property
    def block_count(self):
        """Return the number of blocks in this storage device."""
        return self._block_count

    @property
    def block_size(self):
        """Return the size of each block, in bytes."""
        return self._block_size

    @property
    def storage_name(self):
        """Always None: a RAM device has no backing file name."""
        return None

    def update_header_data(self, new_header_data):
        """Overwrite the user header region; its length cannot change.

        Raises:
            ValueError: if the length of new_header_data differs from
                the current header length.
        """
        if len(new_header_data) != len(self.header_data):
            raise ValueError(
                "The size of header data can not change.\n"
                "Original bytes: %s\n"
                "New bytes: %s" % (len(self.header_data),
                                   len(new_header_data)))
        self._user_header_data = bytes(new_header_data)
        self._f[BlockStorageRAM._index_offset:
                (BlockStorageRAM._index_offset+len(new_header_data))] = \
            self._user_header_data

    def close(self):
        """Shut down the thread pool (if owned) and clear the locked flag."""
        if self._close_pool and (self._pool is not None):
            self._pool.close()
            self._pool.join()
            self._pool = None
        if not self._ignore_lock:
            # turn off the locked flag
            self._f[:BlockStorageRAM._index_offset] = \
                struct.pack(BlockStorageRAM._index_struct_string,
                            self.block_size,
                            self.block_count,
                            len(self._user_header_data),
                            False)
            self._ignore_lock = True

    #
    # We must cast from bytearray to bytes
    # when reading from a bytearray so that this
    # class works with the encryption layer.
    #

    def read_blocks(self, indices):
        """Return the requested blocks as a list of bytes objects."""
        return [bytes(block) for block
                in super(BlockStorageRAM, self).read_blocks(indices)]

    def yield_blocks(self, indices):
        """Lazily yield the requested blocks as bytes objects."""
        for block in super(BlockStorageRAM, self).yield_blocks(indices):
            yield bytes(block)

    def read_block(self, i):
        """Return the contents of block i as a bytes object."""
        return bytes(super(BlockStorageRAM, self).read_block(i))

    #def write_blocks(...)
    #def write_block(...)

    @property
    def bytes_sent(self):
        """Return the total number of payload bytes written so far."""
        return self._bytes_sent

    @property
    def bytes_received(self):
        """Return the total number of payload bytes read so far."""
        return self._bytes_received
BlockStorageTypeFactory.register_device("ram", BlockStorageRAM)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/block_storage_ram.py",
"copies": "1",
"size": "11264",
"license": "mit",
"hash": 6271770415012598000,
"line_mean": 34.872611465,
"line_max": 80,
"alpha_frac": 0.5403941761,
"autogenerated": false,
"ratio": 4.484076433121019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008703481974817793,
"num_lines": 314
} |
__all__ = ('BlockStorageS3',)
import struct
import logging
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
from pyoram.storage.boto3_s3_wrapper import Boto3S3Wrapper
import tqdm
import six
from six.moves import xrange, map
log = logging.getLogger("pyoram")
class BlockStorageS3(BlockStorageInterface):
    """
    A block storage device for Amazon Simple
    Storage Service (S3).

    Each block is stored as its own S3 object named
    "<storage_name>/b<i>"; the index record and user header live in a
    separate "<storage_name>/<_index_name>" object.
    """

    _index_name = "PyORAMBlockStorageS3_index.bin"
    _index_struct_string = "!LLL?"
    _index_offset = struct.calcsize(_index_struct_string)

    def __init__(self,
                 storage_name,
                 bucket_name=None,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 region_name=None,
                 ignore_lock=False,
                 threadpool_size=None,
                 s3_wrapper=Boto3S3Wrapper):
        """Open an existing S3-backed block storage space.

        Raises:
            ValueError: if bucket_name is not given.
            IOError: if the storage is locked by another process and
                ignore_lock is False.
        """
        self._bytes_sent = 0
        self._bytes_received = 0
        self._storage_name = storage_name
        self._bucket_name = bucket_name
        self._aws_access_key_id = aws_access_key_id
        self._aws_secret_access_key = aws_secret_access_key
        self._region_name = region_name
        self._pool = None
        self._close_pool = True
        self._s3 = None
        self._ignore_lock = ignore_lock
        self._async_write = None
        self._async_write_callback = None
        if bucket_name is None:
            raise ValueError("'bucket_name' keyword is required")
        if threadpool_size != 0:
            self._pool = ThreadPool(threadpool_size)
        self._s3 = s3_wrapper(bucket_name,
                              aws_access_key_id=aws_access_key_id,
                              aws_secret_access_key=aws_secret_access_key,
                              region_name=region_name)
        self._basename = self.storage_name+"/b%d"
        index_data = self._s3.download(
            self._storage_name+"/"+BlockStorageS3._index_name)
        self._block_size, self._block_count, user_header_size, locked = \
            struct.unpack(
                BlockStorageS3._index_struct_string,
                index_data[:BlockStorageS3._index_offset])
        if locked and (not self._ignore_lock):
            raise IOError(
                "Can not open block storage device because it is "
                "locked by another process. To ignore this check, "
                "initialize this class with the keyword 'ignore_lock' "
                "set to True.")
        self._user_header_data = bytes()
        if user_header_size > 0:
            self._user_header_data = \
                index_data[BlockStorageS3._index_offset:
                           (BlockStorageS3._index_offset+user_header_size)]
        if not self._ignore_lock:
            # turn on the locked flag
            self._s3.upload((self._storage_name+"/"+BlockStorageS3._index_name,
                             struct.pack(BlockStorageS3._index_struct_string,
                                         self.block_size,
                                         self.block_count,
                                         len(self.header_data),
                                         True) + \
                             self.header_data))

    def _check_async(self):
        # Drain any in-flight asynchronous uploads, firing the
        # registered callback for each completed item.
        if self._async_write is not None:
            for i in self._async_write:
                if self._async_write_callback is not None:
                    self._async_write_callback(i)
            self._async_write = None
            self._async_write_callback = None

    def _schedule_async_write(self, arglist, callback=None):
        # Hand uploads to the thread pool when available; otherwise
        # perform them inline.
        assert self._async_write is None
        if self._pool is not None:
            self._async_write = \
                self._pool.imap_unordered(self._s3.upload, arglist)
            self._async_write_callback = callback
        else:
            # Note: we are using six.map which always
            #       behaves like imap
            for i in map(self._s3.upload, arglist):
                if callback is not None:
                    callback(i)

    def _download(self, i):
        # Fetch the S3 object holding block i.
        return self._s3.download(self._basename % i)

    #
    # Define BlockStorageInterface Methods
    #

    def clone_device(self):
        """Return a device sharing this one's thread pool and credentials."""
        f = BlockStorageS3(self.storage_name,
                           bucket_name=self._bucket_name,
                           aws_access_key_id=self._aws_access_key_id,
                           aws_secret_access_key=self._aws_secret_access_key,
                           region_name=self._region_name,
                           threadpool_size=0,
                           s3_wrapper=type(self._s3),
                           ignore_lock=True)
        f._pool = self._pool
        f._close_pool = False
        return f

    @classmethod
    def compute_storage_size(cls,
                             block_size,
                             block_count,
                             header_data=None,
                             ignore_header=False):
        """Return the total bytes used by a device with these parameters."""
        assert (block_size > 0) and (block_size == int(block_size))
        assert (block_count > 0) and (block_count == int(block_count))
        if header_data is None:
            header_data = bytes()
        if ignore_header:
            return block_size * block_count
        else:
            return BlockStorageS3._index_offset + \
                   len(header_data) + \
                   block_size * block_count

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              bucket_name=None,
              aws_access_key_id=None,
              aws_secret_access_key=None,
              region_name=None,
              header_data=None,
              initialize=None,
              threadpool_size=None,
              ignore_existing=False,
              s3_wrapper=Boto3S3Wrapper):
        """Create, initialize, and return a new S3 block storage space.

        Raises:
            ValueError: for a missing bucket_name or non-positive
                block_size/block_count.
            TypeError: if header_data is given and is not bytes.
            IOError: if the storage location exists and ignore_existing
                is False.
        """
        if bucket_name is None:
            raise ValueError("'bucket_name' is required")
        if (block_size <= 0) or (block_size != int(block_size)):
            raise ValueError(
                "Block size (bytes) must be a positive integer: %s"
                % (block_size))
        if (block_count <= 0) or (block_count != int(block_count)):
            raise ValueError(
                "Block count must be a positive integer: %s"
                % (block_count))
        if (header_data is not None) and \
           (type(header_data) is not bytes):
            raise TypeError(
                "'header_data' must be of type bytes. "
                "Invalid type: %s" % (type(header_data)))
        pool = None
        if threadpool_size != 0:
            pool = ThreadPool(threadpool_size)
        s3 = s3_wrapper(bucket_name,
                        aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        region_name=region_name)
        exists = s3.exists(storage_name)
        if (not ignore_existing) and exists:
            raise IOError(
                "Storage location already exists in bucket %s: %s"
                % (bucket_name, storage_name))
        if exists:
            log.info("Deleting objects in existing S3 entry: %s/%s"
                     % (bucket_name, storage_name))
            print("Clearing Existing S3 Objects With Prefix %s/%s/"
                  % (bucket_name, storage_name))
            s3.clear(storage_name, threadpool=pool)
        if header_data is None:
            s3.upload((storage_name+"/"+BlockStorageS3._index_name,
                       struct.pack(BlockStorageS3._index_struct_string,
                                   block_size,
                                   block_count,
                                   0,
                                   False)))
        else:
            s3.upload((storage_name+"/"+BlockStorageS3._index_name,
                       struct.pack(BlockStorageS3._index_struct_string,
                                   block_size,
                                   block_count,
                                   len(header_data),
                                   False) + \
                       header_data))
        if initialize is None:
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros
        basename = storage_name+"/b%d"
        # NOTE: We will not be informed when a thread
        #       encounters an exception (e.g., when
        #       calling initialize(i). We must ensure
        #       that all iterations were processed
        #       by counting the results.
        def init_blocks():
            for i in xrange(block_count):
                yield (basename % i, initialize(i))
        def _do_upload(arg):
            try:
                s3.upload(arg)
            except Exception as e:                     # pragma: no cover
                log.error(                             # pragma: no cover
                    "An exception occured during S3 "  # pragma: no cover
                    "setup when calling the block "    # pragma: no cover
                    "initialization function: %s"      # pragma: no cover
                    % (str(e)))                        # pragma: no cover
                raise                                  # pragma: no cover
        total = None
        progress_bar = tqdm.tqdm(total=block_count*block_size,
                                 desc="Initializing S3 Block Storage Space",
                                 unit="B",
                                 unit_scale=True,
                                 disable=not pyoram.config.SHOW_PROGRESS_BAR)
        if pool is not None:
            try:
                for i,_ in enumerate(
                        pool.imap_unordered(_do_upload, init_blocks())):
                    total = i
                    progress_bar.update(n=block_size)
            except Exception as e:                     # pragma: no cover
                s3.clear(storage_name)                 # pragma: no cover
                raise                                  # pragma: no cover
            finally:
                progress_bar.close()
                pool.close()
                pool.join()
        else:
            try:
                for i,_ in enumerate(
                        map(s3.upload, init_blocks())):
                    total = i
                    progress_bar.update(n=block_size)
            except Exception as e:                     # pragma: no cover
                s3.clear(storage_name)                 # pragma: no cover
                raise                                  # pragma: no cover
            finally:
                progress_bar.close()
        if total != block_count - 1:
            s3.clear(storage_name)                     # pragma: no cover
            if pool is not None:                       # pragma: no cover
                pool.close()                           # pragma: no cover
                pool.join()                            # pragma: no cover
            raise ValueError(                          # pragma: no cover
                "Something went wrong during S3 block" # pragma: no cover
                " initialization. Check the logger "   # pragma: no cover
                "output for more information.")        # pragma: no cover
        return BlockStorageS3(storage_name,
                              bucket_name=bucket_name,
                              aws_access_key_id=aws_access_key_id,
                              aws_secret_access_key=aws_secret_access_key,
                              region_name=region_name,
                              threadpool_size=threadpool_size,
                              s3_wrapper=s3_wrapper)

    @property
    def header_data(self):
        """Return the user-defined header data (bytes)."""
        return self._user_header_data

    @property
    def block_count(self):
        """Return the number of blocks in this storage device."""
        return self._block_count

    @property
    def block_size(self):
        """Return the size of each block, in bytes."""
        return self._block_size

    @property
    def storage_name(self):
        """Return the key prefix used for this storage space."""
        return self._storage_name

    def update_header_data(self, new_header_data):
        """Overwrite the user header region; its length cannot change.

        Raises:
            ValueError: if the length of new_header_data differs from
                the current header length.
        """
        self._check_async()
        if len(new_header_data) != len(self.header_data):
            raise ValueError(
                "The size of header data can not change.\n"
                "Original bytes: %s\n"
                "New bytes: %s" % (len(self.header_data),
                                   len(new_header_data)))
        self._user_header_data = new_header_data
        index_data = bytearray(self._s3.download(
            self._storage_name+"/"+BlockStorageS3._index_name))
        lenbefore = len(index_data)
        index_data[BlockStorageS3._index_offset:] = new_header_data
        assert lenbefore == len(index_data)
        self._s3.upload((self._storage_name+"/"+BlockStorageS3._index_name,
                         bytes(index_data)))

    def close(self):
        """Flush pending writes, clear the locked flag, close the pool."""
        self._check_async()
        if self._s3 is not None:
            if not self._ignore_lock:
                # turn off the locked flag
                self._s3.upload(
                    (self._storage_name+"/"+BlockStorageS3._index_name,
                     struct.pack(BlockStorageS3._index_struct_string,
                                 self.block_size,
                                 self.block_count,
                                 len(self.header_data),
                                 False) + \
                     self.header_data))
        if self._close_pool and (self._pool is not None):
            self._pool.close()
            self._pool.join()
            self._pool = None

    def read_blocks(self, indices):
        """Download and return the requested blocks as a list."""
        self._check_async()
        # be sure not to exhaust this if it is an iterator
        # or generator
        indices = list(indices)
        # fix: index upper bound is exclusive (was <=)
        assert all(0 <= i < self.block_count for i in indices)
        self._bytes_received += self.block_size * len(indices)
        if self._pool is not None:
            return self._pool.map(self._download, indices)
        else:
            return list(map(self._download, indices))

    def yield_blocks(self, indices):
        """Lazily yield the requested blocks as they download."""
        self._check_async()
        # be sure not to exhaust this if it is an iterator
        # or generator
        indices = list(indices)
        # fix: index upper bound is exclusive (was <=)
        assert all(0 <= i < self.block_count for i in indices)
        self._bytes_received += self.block_size * len(indices)
        if self._pool is not None:
            return self._pool.imap(self._download, indices)
        else:
            return map(self._download, indices)

    def read_block(self, i):
        """Download and return the contents of block i."""
        self._check_async()
        assert 0 <= i < self.block_count
        self._bytes_received += self.block_size
        return self._download(i)

    def write_blocks(self, indices, blocks, callback=None):
        """Queue uploads of the paired (index, block) writes."""
        self._check_async()
        # be sure not to exhaust this if it is an iterator
        # or generator
        indices = list(indices)
        # fix: index upper bound is exclusive (was <=)
        assert all(0 <= i < self.block_count for i in indices)
        self._bytes_sent += self.block_size * len(indices)
        indices = (self._basename % i for i in indices)
        self._schedule_async_write(zip(indices, blocks),
                                   callback=callback)

    def write_block(self, i, block):
        """Queue an upload of a single block at index i."""
        self._check_async()
        assert 0 <= i < self.block_count
        self._bytes_sent += self.block_size
        self._schedule_async_write((((self._basename % i), block),))

    @property
    def bytes_sent(self):
        """Return the total number of payload bytes uploaded so far."""
        return self._bytes_sent

    @property
    def bytes_received(self):
        """Return the total number of payload bytes downloaded so far."""
        return self._bytes_received
BlockStorageTypeFactory.register_device("s3", BlockStorageS3)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/block_storage_s3.py",
"copies": "1",
"size": "15725",
"license": "mit",
"hash": -1852882947866466300,
"line_mean": 38.7095959596,
"line_max": 79,
"alpha_frac": 0.4992686804,
"autogenerated": false,
"ratio": 4.45846328324355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.545773196364355,
"avg_score": null,
"num_lines": null
} |
__all__ = ('BlockStorageSFTP',)
import logging
from pyoram.util.misc import chunkiter
from pyoram.storage.block_storage import \
BlockStorageTypeFactory
from pyoram.storage.block_storage_file import \
BlockStorageFile
log = logging.getLogger("pyoram")
class BlockStorageSFTP(BlockStorageFile):
    """
    A block storage device for accessing file data through
    an SSH portal using Secure File Transfer Protocol (SFTP).
    """

    def __init__(self,
                 storage_name,
                 sshclient=None,
                 **kwds):
        """Open a remote storage file through *sshclient*'s SFTP channel.

        Raises:
            ValueError: if sshclient is not given.
        """
        if sshclient is None:
            raise ValueError(
                "Can not open sftp block storage device "
                "without an ssh client.")
        super(BlockStorageSFTP, self).__init__(
            storage_name,
            _filesystem=sshclient.open_sftp(),
            **kwds)
        self._sshclient = sshclient
        # Pipelined mode lets writes proceed without waiting for a
        # server response to each request.
        self._f.set_pipelined()

    #
    # Define BlockStorageInterface Methods
    #

    def clone_device(self):
        """Return a device sharing this one's ssh client and thread pool."""
        f = BlockStorageSFTP(self.storage_name,
                             sshclient=self._sshclient,
                             threadpool_size=0,
                             ignore_lock=True)
        f._pool = self._pool
        f._close_pool = False
        return f

    #@classmethod
    #def compute_storage_size(...)

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              sshclient=None,
              threadpool_size=None,
              **kwds):
        """Create the remote storage file, then reopen it as SFTP storage.

        Raises:
            ValueError: if sshclient is not given.
        """
        if sshclient is None:
            raise ValueError(
                "Can not setup sftp block storage device "
                "without an ssh client.")
        with BlockStorageFile.setup(storage_name,
                                    block_size,
                                    block_count,
                                    _filesystem=sshclient.open_sftp(),
                                    threadpool_size=threadpool_size,
                                    **kwds) as f:
            pass
        f._filesystem.close()
        return BlockStorageSFTP(storage_name,
                                sshclient=sshclient,
                                threadpool_size=threadpool_size)

    #@property
    #def header_data(...)
    #@property
    #def block_count(...)
    #@property
    #def block_size(...)
    #@property
    #def storage_name(...)
    #def update_header_data(...)

    def close(self):
        """Close the storage file, then the SFTP channel it came from."""
        super(BlockStorageSFTP, self).close()
        self._filesystem.close()

    def read_blocks(self, indices):
        """Read the requested blocks with a single batched readv call."""
        self._check_async()
        args = []
        for i in indices:
            assert 0 <= i < self.block_count
            self._bytes_received += self.block_size
            args.append((self._header_offset + i * self.block_size,
                         self.block_size))
        return self._f.readv(args)

    def yield_blocks(self, indices, chunksize=100):
        """Yield the requested blocks, batching readv calls per chunk."""
        for chunk in chunkiter(indices, n=chunksize):
            # fix: index upper bound is exclusive (was <=), matching
            # read_blocks above
            assert all(0 <= i < self.block_count for i in chunk)
            self._bytes_received += self.block_size * len(chunk)
            args = [(self._header_offset + i * self.block_size,
                     self.block_size)
                    for i in chunk]
            for block in self._f.readv(args):
                yield block

    #def read_block(...)
    #def write_blocks(...)
    #def write_block(...)
    #@property
    #def bytes_sent(...)
    #@property
    #def bytes_received(...)
BlockStorageTypeFactory.register_device("sftp", BlockStorageSFTP)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/block_storage_sftp.py",
"copies": "1",
"size": "3598",
"license": "mit",
"hash": 5837934384441188000,
"line_mean": 27.5555555556,
"line_max": 70,
"alpha_frac": 0.5208449138,
"autogenerated": false,
"ratio": 4.531486146095718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5552331059895719,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BloomFilter']
class BitBloomFilter(object):
    """Bloom filter backed by a plain bit list of size m.

    Each item sets k positions derived from hash(item); a membership
    test reports False only when some position is unset, so false
    positives are possible.  remove() clears the item's positions,
    which can also erase bits shared with other items -- an inherent
    limitation of bit-based Bloom filters.
    """

    def __init__(self, m=1024, k=3):
        self.m = m              # number of slots
        self.k = k              # probes per item
        self.items = [0] * m

    def __repr__(self):
        return '<BloomFilter {}>'.format(self.items)

    def _positions(self, item):
        # The k probe positions for item, one per shift of its hash.
        h = hash(item)
        return ((h >> shift) % self.m for shift in range(self.k))

    def add(self, item):
        for pos in self._positions(item):
            self.items[pos] = 1

    def remove(self, item):
        for pos in self._positions(item):
            self.items[pos] = 0

    def has(self, item):
        return all(self.items[pos] for pos in self._positions(item))
class IntBloomFilter(object):
    """Counting Bloom filter backed by a list of m integer counters.

    add() increments k counters derived from hash(item) and remove()
    decrements them, so removal is safe as long as every remove is
    matched by a prior add of the same item.  Removing an item whose
    counters are already zero raises ValueError.
    """

    def __init__(self, m=1024, k=3):
        self.m = m              # number of slots
        self.k = k              # probes per item
        self.items = [0] * m

    def __repr__(self):
        return '<BloomFilter {}>'.format(self.items)

    def _positions(self, item):
        # The k probe positions for item, one per shift of its hash.
        h = hash(item)
        return [(h >> shift) % self.m for shift in range(self.k)]

    def add(self, item):
        for pos in self._positions(item):
            self.items[pos] += 1

    def remove(self, item):
        for pos in self._positions(item):
            if not self.items[pos]:
                raise ValueError('Bloom filter\'s field is ZERO. This item was not added before.')
            self.items[pos] -= 1

    def has(self, item):
        return all(self.items[pos] for pos in self._positions(item))
if __name__ == '__main__':
    # Quick demo: exercise both filter variants with tuple keys.
    # bit
    bf = BitBloomFilter(16)
    bf.add((1, '1'))
    bf.add((2, '2'))
    print(bf)
    print(bf.has((0, '0')))   # may print True: false positives are possible
    print(bf.has((1, '1')))   # always True once added
    # int
    bf = IntBloomFilter(16)
    bf.add((1, '1'))
    bf.add((2, '2'))
    print(bf)
    print(bf.has((0, '0')))
    print(bf.has((1, '1')))
| {
"repo_name": "mtasic85/bloomfilter",
"path": "bloomfilter.py",
"copies": "1",
"size": "1946",
"license": "mit",
"hash": 2475595161654776000,
"line_mean": 20.6222222222,
"line_max": 98,
"alpha_frac": 0.4398766701,
"autogenerated": false,
"ratio": 3.216528925619835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4156405595719835,
"avg_score": null,
"num_lines": null
} |
# Service-id ranges and lists for the TransXchange feed.
ALL_BLUE_MOUNTAINS_SERVICES = range(9833, 9848)
INBOUND_BLUE_MOUNTAINS_SERVICES = (9833, 9835, 9838, 9840, 9841, 9843, 9844,
                                   9847)
YELLOW_LINE_SERVICES = [9901, 9903, 9904, 9906, 9908, 9909, 9911, 9964, 9965,
                        9966, 9967, 9968, 9969, 9972, 9973, 9974]
# 9847 has a hornsby to springwood service thrown in for good measure :-(
#INBOUND_BLUE_MOUNTAINS_SERVICES = (9833, 9835, 9838, 9840, 9841, 9843, 9844)
TEST_SERVICES = (9843,)
#SERVICE_LIST = YELLOW_LINE_SERVICES + ALL_BLUE_MOUNTAINS_SERVICES
#SERVICE_LIST = ALL_BLUE_MOUNTAINS_SERVICES

# Originating stations for each direction of travel.
LITHGOW_TO_CENTRAL_ORIGINS = ("Lithgow Station",
                              "Mount Victoria Station",
                              "Katoomba Station",
                              "Springwood Station")
CENTRAL_TO_LITHGOW_ORIGINS = ("Central Station", "Hornsby Station",)
PENRITH_TO_HORNSBY_ORIGINS = ("Emu Plains Station",
                              "Penrith Station",
                              "Richmond Station",
                              "Blacktown Station",
                              "Quakers Hill Station")
HORNSBY_TO_PENRITH_ORIGINS = ("Berowra Station",
                              "Hornsby Station",
                              "Gordon Station",
                              "North Sydney Station",
                              "Wyong Station",
                              "Lindfield Station")
INTERCHANGE_LINE_NAME = "Dummy Interchange Line"
# NOTE(review): "Penrif" below looks like a deliberate colloquialism (or a
# typo for "Penrith"); these strings may be matched elsewhere, so they are
# left untouched -- confirm against consumers before changing.
LINE_NAMES = ["Blue Mountains - Lithgow to Central",
              "Blue Mountains - Central to Lithgow",
              "Western Line - Penrif to Hornsby",
              "Western Line - Hornsby to Penrif",
              ]
# List the stations that are on each line
# (values are 1-based indices into LINE_NAMES)
# TODO: Generate this automatically?
INTERCHANGE_STATION_MAP = {
    "Emu Plains Station": (1, 2, 3, 4),
    "Penrith Station": (1, 2, 3, 4),
    "Blacktown Station": (1, 2, 3, 4),
    "Westmead Station": (1, 2, 3, 4),
    "Parramatta Station": (1, 2, 3, 4),
    "Granville Station": (1, 2, 3, 4),
    "Lidcombe Station": (1, 2, 3, 4),
    "Strathfield Station": (1, 2, 3, 4),
    "Burwood Station": (3, 4),
    "Redfern Station": (1, 2, 3, 4),
    "Central Station": (1, 2, 3, 4),
    "Town Hall Station": (3, 4),
    "Wynyard Station": (3, 4),
    "North Sydney Station": (3, 4)
}
| {
"repo_name": "edwinsteele/visual-commute",
"path": "vcapp/transxchange_constants.py",
"copies": "1",
"size": "2327",
"license": "cc0-1.0",
"hash": -2825039874895789600,
"line_mean": 42.0925925926,
"line_max": 77,
"alpha_frac": 0.5376020627,
"autogenerated": false,
"ratio": 2.987163029525032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9020091651606317,
"avg_score": 0.0009346881237428502,
"num_lines": 54
} |
__all__ = ['bootstrap']
def bootstrap():
    """
    Define global blueprint tasks, load configured blueprints and apply role definitions.
    """
    # Monkey patch fabric
    from .patch import patch
    patch('fabric.operations.run')
    # Route fabric's sudo through refabric's patched run implementation.
    patch('fabric.operations.sudo', 'refabric.operations.run')
    patch('fabric.state.switch_env')
    patch('fabric.tasks._execute')
    patch('fabric.tasks.Task:get_hosts_and_effective_roles')
    patch('fabric.utils._AttributeDict')

    # Reload fabric's run/sudo internal import references to patched version
    import fabric.api
    import fabric.contrib.files
    import fabric.contrib.project
    import fabric.operations
    for m in (fabric.api, fabric.contrib.files, fabric.contrib.project):
        m.run = m.sudo = fabric.operations.run

    # NOTE(review): these imports appear deliberately placed after the
    # patching above so they observe the patched fabric internals --
    # confirm before reordering.
    import fabric.state
    from fabric.decorators import task
    from .state import load_blueprints
    from .tasks import help_task, init_task

    # Set environment defaults
    fabric.state.env.update({
        'sudo_user': 'root',
        'colorize_errors': True,
        'skip_unknown_tasks': True,
        'merge_states': True,
        'prompt_hosts': True,
        'forward_agent': True,
        'sudo_prefix': "sudo -S -E -H -p '%(sudo_prompt)s' SSH_AUTH_SOCK=$SSH_AUTH_SOCK",
        'shell': '/bin/bash -c',
    })

    # Create global blueprint tasks
    fabric.state.commands['help'] = task(help_task)
    fabric.state.commands['init'] = task(init_task)

    # Load configured blueprints
    load_blueprints()

    # Touch env.roles to trigger apply role definitions (needed for cli options -R, --list etc.)
    fabric.state.env.roles = fabric.state.env.roles
| {
"repo_name": "5monkeys/refabric",
"path": "refabric/bootstrap.py",
"copies": "1",
"size": "1667",
"license": "mit",
"hash": 6366768321733642000,
"line_mean": 31.6862745098,
"line_max": 96,
"alpha_frac": 0.6622675465,
"autogenerated": false,
"ratio": 3.8767441860465115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006378725602497586,
"num_lines": 51
} |
__all__ = ("Boto3S3Wrapper",
"MockBoto3S3Wrapper")
import errno
import os
import shutil

import pyoram
import tqdm
# Optional dependency: boto3/botocore are only required by the real S3
# wrapper; the mock wrapper below works without them.
try:
    import boto3
    import botocore
    boto3_available = True
except ImportError:                  # pragma: no cover
    # Bare `except:` would also swallow SystemExit/KeyboardInterrupt;
    # only a failed import means the dependency is unavailable.
    boto3_available = False          # pragma: no cover
import six
from six.moves import xrange, map
class Boto3S3Wrapper(object):
    """
    A wrapper class for the boto3 S3 service.

    Exposes a small exists/download/upload/clear interface over a single
    S3 bucket.
    """

    def __init__(self,
                 bucket_name,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 region_name=None):
        if not boto3_available:
            raise ImportError(                 # pragma: no cover
                "boto3 module is required to "  # pragma: no cover
                "use BlockStorageS3 device")    # pragma: no cover

        self._s3 = boto3.session.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name).resource('s3')
        self._bucket = self._s3.Bucket(bucket_name)

    def exists(self, key):
        """Return True if *key* names an object or a "directory" prefix."""
        try:
            self._bucket.Object(key).load()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                pass
            else:
                # re-raise preserving the original traceback
                # (the original `raise e` reset it)
                raise
        else:
            return True
        # It's not a file. Check if it's a "directory".
        for obj in self._bucket.objects.filter(
                Prefix=key+"/"):
            return True
        return False

    def download(self, key):
        """Return the content of *key* as bytes.

        Raises IOError when the object cannot be fetched."""
        try:
            return self._s3.meta.client.get_object(
                Bucket=self._bucket.name,
                Key=key)['Body'].read()
        except botocore.exceptions.ClientError:
            raise IOError("Can not download key: %s"
                          % (key))

    def upload(self, key_block):
        """Upload a single (key, data) pair to the bucket."""
        key, block = key_block
        self._bucket.put_object(Key=key, Body=block)

    # Chunk a streamed iterator of which we do not know
    # the size
    def _chunks(self, objs, n=100):
        """Yield delete_objects()-style dicts of at most *n* keys each."""
        assert 1 <= n <= 1000  # required by boto3
        objs = iter(objs)
        chunk = []
        try:
            while True:
                chunk = []
                while len(chunk) < n:
                    # builtin next() replaces six.next (same behaviour)
                    chunk.append({'Key': next(objs).key})
                yield {'Objects': chunk}
        except StopIteration:
            pass
        # flush any partially filled final chunk
        if len(chunk):
            yield {'Objects': chunk}

    def _del(self, chunk):
        """Delete one chunk of objects; return how many were requested."""
        self._bucket.delete_objects(Delete=chunk)
        return len(chunk['Objects'])

    def clear(self, key, threadpool=None):
        """Delete every object under the prefix *key* + "/".

        When *threadpool* is given, deletions are issued concurrently via
        threadpool.imap."""
        objs = self._bucket.objects.filter(Prefix=key+"/")
        if threadpool is not None:
            deliter = threadpool.imap(self._del, self._chunks(objs))
        else:
            deliter = map(self._del, self._chunks(objs))
        # total is unknown up front because the listing is streamed
        with tqdm.tqdm(total=None,
                       desc="Clearing S3 Blocks",
                       unit=" objects",
                       disable=not pyoram.config.SHOW_PROGRESS_BAR) as progress_bar:
            progress_bar.update(n=0)
            for chunksize in deliter:
                progress_bar.update(n=chunksize)
class MockBoto3S3Wrapper(object):
    """
    A mock class for Boto3S3Wrapper that uses the local filesystem and
    treats the bucket name as a directory.

    This class is mainly used for testing, but could potentially be
    used to setup storage locally that is then uploaded to S3 through
    the AWS web portal.
    """

    def __init__(self,
                 bucket_name,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 region_name=None):
        # Credentials and region are accepted (and ignored) so the mock is
        # a drop-in replacement for Boto3S3Wrapper.
        self._bucket_name = os.path.abspath(
            os.path.normpath(bucket_name))

    # called within upload to create directory
    # hierarchy on the fly
    def _makedirs_if_needed(self, key):
        """Ensure the parent directory of *key* exists inside the bucket dir."""
        path = os.path.join(self._bucket_name, key)
        # EAFP: attempt the create and tolerate an existing directory.
        # The original exists()-then-makedirs() pair was racy when two
        # uploads targeted the same directory concurrently.
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # the key itself must name a file, not a directory
        assert not os.path.isdir(path)

    def exists(self, key):
        """Return True if *key* exists (as a file or directory)."""
        return os.path.exists(
            os.path.join(self._bucket_name, key))

    def download(self, key):
        """Return the content stored under *key* as bytes."""
        with open(os.path.join(self._bucket_name, key), 'rb') as f:
            return f.read()

    def upload(self, key_block):
        """Store a single (key, data) pair under the bucket directory."""
        key, block = key_block
        self._makedirs_if_needed(key)
        with open(os.path.join(self._bucket_name, key), 'wb') as f:
            f.write(block)

    def clear(self, key, threadpool=None):
        """Remove the file or directory tree stored under *key*, if any.

        *threadpool* is accepted for interface parity and ignored."""
        path = os.path.join(self._bucket_name, key)
        if os.path.exists(path):
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            else:
                os.remove(path)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/storage/boto3_s3_wrapper.py",
"copies": "1",
"size": "5205",
"license": "mit",
"hash": -6646967624793897000,
"line_mean": 31.9430379747,
"line_max": 84,
"alpha_frac": 0.5266090298,
"autogenerated": false,
"ratio": 4.06640625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50930152798,
"avg_score": null,
"num_lines": null
} |
# Fixed: ('BottomBar') is a bare parenthesised string, not a tuple, so
# `from ... import *` iterated it character by character.
__all__ = ('BottomBar',)
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.core.image import Image as CoreImage
from kivy.uix.image import Image
from kivy.graphics import *
from pocketthrone.managers.pipe import L
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.widgets.gamebutton import GameButton
from pocketthrone.widgets.gamelabel import GameLabel
from pocketthrone.entities.event import *
from pocketthrone.entities.enum import WidgetState, WidgetAction
class BottomBar(BoxLayout):
    """Horizontal bar at the bottom of the screen holding the action button,
    the heading/details labels and the next-turn button."""

    _tag = "[BottomBar] "
    # set when the bar needs a redraw on the next tick
    _dirty = True

    def __init__(self, **kwargs):
        super(BottomBar, self).__init__(orientation="horizontal", size_hint=(1, .1), pos=(0, 0))
        EventManager.register(self)
        # make GameButtons
        actionbutton = GameButton(size_hint=(.1, 1), link="actionbutton")
        actionbutton.set_source("actionbutton_bg_default.png")
        nextturnbutton = GameButton(size_hint=(.1, 1), link="nextturnbutton")
        # make label widgets
        labels = BoxLayout(orientation="vertical", halign="left", valign="top", size_hint=(1, .75))
        heading = GameLabel(link="heading", weight=1.4)
        details = GameLabel(link="details", weight=1.0)
        labels.add_widget(heading)
        labels.add_widget(details)
        # add all to BottomBars background
        self.add_widget(actionbutton)
        self.add_widget(labels)
        self.add_widget(nextturnbutton)

    def trigger_redraw(self):
        """Mark the bar dirty so update() redraws it on the next tick."""
        self._dirty = True

    # add a widget to SideBar and the buttons list
    def add_widget(self, widget):
        print(self._tag + "add widget " + repr(widget))
        super(BottomBar, self).add_widget(widget)

    # method to destroy this BottomBar widget
    def remove_self(self):
        self.parent.remove_widget(self)

    def update(self):
        """Redraw the background once if flagged dirty."""
        if self._dirty:
            self.update_background()
            self._dirty = False

    def get_heading_text(self):
        """Return the current text of the heading label."""
        heading_text = L.WidgetManager.get_widget("heading").get_text()
        return heading_text

    def set_heading_text(self, value):
        """Set the text of the heading label."""
        # fixed: the original bound set_text's return value to an unused local
        L.WidgetManager.get_widget("heading").set_text(value)

    def get_details_text(self):
        """Return the current text of the details label."""
        details_text = L.WidgetManager.get_widget("details").get_text()
        return details_text

    def set_details_text(self, value):
        """Set the text of the details label."""
        L.WidgetManager.get_widget("details").set_text(value)

    def set_actionbutton_state(self, value):
        """Forward a WidgetState to the action button."""
        actionbutton = L.WidgetManager.get_widget("actionbutton")
        actionbutton.set_button_state(value)

    def get_actionbutton_state(self):
        """Return the action button's current WidgetState."""
        actionbutton = L.WidgetManager.get_widget("actionbutton")
        return actionbutton.get_button_state()

    def update_background(self):
        # intentionally a no-op for now; kept as the redraw hook
        pass

    def on_event(self, event):
        """EventManager callback: redraw (if dirty) on every tick."""
        if isinstance(event, TickEvent):
            self.update()
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/widgets/bottombar.py",
"copies": "2",
"size": "2746",
"license": "bsd-2-clause",
"hash": -3761308482425136600,
"line_mean": 30.2045454545,
"line_max": 93,
"alpha_frac": 0.7479970867,
"autogenerated": false,
"ratio": 3.2154566744730677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49634537611730684,
"avg_score": null,
"num_lines": null
} |
__all__ = ['BoxType', 'BoxDouble', 'BoxSingle']


class BoxType(object):
    """
    Base class holding the CP437 line-drawing characters used to draw
    boxes. Border glyphs default to plain spaces; subclasses override
    them with actual drawing characters.
    """
    blank = ' '
    horiz = ' '
    vert = ' '
    tl = ' '
    bl = ' '
    tr = ' '
    br = ' '
    scrollbar_top = ' '
    scrollbar_bottom = ' '
    scrollbar_center = ' '
    # solid block glyphs used for the scrollbar thumb
    scrollbar_center_block = '\xdb'   # full block
    scrollbar_bottom_block = '\xdc'   # lower half block
    scrollbar_top_block = '\xdf'      # upper half block


class BoxDouble(BoxType):
    """
    A box bordered with double-line drawing characters.
    """
    blank = ' '
    horiz = '\xcd'              # double horizontal
    vert = '\xba'               # double vertical
    tl = '\xc9'                 # top-left corner
    bl = '\xc8'                 # bottom-left corner
    tr = '\xbb'                 # top-right corner
    br = '\xbc'                 # bottom-right corner
    scrollbar_top = '\xd2'
    scrollbar_bottom = '\xd0'
    scrollbar_center = '\xf0'


class BoxSingle(BoxType):
    """
    A box bordered with single-line drawing characters.
    """
    horiz = '\xc4'              # single horizontal
    vert = '\xb3'               # single vertical
    tl = '\xda'                 # top-left corner
    bl = '\xc0'                 # bottom-left corner
    tr = '\xbf'                 # top-right corner
    br = '\xd9'                 # bottom-right corner
    scrollbar_top = '\xd1'
    scrollbar_bottom = '\xcf'
    scrollbar_center = '\xd8'
| {
"repo_name": "jtruscott/ld27",
"path": "pytality/boxtypes.py",
"copies": "1",
"size": "1119",
"license": "bsd-3-clause",
"hash": 4348662610289652000,
"line_mean": 20.1132075472,
"line_max": 66,
"alpha_frac": 0.5674709562,
"autogenerated": false,
"ratio": 2.8766066838046274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39440776400046273,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Buffer']
class Buffer:
    '''
    Buffer objects are OpenGL objects that store an array of unformatted memory
    allocated by the OpenGL context, (data allocated on the GPU).

    These can be used to store vertex data, pixel data retrieved from images
    or the framebuffer, and a variety of other things.

    A Buffer object cannot be instantiated directly, it requires a context.
    Use :py:meth:`Context.buffer` to create one.

    Copy buffer content using :py:meth:`Context.copy_buffer`.
    '''

    __slots__ = ['mglo', '_size', '_dynamic', '_glo', 'ctx', 'extra']

    def __init__(self):
        self.mglo = None  #: Internal representation for debug purposes only.
        self._size = None  #: Original buffer size during creation
        self._dynamic = None
        self._glo = None
        self.ctx = None  #: The context this object belongs to
        self.extra = None  #: Any - Attribute for storing user defined objects
        raise TypeError()

    def __repr__(self):
        return '<Buffer: %d>' % self.glo

    def __eq__(self, other):
        # two wrappers are equal iff they wrap the very same internal object
        return type(self) is type(other) and self.mglo is other.mglo

    @property
    def size(self) -> int:
        '''
        int: The size of the buffer.
        '''
        return self.mglo.size()

    @property
    def dynamic(self) -> bool:
        '''
        bool: Is the buffer created with the dynamic flag?
        '''
        return self._dynamic

    @property
    def glo(self) -> int:
        '''
        int: The internal OpenGL object.

        This values is provided for debug purposes only.
        '''
        return self._glo

    def write(self, data, *, offset=0) -> None:
        '''
        Write the content.

        Args:
            data (bytes): The data.

        Keyword Args:
            offset (int): The offset.
        '''
        self.mglo.write(data, offset)

    def write_chunks(self, data, start, step, count) -> None:
        '''
        Split data to count equal parts.

        Write the chunks using offsets calculated from start, step and stop.

        Args:
            data (bytes): The data.
            start (int): First offset.
            step (int): Offset increment.
            count (int): The number of offsets.
        '''
        self.mglo.write_chunks(data, start, step, count)

    def read(self, size=-1, *, offset=0) -> bytes:
        '''
        Read the content.

        Args:
            size (int): The size. Value ``-1`` means all.

        Keyword Args:
            offset (int): The offset.

        Returns:
            bytes
        '''
        return self.mglo.read(size, offset)

    def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None:
        '''
        Read the content into a buffer.

        Args:
            buffer (bytearray): The buffer that will receive the content.
            size (int): The size. Value ``-1`` means all.

        Keyword Args:
            offset (int): The read offset.
            write_offset (int): The write offset.
        '''
        return self.mglo.read_into(buffer, size, offset, write_offset)

    def read_chunks(self, chunk_size, start, step, count) -> bytes:
        '''
        Read the content.

        Read and concatenate the chunks of size chunk_size
        using offsets calculated from start, step and stop.

        Args:
            chunk_size (int): The chunk size.
            start (int): First offset.
            step (int): Offset increment.
            count (int): The number of offsets.

        Returns:
            bytes
        '''
        return self.mglo.read_chunks(chunk_size, start, step, count)

    def read_chunks_into(self, buffer, chunk_size, start, step, count, *, write_offset=0) -> None:
        '''
        Read the content into a buffer.

        Read and concatenate the chunks of size chunk_size
        using offsets calculated from start, step and stop.

        Args:
            buffer (bytearray): The buffer that will receive the content.
            chunk_size (int): The chunk size.
            start (int): First offset.
            step (int): Offset increment.
            count (int): The number of offsets.

        Keyword Args:
            write_offset (int): The write offset.
        '''
        # fixed: the original dispatched to self.mglo.read(...) with a
        # mismatched argument list instead of read_chunks_into
        return self.mglo.read_chunks_into(
            buffer, chunk_size, start, step, count, write_offset)

    def clear(self, size=-1, *, offset=0, chunk=None) -> None:
        '''
        Clear the content.

        Args:
            size (int): The size. Value ``-1`` means all.

        Keyword Args:
            offset (int): The offset.
            chunk (bytes): The chunk to use repeatedly.
        '''
        self.mglo.clear(size, offset, chunk)

    def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:
        '''
        Bind the buffer to a uniform block.

        Args:
            binding (int): The uniform block binding.

        Keyword Args:
            offset (int): The offset.
            size (int): The size. Value ``-1`` means all.
        '''
        self.mglo.bind_to_uniform_block(binding, offset, size)

    def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None:
        '''
        Bind the buffer to a shader storage buffer.

        Args:
            binding (int): The shader storage binding.

        Keyword Args:
            offset (int): The offset.
            size (int): The size. Value ``-1`` means all.
        '''
        self.mglo.bind_to_storage_buffer(binding, offset, size)

    def orphan(self, size=-1) -> None:
        '''
        Orphan the buffer with the option to specify a new size.

        It is also called buffer re-specification.

        Reallocate the buffer object before you start modifying it.

        Since allocating storage is likely faster than the implicit synchronization,
        you gain significant performance advantages over synchronization.

        The old storage will still be used by the OpenGL commands that have been sent previously.
        It is likely that the GL driver will not be doing any allocation at all,
        but will just be pulling an old free block off the unused buffer queue and use it,
        so it is likely to be very efficient.

        Keyword Args:
            size (int): The new byte size if the buffer. If not supplied
                the buffer size will be unchanged.

        .. rubric:: Example

        .. code-block:: python

            # For simplicity the VertexArray creation is omitted
            >>> vbo = ctx.buffer(reserve=1024)

            # Fill the buffer
            >>> vbo.write(some_temporary_data)

            # Issue a render call that uses the vbo
            >>> vao.render(...)

            # Orphan the buffer
            >>> vbo.orphan()

            # Issue another render call without waiting for the previous one
            >>> vbo.write(some_temporary_data)
            >>> vao.render(...)

            # We can also resize the buffer. In this case we double the size
            >> vbo.orphan(vbo.size * 2)
        '''
        self.mglo.orphan(size)

    def release(self) -> None:
        '''
        Release the ModernGL object.
        '''
        self.mglo.release()

    def bind(self, *attribs, layout=None):
        """Helper method for binding a buffer.

        Returns:
            (self, layout, *attribs) tuple
        """
        return (self, layout, *attribs)

    def assign(self, index):
        """Helper method for assigning a buffer.

        Returns:
            (self, index) tuple
        """
        return (self, index)
| {
"repo_name": "cprogrammer1994/ModernGL",
"path": "moderngl/buffer.py",
"copies": "1",
"size": "8015",
"license": "mit",
"hash": 1761610567942753800,
"line_mean": 28.4669117647,
"line_max": 101,
"alpha_frac": 0.5312538989,
"autogenerated": false,
"ratio": 4.61162255466053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.564287645356053,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Bug']
from typing import List, Dict, Optional, Any, Tuple, Iterable
import os
import warnings
import logging
import attr
from .language import Language
from .test import TestSuite
from .coverage import CoverageInstructions
from ..compiler import Compiler
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
def _convert_languages(langs: Iterable[Language]) -> Tuple[Language, ...]:
    """attrs converter: materialise the given languages as an immutable tuple."""
    frozen = tuple(langs)
    return frozen
@attr.s(frozen=True)
class Bug(object):
    """
    Bugs provide an immutable snapshot of a software system at a given
    point in time, allowing it to be empirically studied and inspected in a
    transparent and reproducible manner.
    """
    name = attr.ib(type=str)                 # unique identifier for the bug
    image = attr.ib(type=str)                # name of the associated Docker image
    dataset = attr.ib(type=Optional[str])    # dataset the bug belongs to, if any
    program = attr.ib(type=Optional[str])    # name of the faulty program, if any
    source = attr.ib(type=Optional[str])     # source that provides the bug, if any
    source_dir = attr.ib(type=str)           # source location inside the image
    # normalised to a tuple so the frozen instance stays hashable/immutable
    languages = attr.ib(type=Tuple[Language, ...], converter=_convert_languages)
    tests = attr.ib(type=TestSuite)
    compiler = attr.ib(type=Compiler)
    # optional instructions describing how to compute coverage for this bug
    instructions_coverage = attr.ib(type=Optional[CoverageInstructions],
                                    default=None)

    @property
    def harness(self) -> TestSuite:
        """Deprecated alias for :attr:`tests` (emits DeprecationWarning)."""
        warnings.warn("'harness' is being deprecated in favour of 'tests'",
                      DeprecationWarning)
        return self.tests

    @staticmethod
    def from_dict(d: Dict[str, Any]) -> 'Bug':
        """Construct a Bug from its dictionary description (inverse of to_dict)."""
        name = d['name']
        languages = [Language[lang] for lang in d['languages']]
        tests = TestSuite.from_dict(d['test-harness'])
        compiler = Compiler.from_dict(d['compiler'])

        # Resolve coverage instructions: prefer an explicitly typed
        # 'coverage' section; otherwise fall back to the language default
        # when the bug uses exactly one language.
        instructions_coverage = None  # type: Optional[CoverageInstructions]
        has_cov_ins = 'coverage' in d
        has_cov_ins_type = has_cov_ins and 'type' in d['coverage']
        if has_cov_ins_type:
            logger.debug("bug [%s]: using coverage instructions type [%s]",
                         name, d['coverage']['type'])
            instructions_coverage = \
                CoverageInstructions.from_dict(d['coverage'])
        elif len(languages) == 1:
            lang = languages[0]
            cls_cov_ins = CoverageInstructions.language_default(lang)
            if not cls_cov_ins:
                logger.debug("no default coverage instructions for language [%s]",
                             lang)
            else:
                logger.debug("bug [%s]: using default coverage instructions type [%s] for language [%s]",
                             name, cls_cov_ins, lang.name)
                instructions_coverage = \
                    cls_cov_ins.from_dict(d.get('coverage', {}))
        else:
            # multi-language bug without an explicit coverage section
            logger.warning("no coverage instructions for bug: %s", name)

        return Bug(name=name,
                   image=d['image'],
                   dataset=d['dataset'],
                   program=d['program'],
                   source=d['source'],
                   source_dir=d['source-location'],
                   languages=languages,
                   tests=tests,
                   compiler=compiler,
                   instructions_coverage=instructions_coverage)

    def to_dict(self) -> Dict[str, Any]:
        """
        Produces a dictionary-based description of this bug, ready to be
        serialised in a JSON or YAML format.
        """
        d = {
            'name': self.name,
            'image': self.image,
            'program': self.program,
            'dataset': self.dataset,
            'source': self.source,
            'source-location': self.source_dir,
            'languages': [l.name for l in self.languages],
            'compiler': self.compiler.to_dict(),
            'test-harness': self.tests.to_dict()
        }
        # the coverage section is optional and omitted when absent
        if self.instructions_coverage:
            d['coverage'] = self.instructions_coverage.to_dict()
        return d
| {
"repo_name": "ChrisTimperley/AutomatedRepairBenchmarks.c",
"path": "bugzoo/core/bug.py",
"copies": "3",
"size": "3892",
"license": "mit",
"hash": -951123956188341600,
"line_mean": 35.7169811321,
"line_max": 105,
"alpha_frac": 0.576053443,
"autogenerated": false,
"ratio": 4.230434782608696,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000479028175376923,
"num_lines": 106
} |
__all__ = ("BuildArtifactsResource",)
from sqlalchemy.orm import joinedload
from marshmallow import fields
from zeus.models import Artifact, Build, Job
from .base_build import BaseBuildResource
from ..schemas import ArtifactSchema, JobSchema
class _ArtifactWithJobSchema(ArtifactSchema):
    """Artifact schema extended with its parent job (serialisation only)."""
    # stats/failures are excluded from the nested job to keep payloads small
    job = fields.Nested(
        JobSchema(exclude=("stats", "failures")), dump_only=True, required=False
    )


# module-level schema instance, reused across requests
artifacts_schema = _ArtifactWithJobSchema(many=True)
class BuildArtifactsResource(BaseBuildResource):
    def get(self, build: Build):
        """
        Return a list of artifacts for a given build.
        """
        # Eagerly load job -> build -> repository so serialising each
        # artifact's nested job does not trigger N+1 queries.
        query = (
            Artifact.query.options(
                joinedload("job"),
                joinedload("job").joinedload("build"),
                joinedload("job").joinedload("build").joinedload("repository"),
            )
            .join(Job, Job.id == Artifact.job_id)
            .filter(Job.build_id == build.id)
            .order_by(Artifact.name.asc())
        )
        return self.respond_with_schema(artifacts_schema, query)
| {
"repo_name": "getsentry/zeus",
"path": "zeus/api/resources/build_artifacts.py",
"copies": "1",
"size": "1075",
"license": "apache-2.0",
"hash": -1080123855051662800,
"line_mean": 28.0540540541,
"line_max": 80,
"alpha_frac": 0.6334883721,
"autogenerated": false,
"ratio": 3.9814814814814814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033366700033366696,
"num_lines": 37
} |
# Fixed: ('Buildign') was a bare string (not a tuple) and misspelled the
# class name, so `from ... import *` could not export Building.
__all__ = ('Building',)
class Building:
    """A building belonging to a city/player, placed on the map."""

    # engine properties
    _id = -1
    name = ""
    name_de = ""
    size = "1x1"
    # building type
    _type = None
    # possible building types & names
    _types = ["wall", "tower", "blacksmith", "bordel", "stables", "harbor",
              "tunnels", "siege_workshop", "mansion", "market"]
    # display names, keyed by the type ids in _types
    # fixed: the key "harbour" did not match the type id "harbor" and
    # "market" was missing, so get_name() raised KeyError for both types
    _names = {
        "wall": "Wall",
        "tower": "Tower",
        "blacksmith": "Blacksmith",
        "bordel": "Bordel",
        "stables": "Stables",
        "harbor": "Harbour",
        "tunnels": "Tunnel Systems",
        "siege_workshop": "Siege Workshop",
        "mansion": "Mansion",
        "market": "Market"}
    # set a non-default image path to draw here
    # normally it will be generated from building name
    image_override = None
    # building flags
    is_undestroyable = False
    is_on_water = False
    max_per_player = -1
    max_per_map = -1
    # changeable building vars
    city = None
    player_num = -1
    hp = -1
    # absolute position & position relative to town center
    pos_x = -1
    pos_y = -1
    rel_x = None
    rel_y = None

    def __init__(self, city, building_type):
        # abort when building_type is not defined
        if building_type not in self._types:
            print("[Building] type " + building_type + " is not defined")
            return
        # set building type & parent city
        self._type = building_type
        self.city = city
        self.player_num = city.get_player_num()

    def get_type(self):
        '''returns the type of this building'''
        return self._type

    def get_player_num(self):
        '''returns the number of buildings owner'''
        return self.player_num

    def get_name(self):
        '''return the english name of this building'''
        return self._names[self.get_type()]

    def get_city(self):
        '''returns the parent city'''
        return self.city

    def get_image_path(self):
        '''return the image file name of this building'''
        if self.image_override:
            return self.image_override
        return "bld_" + self.get_type()

    def set_position(self, pos):
        '''set the absolute position of this building; pos is an (x, y) tuple'''
        # tuple-parameter unpacking was Python-2-only syntax; unpacking
        # explicitly keeps the same call signature on Python 2 and 3
        pos_x, pos_y = pos
        self.pos_x = pos_x
        self.pos_y = pos_y
        # calculate & set relative position (convention: rel = city - pos)
        city_pos = self.city.get_position()
        self.rel_x = city_pos[0] - pos_x
        self.rel_y = city_pos[1] - pos_y

    def set_relative_position(self, rel):
        '''sets the relative position of this building towards its city'''
        rel_x, rel_y = rel
        self.rel_x = rel_x
        self.rel_y = rel_y
        # fixed: the original referenced undefined names pos_x/pos_y here
        # (NameError). The absolute position is computed as the inverse of
        # set_position's rel = city - pos convention, so the two methods
        # round-trip consistently.
        city_pos = self.city.get_position()
        self.pos_x = city_pos[0] - rel_x
        self.pos_y = city_pos[1] - rel_y

    def get_position(self):
        '''returns absolute position of this building'''
        return (self.pos_x, self.pos_y)

    def get_relative_position(self):
        '''get the relative position of this building'''
        return (self.rel_x, self.rel_y)

    def __repr__(self):
        return "<building city=" + self.get_city().get_name() + " type=" + self.get_type() + " pos=" + str(self.get_relative_position()) + ">"
| {
"repo_name": "herrschr/pocket-throne",
"path": "pocketthrone/entities/building.py",
"copies": "2",
"size": "2877",
"license": "bsd-2-clause",
"hash": -1064151283890396400,
"line_mean": 25.3944954128,
"line_max": 136,
"alpha_frac": 0.6548488008,
"autogenerated": false,
"ratio": 2.8626865671641792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9336826563984011,
"avg_score": 0.0361417607960338,
"num_lines": 109
} |
__all__ = ['BuildInstructions']
from typing import Optional, Dict, Any
from pprint import pprint as pp
import warnings
import os
import shutil
import json
import attr
import docker
import yaml
def converter_args(args: Dict[str, Any]) -> Dict[str, str]:
    """attrs converter: coerce every build-argument value to its str() form."""
    converted = {}
    for key, value in args.items():
        converted[key] = str(value)
    return converted
@attr.s(frozen=True)
class BuildInstructions(object):
    """
    Used to store instructions on how to build a Docker image.

    TODO: only allow relative, forward roots
    """
    # directory containing the build-instruction file; relative paths
    # below are resolved against it
    root = attr.ib(type=str)
    # tag (name) of the Docker image these instructions build
    tag = attr.ib(type=str)
    context = attr.ib(type=str)
    # NOTE(review): the bare strings below sit *above* the attribute they
    # appear to describe (PEP 257 places attribute docstrings after the
    # attribute) -- confirm which attribute each one documents before
    # relying on them.
    """
    The path to the Dockerfile used to build the image associated with
    these instructions, relative to the location of the build instruction
    file.
    """
    filename = attr.ib(type=str)
    """
    A dictionary of build-time arguments provided during the construction
    of the Docker image associated with these instructions.
    """
    # values are coerced to str by the converter (docker build args)
    arguments = attr.ib(type=Dict[str, str],
                        converter=converter_args)
    """
    The name of the source that provides this blueprint, if any.
    """
    source = attr.ib(type=Optional[str])
    """
    The name of the Docker image that the construction of the image
    associated with these build instructions depends on. If no such
    dependency exists, None is returned.
    """
    depends_on = attr.ib(type=Optional[str])
    """
    The name of the build stage, if any, that should be used when building this
    image.
    """
    build_stage = attr.ib(type=Optional[str], default=None)

    @property
    def name(self) -> str:
        """Alias for the image tag."""
        return self.tag

    @property
    def abs_context(self) -> str:
        """Absolute, normalised path of the Docker build context."""
        path = os.path.join(self.root, self.context)
        path = os.path.normpath(path)
        return path

    @property
    def filename_abs(self) -> str:
        """Absolute path of the Dockerfile."""
        return os.path.join(self.root, self.filename)
| {
"repo_name": "ChrisTimperley/RepairBox",
"path": "bugzoo/core/build.py",
"copies": "2",
"size": "1888",
"license": "mit",
"hash": -727654362655577500,
"line_mean": 24.5135135135,
"line_max": 79,
"alpha_frac": 0.6525423729,
"autogenerated": false,
"ratio": 3.933333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 74
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.