| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# Code adapted from https://github.com/karpathy/neuraltalk
# by Andrej Karpathy
import json
import os
import random
import scipy.io
import codecs
from collections import defaultdict
import itertools
import gzip
import sys
import numpy
class BasicDataProvider:
def __init__(self, dataset, root='.', extra_train=False, audio_kind=None):
self.root = root
# !assumptions on folder structure
self.dataset_root = os.path.join(self.root, 'data', dataset)
self.image_root = os.path.join(self.root, 'data', dataset, 'imgs')
# load the dataset into memory
dataset_path = os.path.join(self.dataset_root, 'dataset.json')
ipa_path = os.path.join(self.dataset_root, 'dataset.ipa.jsonl.gz')
audio_path = os.path.join(self.dataset_root, 'dataset.{}.npy'.format(audio_kind))
self.dataset = json.load(open(dataset_path, 'r'))
# load ipa
try:
IPA = {}
for line in gzip.open(ipa_path):
item = json.loads(line)
IPA[item['sentid']] = item['phonemes']
# add ipa field to dataset
for image in self.dataset['images']:
for sentence in image['sentences']:
sentence['ipa'] = IPA[sentence['sentid']]
except IOError:
sys.stderr.write("Could not read file {}: IPA transcription not available\n".format(ipa_path))
try:
AUDIO = numpy.load(audio_path)
sentid = 0
for image in self.dataset['images']:
for sentence in image['sentences']:
sentence['audio'] = AUDIO[sentid]
sentid += 1
except IOError:
sys.stderr.write("Could not read file {}: audio features not available\n".format(audio_path))
# load the image features into memory
features_path = os.path.join(self.dataset_root, 'vgg_feats.mat')
features_struct = scipy.io.loadmat(features_path)
self.features = features_struct['feats']
# group images by their train/val/test split into a dictionary -> list structure
self.split = defaultdict(list)
for img in self.dataset['images']:
if extra_train and img['split'] == 'restval':
img['split']='train'
self.split[img['split']].append(img)
# "PRIVATE" FUNCTIONS
# in future we may want to create copies here so that we don't touch the
# data provider class data, but for now lets do the simple thing and
# just return raw internal img sent structs. This also has the advantage
# that the driver could store various useful caching stuff in these structs
# and they will be returned in the future with the cache present
def _getImage(self, img):
""" create an image structure for the driver """
# lazily fill in some attributes
        if 'local_file_path' not in img: img['local_file_path'] = os.path.join(self.image_root, img['filename'])
        if 'feat' not in img: # also fill in the features
feature_index = img['imgid'] # NOTE: imgid is an integer, and it indexes into features
img['feat'] = self.features[:,feature_index]
return img
def _getSentence(self, sent):
""" create a sentence structure for the driver """
# NOOP for now
return sent
# PUBLIC FUNCTIONS
def getSplitSize(self, split, ofwhat = 'sentences'):
""" return size of a split, either number of sentences or number of images """
if ofwhat == 'sentences':
return sum(len(img['sentences']) for img in self.split[split])
else: # assume images
return len(self.split[split])
def sampleImageSentencePair(self, split = 'train'):
""" sample image sentence pair from a split """
images = self.split[split]
img = random.choice(images)
sent = random.choice(img['sentences'])
out = {}
out['image'] = self._getImage(img)
out['sentence'] = self._getSentence(sent)
return out
def iterImageSentencePair(self, split = 'train', max_images = -1):
for i,img in enumerate(self.split[split]):
if max_images >= 0 and i >= max_images: break
for sent in img['sentences']:
out = {}
out['image'] = self._getImage(img)
out['sentence'] = self._getSentence(sent)
yield out
def iterImageSentencePairBatch(self, split = 'train', max_images = -1, max_batch_size = 100):
batch = []
for i,img in enumerate(self.split[split]):
if max_images >= 0 and i >= max_images: break
for sent in img['sentences']:
out = {}
out['image'] = self._getImage(img)
out['sentence'] = self._getSentence(sent)
batch.append(out)
if len(batch) >= max_batch_size:
yield batch
batch = []
if batch:
yield batch
def iterSentences(self, split = 'train'):
for img in self.split[split]:
for sent in img['sentences']:
yield self._getSentence(sent)
def iterImages(self, split = 'train', shuffle = False, max_images = -1):
imglist = self.split[split]
        ix = list(range(len(imglist)))  # materialise so random.shuffle works on Python 3
if shuffle:
random.shuffle(ix)
if max_images > 0:
ix = ix[:min(len(ix),max_images)] # crop the list
for i in ix:
yield self._getImage(imglist[i])
def getDataProvider(dataset, root='.', extra_train=False, audio_kind='fbank'):
""" we could intercept a special dataset and return different data providers """
assert dataset in ['flickr8k', 'flickr30k', 'coco', 'coco+flickr30k'], 'dataset %s unknown' % (dataset, )
if dataset == 'coco+flickr30k':
return CombinedDataProvider(datasets=['coco', 'flickr30k'], root=root, extra_train=extra_train, audio_kind=audio_kind)
else:
return BasicDataProvider(dataset, root, extra_train=extra_train, audio_kind=audio_kind)
class CombinedDataProvider(object):
def __init__(self, datasets, root='.', extra_train=False, audio_kind='fbank'):
self.datasets = datasets
self.root = root
self.providers = [ BasicDataProvider(dataset, root=self.root, extra_train=extra_train, audio_kind=audio_kind)
for dataset in self.datasets ]
def getSplitSize(self, split, ofwhat='sentences'):
return sum((p.getSplitSize(split, ofwhat=ofwhat) for p in self.providers))
def sampleImageSentencePair(self, split='train'):
raise NotImplementedError()
    def iterImageSentencePair(self, split='train', max_images=-1):
        iters = [ p.iterImageSentencePair(split=split, max_images=max_images) for p in self.providers ]
for item in itertools.chain(*iters):
yield item
def iterImageSentencePairBatch(self, split='train', max_images=-1, max_batch_size = 100):
iters = [ p.iterImageSentencePairBatch(split=split, max_images=max_images, max_batch_size=max_batch_size) for p in self.providers ]
for item in itertools.chain(*iters):
yield item
def iterSentences(self, split = 'train'):
iters = [ p.iterSentences(split=split) for p in self.providers ]
for item in itertools.chain(*iters):
yield item
def iterImages(self, split = 'train', max_images = -1):
iters = [ p.iterImages(split=split, max_images=max_images) for p in self.providers ]
for item in itertools.chain(*iters):
yield item
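# --- Usage sketch (not part of the original module) ----------------------
# A minimal, hedged example of driving the provider above. It assumes the
# data/flickr8k folder layout expected by BasicDataProvider.__init__, and
# that each sentence dict carries the 'raw' field from the standard
# neuraltalk dataset.json format.
if __name__ == '__main__':
    dp = getDataProvider('flickr8k', root='.', audio_kind='fbank')
    print(dp.getSplitSize('train', ofwhat='images'))
    for pair in dp.iterImageSentencePair(split='train', max_images=2):
        print(pair['image']['filename'], pair['sentence'].get('raw'))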
| gchrupala/reimaginet | imaginet/data_provider.py | Python | mit | 7,067 |
import arrow
import discord
from sigma.core.permission import check_man_msg, check_admin
from sigma.core.utils import user_avatar
async def textmute(cmd, message, args):
if not check_man_msg(message.author, message.channel):
response = discord.Embed(title='⛔ Unpermitted. Manage Messages Permission Needed.', color=0xDB0000)
else:
if not message.mentions:
response = discord.Embed(title='❗ No user targeted.', color=0xDB0000)
else:
target = message.mentions[0]
if target.id != message.author.id or check_admin(target, message.channel):
try:
mute_list = cmd.db.get_settings(message.guild.id, 'MutedUsers')
                except Exception:
mute_list = []
if target.id in mute_list:
response = discord.Embed(title='❗ User already muted.', color=0xDB0000)
else:
mute_list.append(target.id)
cmd.db.set_settings(message.guild.id, 'MutedUsers', mute_list)
response = discord.Embed(color=0x66CC66,
title=f'✅ {target.name}#{target.discriminator} has been text muted.')
try:
log_channel_id = cmd.db.get_settings(message.guild.id, 'LoggingChannel')
                except Exception:
log_channel_id = None
if log_channel_id:
log_channel = discord.utils.find(lambda x: x.id == log_channel_id, message.guild.channels)
if log_channel:
log_embed = discord.Embed(color=0x696969, timestamp=arrow.utcnow().datetime)
log_embed.set_author(name='A Member Has Been Muted', icon_url=user_avatar(target))
log_embed.add_field(name='🔇 Muted User',
value=f'{target.mention}\n{target.name}#{target.discriminator}',
inline=True)
author = message.author
log_embed.add_field(name='🛡 Responsible',
value=f'{author.mention}\n{author.name}#{author.discriminator}',
inline=True)
if len(args) > 1:
log_embed.add_field(name='📄 Reason', value=f"```\n{' '.join(args[1:])}\n```", inline=False)
log_embed.set_footer(text=f'UserID: {target.id}')
await log_channel.send(embed=log_embed)
else:
response = discord.Embed(title='❗ You can\'t mute yourself.', color=0xDB0000)
await message.channel.send(embed=response)
| aurora-pro/apex-sigma | sigma/plugins/moderation/punish/textmute.py | Python | gpl-3.0 | 2,873 |
from unittest import TestCase
from nav.tableformat import SimpleTableFormatter
class TestSimpleTableFormatter(TestCase):
def test_column_count(self):
data = (('one', 'two', 'three'),
('alice', 'bob', 'charlie'))
s = SimpleTableFormatter(data)
self.assertEqual(s._get_column_count(), 3)
def test_get_max_width_of_column(self):
data = (('1234', '12345', '1234567'),
('123', '12', '123456'))
s = SimpleTableFormatter(data)
self.assertEqual(s._get_max_width_of_column(0), 4)
self.assertEqual(s._get_max_width_of_column(1), 5)
self.assertEqual(s._get_max_width_of_column(2), 7)
def test_get_max_width_of_column_with_integers(self):
data = ((1234, '12345', '1234567'),
('123', 12, '123456'))
s = SimpleTableFormatter(data)
self.assertEqual(s._get_max_width_of_column(0), 4)
self.assertEqual(s._get_max_width_of_column(1), 5)
self.assertEqual(s._get_max_width_of_column(2), 7)
def test_find_widest_elements(self):
data = (('1234', '12345', '1234567'),
('123', '12', '123456'))
s = SimpleTableFormatter(data)
self.assertEqual(s._find_widest_elements(), [4, 5, 7])
def test_format_row(self):
row = ['one', 'two', 'three']
widths = [len(i) for i in row]
s = SimpleTableFormatter(None)
self.assertEqual(s._format_row(row, widths), 'one | two | three')
def test_get_formatted_table(self):
data = (('1234', '12345', '1234567'),
('123', '12', '123456'))
s = SimpleTableFormatter(data)
self.assertEqual(s.get_formatted_table(),
"1234 | 12345 | 1234567\n"
" 123 | 12 | 123456")
| UNINETT/nav | tests/unittests/general/test_tableformat.py | Python | gpl-2.0 | 1,813 |
import numina.core.pipeline
import pytest
from ..drpbase import DrpBase
def test_drpbase():
drpbase = DrpBase()
with pytest.raises(KeyError):
drpbase.query_by_name('TEST1')
assert drpbase.query_all() == {}
def test_invalid_instrument1():
class Something(object):
pass
drpbase = DrpBase()
    assert drpbase.instrumentdrp_check(Something(), 'TEST1') is False
def test_invalid_instrument1_warning():
with pytest.warns(RuntimeWarning):
test_invalid_instrument1()
def test_invalid_instrument2():
insdrp = numina.core.pipeline.InstrumentDRP('MYNAME', {}, {}, [], [])
drpbase = DrpBase()
res = drpbase.instrumentdrp_check(insdrp, 'TEST1')
    assert res is False
@pytest.mark.xfail(reason="warning seems unreliable")
def test_invalid_instrument2_warning():
insdrp = numina.core.pipeline.InstrumentDRP('MYNAME', {}, {}, [], [])
drpbase = DrpBase()
with pytest.warns(RuntimeWarning):
drpbase.instrumentdrp_check(insdrp, 'TEST1')
def test_valid_instrument():
insdrp = numina.core.pipeline.InstrumentDRP('TEST1', {}, {}, [], [])
drpbase = DrpBase()
res = drpbase.instrumentdrp_check(insdrp, 'TEST1')
assert res
| guaix-ucm/numina | numina/drps/tests/test_drpbase.py | Python | gpl-3.0 | 1,219 |
def check(time):
    """Feasibility test: can all m students be served within `time` minutes?"""
box = A[:]
now = last
for student in range(m):
rest = time - now - 1
if rest <= 0:
return False
while now >= 0 and rest >= 0:
if box[now] <= rest:
rest -= box[now]
now -= 1
else:
box[now] -= rest
break
if now == -1:
return True
return False
n, m = map(int, input().split())
A = list(map(int, input().split()))
last = n-1
while A[last] == 0:
last -= 1
low, high = 1, n + sum(A) + 1
while high - low > 1:
mid = (high + low) // 2
    if not check(mid):
low = mid
else:
high = mid
print(high)
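# --- Note (not part of the original solution) -----------------------------
# The loop above is the standard "binary search on the answer" pattern:
# check(t) is monotone in t, `low` stays infeasible and `high` stays
# feasible, so the loop converges on the smallest feasible time. A generic
# skeleton of the same pattern, with `feasible` as a placeholder predicate:
def smallest_feasible(lo, hi, feasible):
    # invariant: feasible(lo) is False, feasible(hi) is True
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if feasible(mid):
            hi = mid
        else:
            lo = mid
    return hi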
| knuu/competitive-programming | codeforces/cdf307_2c.py | Python | mit | 701 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.ruby
~~~~~~~~~~~~~~~~~~~~
Lexers for Ruby and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \
bygroups, default, LexerContext, do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic
from pygments.util import shebang_matches
__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer']
line_re = re.compile('.*?\n')
RUBY_OPERATORS = (
'*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~',
'[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '==='
)
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby', 'duby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
'*.rbx', '*.duby', 'Gemfile']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), String.Delimiter, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
(words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'([a-zA-Z_]\w*)(:)(?!:)',
bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
(lbrace, String.Regex, '#push'),
(rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other),
(r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'\A#!.+?$', Comment.Hashbang),
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end.*?$', Comment.Multiline),
# keywords
(words((
'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?',
'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo',
'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef',
'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'),
Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_]\w*'
r'(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(words((
'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader',
'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private',
'module_function', 'public', 'protected', 'true', 'false', 'nil'),
suffix=r'\b'),
Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(words((
'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include',
'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil',
'private_method_defined', 'protected_method_defined',
'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'),
Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(words((
'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort',
'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller',
'catch', 'chomp', 'chop', 'class_eval', 'class_variables',
'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set',
'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork',
'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub',
'hash', 'id', 'included_modules', 'inspect', 'instance_eval',
'instance_method', 'instance_methods',
'instance_variable_get', 'instance_variable_set', 'instance_variables',
'lambda', 'load', 'local_variables', 'loop',
'method', 'method_missing', 'methods', 'module_eval', 'name',
'object_id', 'open', 'p', 'print', 'printf', 'private_class_method',
'private_instance_methods',
'private_methods', 'proc', 'protected_instance_methods',
'protected_methods', 'public_class_method',
'public_instance_methods', 'public_methods',
'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require',
'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep',
'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint',
'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint',
'untrace_var', 'warn'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z]\w+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(words(RUBY_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_]\w*[!?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_]\w*', Name.Class, '#pop'),
default('#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_]\w*', String.Interpol),
(r'#\$[a-zA-Z_]\w*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
    _prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] '
                            r'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode)):
yield item
class FancyLexer(RegexLexer):
"""
Pygments Lexer For `Fancy <http://www.fancy-lang.org/>`_.
Fancy is a self-hosted, pure object-oriented, dynamic,
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
.. versionadded:: 1.5
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
aliases = ['fancy', 'fy']
mimetypes = ['text/x-fancysrc']
tokens = {
# copied from PerlLexer:
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\s+', Text),
# balanced delimiters (copied from PerlLexer):
(r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
# Comments
(r'#(.*?)\n', Comment.Single),
# Symbols
(r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
# Multi-line DoubleQuotedString
(r'"""(\\\\|\\"|[^"])*"""', String),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# keywords
(r'(def|class|try|catch|finally|retry|return|return_local|match|'
r'case|->|=>)\b', Keyword),
# constants
(r'(self|super|nil|false|true)\b', Name.Constant),
(r'[(){};,/?|:\\]', Punctuation),
# names
(words((
'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
Name.Builtin),
# functions
(r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
# operators, must be below functions
(r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
            (r'[A-Z]\w*', Name.Constant),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            (r'@@?', Operator),
            (r'[a-zA-Z_]\w*', Name),
# numbers - / checks are necessary to avoid mismarking regexes,
# see comment in RubyLexer
(r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
(r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
]
}
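# --- Usage sketch (not part of the original module) ------------------------
# Exercising the lexer through the standard pygments API; assumes a stock
# pygments installation in which this module is importable as shown.
def _demo():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    print(highlight('puts "hello #{name}"', RubyLexer(), TerminalFormatter()))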
| wandb/client | wandb/vendor/pygments/lexers/ruby.py | Python | mit | 22,141 |
"""
Definition of the course team feature.
"""
from django.utils.translation import ugettext_noop
from courseware.tabs import EnrolledTab
from . import is_feature_enabled
class TeamsTab(EnrolledTab):
"""
The representation of the course teams view type.
"""
type = "teams"
title = ugettext_noop("Teams")
view_name = "teams_dashboard"
@classmethod
def is_enabled(cls, course, user=None):
"""Returns true if the teams feature is enabled in the course.
Args:
course (CourseDescriptor): the course using the feature
user (User): the user interacting with the course
"""
if not super(TeamsTab, cls).is_enabled(course, user=user):
return False
return is_feature_enabled(course)
| ahmedaljazzar/edx-platform | lms/djangoapps/teams/plugins.py | Python | agpl-3.0 | 788 |
import os
import fnmatch
import py_compile
from django.core.management.base import NoArgsCommand, CommandError
from django.conf import settings
from optparse import make_option
from os.path import join as _j
from django_extensions.management.utils import signalcommand
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--path', '-p', action='store', dest='path',
help='Specify path to recurse into'),
)
help = "Compile python bytecode files for the project."
requires_model_validation = False
@signalcommand
def handle_noargs(self, **options):
project_root = options.get("path", None)
if not project_root:
project_root = getattr(settings, 'BASE_DIR', None)
        verbosity = int(options.get("verbosity", 1))
if not project_root:
raise CommandError("No --path specified and settings.py does not contain BASE_DIR")
for root, dirs, filenames in os.walk(project_root):
for filename in fnmatch.filter(filenames, '*.py'):
full_path = _j(root, filename)
if verbosity > 1:
self.stdout.write("Compiling %s...\n" % full_path)
py_compile.compile(full_path)
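# Typical invocation, as a hedged sketch (assumes django-extensions is in
# INSTALLED_APPS so the command is discoverable):
#
#     python manage.py compile_pyc --path /srv/myproject -v 2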
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/compile_pyc.py | Python | gpl-2.0 | 1,277 |
'''
---------------------------------------- Masquerade ----------------------------------------
Allow course staff to see a student or staff view of courseware.
Which kind of view has been selected is stored in the session state.
'''
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from opaque_keys.edx.keys import CourseKey
from xblock.fragment import Fragment
from xblock.runtime import KeyValueStore
from student.models import CourseEnrollment
from util.json_request import JsonResponse, expect_json
from xmodule.partitions.partitions import NoSuchUserPartitionGroupError
log = logging.getLogger(__name__)
# The key used to store a user's course-level masquerade information in the Django session.
# The value is a dict from course keys to CourseMasquerade objects.
MASQUERADE_SETTINGS_KEY = 'masquerade_settings'
# The key used to store temporary XBlock field data in the Django session. This is where field
# data is stored to avoid modifying the state of the user we are masquerading as.
MASQUERADE_DATA_KEY = 'masquerade_data'
class CourseMasquerade(object):
"""
Masquerade settings for a particular course.
"""
def __init__(self, course_key, role='student', user_partition_id=None, group_id=None, user_name=None):
# All parameters to this function must be named identically to the corresponding attribute.
# If you remove or rename an attribute, also update the __setstate__() method to migrate
# old data from users' sessions.
self.course_key = course_key
self.role = role
self.user_partition_id = user_partition_id
self.group_id = group_id
self.user_name = user_name
def __setstate__(self, state):
"""
Ensure that all attributes are initialised when unpickling CourseMasquerade objects.
Users might still have CourseMasquerade objects from older versions of the code in their
session. These old objects might not have all attributes set, possibly resulting in
AttributeErrors.
"""
self.__init__(**state)
@require_POST
@login_required
@expect_json
def handle_ajax(request, course_key_string):
"""
Handle AJAX posts to update the current user's masquerade for the specified course.
The masquerade settings are stored in the Django session as a dict from course keys
to CourseMasquerade objects.
"""
course_key = CourseKey.from_string(course_key_string)
masquerade_settings = request.session.get(MASQUERADE_SETTINGS_KEY, {})
request_json = request.json
role = request_json.get('role', 'student')
group_id = request_json.get('group_id', None)
user_partition_id = request_json.get('user_partition_id', None) if group_id is not None else None
user_name = request_json.get('user_name', None)
if user_name:
users_in_course = CourseEnrollment.objects.users_enrolled_in(course_key)
try:
if '@' in user_name:
user_name = users_in_course.get(email=user_name).username
else:
users_in_course.get(username=user_name)
except User.DoesNotExist:
return JsonResponse({
'success': False,
'error': _(
'There is no user with the username or email address {user_name} '
'enrolled in this course.'
).format(user_name=user_name)
})
masquerade_settings[course_key] = CourseMasquerade(
course_key,
role=role,
user_partition_id=user_partition_id,
group_id=group_id,
user_name=user_name,
)
request.session[MASQUERADE_SETTINGS_KEY] = masquerade_settings
return JsonResponse({'success': True})
def setup_masquerade(request, course_key, staff_access=False, reset_masquerade_data=False):
"""
Sets up masquerading for the current user within the current request. The request's user is
updated to have a 'masquerade_settings' attribute with the dict of all masqueraded settings if
called from within a request context. The function then returns a pair (CourseMasquerade, User)
with the masquerade settings for the specified course key or None if there isn't one, and the
user we are masquerading as or request.user if masquerading as a specific user is not active.
If the reset_masquerade_data flag is set, the field data stored in the session will be cleared.
"""
if (
request.user is None or
not settings.FEATURES.get('ENABLE_MASQUERADE', False) or
not staff_access
):
return None, request.user
if reset_masquerade_data:
request.session.pop(MASQUERADE_DATA_KEY, None)
masquerade_settings = request.session.setdefault(MASQUERADE_SETTINGS_KEY, {})
# Store the masquerade settings on the user so it can be accessed without the request
request.user.masquerade_settings = masquerade_settings
course_masquerade = masquerade_settings.get(course_key, None)
masquerade_user = None
if course_masquerade and course_masquerade.user_name:
try:
masquerade_user = CourseEnrollment.objects.users_enrolled_in(course_key).get(
username=course_masquerade.user_name
)
except User.DoesNotExist:
# This can only happen if the user was unenrolled from the course since masquerading
# was enabled. We silently reset the masquerading configuration in this case.
course_masquerade = None
del masquerade_settings[course_key]
request.session.modified = True
else:
# Store the masquerading settings on the masquerade_user as well, since this user will
# be used in some places instead of request.user.
masquerade_user.masquerade_settings = request.user.masquerade_settings
masquerade_user.real_user = request.user
return course_masquerade, masquerade_user or request.user
def get_course_masquerade(user, course_key):
"""
Returns the masquerade for the current user for the specified course. If no masquerade has
been installed, then a default no-op masquerade is returned.
"""
masquerade_settings = getattr(user, 'masquerade_settings', {})
return masquerade_settings.get(course_key, None)
def get_masquerade_role(user, course_key):
"""
Returns the role that the user is masquerading as, or None if no masquerade is in effect.
"""
course_masquerade = get_course_masquerade(user, course_key)
return course_masquerade.role if course_masquerade else None
def is_masquerading_as_student(user, course_key):
"""
Returns true if the user is a staff member masquerading as a student.
"""
return get_masquerade_role(user, course_key) == 'student'
def is_masquerading_as_specific_student(user, course_key): # pylint: disable=invalid-name
"""
Returns whether the user is a staff member masquerading as a specific student.
"""
course_masquerade = get_course_masquerade(user, course_key)
return bool(course_masquerade and course_masquerade.user_name)
def get_masquerading_user_group(course_key, user, user_partition):
"""
If the current user is masquerading as a generic learner in a specific group, return that group.
If the user is not masquerading as a group, then None is returned.
"""
course_masquerade = get_course_masquerade(user, course_key)
if course_masquerade:
if course_masquerade.user_partition_id == user_partition.id and course_masquerade.group_id is not None:
try:
return user_partition.get_group(course_masquerade.group_id)
except NoSuchUserPartitionGroupError:
return None
    # The user is masquerading as a generic student or is not masquerading as a group; return None.
return None
# Sentinel object to mark deleted objects in the session cache
_DELETED_SENTINEL = object()
class MasqueradingKeyValueStore(KeyValueStore):
"""
A `KeyValueStore` to avoid affecting the user state when masquerading.
This `KeyValueStore` wraps an underlying `KeyValueStore`. Reads are forwarded to the underlying
store, but writes go to a Django session (or other dictionary-like object).
"""
def __init__(self, kvs, session):
"""
Arguments:
kvs: The KeyValueStore to wrap.
session: The Django session used to store temporary data in.
"""
self.kvs = kvs
self.session = session
self.session_data = session.setdefault(MASQUERADE_DATA_KEY, {})
def _serialize_key(self, key):
"""
Convert the key of Type KeyValueStore.Key to a string.
Keys are not JSON-serializable, so we can't use them as keys for the Django session.
The implementation is taken from cms/djangoapps/contentstore/views/session_kv_store.py.
"""
return repr(tuple(key))
def get(self, key):
key_str = self._serialize_key(key)
try:
value = self.session_data[key_str]
except KeyError:
return self.kvs.get(key)
else:
if value is _DELETED_SENTINEL:
raise KeyError(key_str)
return value
def set(self, key, value):
self.session_data[self._serialize_key(key)] = value
self.session.modified = True
def delete(self, key):
# We can't simply delete the key from the session, since it might still exist in the kvs,
# which we are not allowed to modify, so we mark it as deleted by setting it to
# _DELETED_SENTINEL in the session.
self.set(key, _DELETED_SENTINEL)
def has(self, key):
try:
value = self.session_data[self._serialize_key(key)]
except KeyError:
return self.kvs.has(key)
else:
return value != _DELETED_SENTINEL
def filter_displayed_blocks(block, unused_view, frag, unused_context):
"""
A wrapper to only show XBlocks that set `show_in_read_only_mode` when masquerading as a specific user.
We don't want to modify the state of the user we are masquerading as, so we can't show XBlocks
that store information outside of the XBlock fields API.
"""
if getattr(block, 'show_in_read_only_mode', False):
return frag
return Fragment(
_(u'This type of component cannot be shown while viewing the course as a specific student.')
)
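# --- Usage sketch (not part of the original module) ------------------------
# The write-aside behaviour of MasqueradingKeyValueStore, with hypothetical
# dict-backed stand-ins for the wrapped kvs and the Django session.
class _FakeSession(dict):
    modified = False  # mimics the Django session's `modified` flag

class _DictKVS(object):
    def __init__(self, data):
        self.data = data
    def get(self, key):
        return self.data[key]
    def has(self, key):
        return key in self.data

def _demo():
    store = MasqueradingKeyValueStore(_DictKVS({('state',): 'real'}),
                                      _FakeSession())
    print(store.get(('state',)))   # 'real' -- read through to the wrapped kvs
    store.set(('state',), 'fake')  # writes land in the session, not the kvs
    print(store.get(('state',)))   # 'fake'
    store.delete(('state',))
    print(store.has(('state',)))   # False -- masked by the deletion sentinel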
| pepeportela/edx-platform | lms/djangoapps/courseware/masquerade.py | Python | agpl-3.0 | 10,684 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example of data analysis based on a HDF5-like data model
========================================================
.. note:: WORK-IN-PROGRESS / NOT COMPLETE!
This example aims at building a data analysis workflow based on an HDF5
representation. The goal is to keep everything in an HDF5-like container class.
Notes on the workflow
---------------------
.. note:: the *scan* here is not only an experimental thing where one or more
    axes are moved while one or more signals are collected; a scan can also
    be generated by simulation or data analysis
- / HDF5-like data analysis project (NXroot)
- Group of scans of the same type (e.g. XANES, XES, RIXS, peaks, aligns)
- 1 (Loaded ScanN)
- 1 (raw data)
- axes
- signals
- scalars
- 2 (when no change, just link, no copy)
- axes
- signals
- scalars
- ...
- N (step N in the analysis without changing shape)
- axes
- signals
- scalars
- ...
- N (Rebinned Scan N (rebinning is like generating another scan))
- 1 (rebinned data)
- axes
- counters
- scalars
- M (Merge set of scans something at a given step)
- The scan is the base thing. It is composed of:
- *axes*: single or multiple variables changed over time, like a set of
motors following a trajectory
*type*: 1D arrays, all the same shape
- *signals*: signals read by detectors during movement of the axes or
  generated by analysis; that is, each point in the axes corresponds to
  a point in the signals. Unlike axes, signals may refer to
  multi-dimensional data like images or MCA datasets
*type*: 1D, 2D or ND arrays for each element in axes
- *scalars*: key-value type data (0D) not changing over the scan shape.
- Actions performed on the single scan:
- *converting*: scale conversion, affects only axes, no change in shape.
- *rebinning*: affects axes and counters.
- *merging*: affects axes and counters.
"""
from silx.gui import qt
from sloth.groups.baseh5 import RootGroup
from sloth.gui.plot.plotarea import PlotArea
from sloth.io.datasource_spech5 import DataSourceSpecH5
from sloth.utils.logging import getLogger
_logger = getLogger("sloth.examples.data_analysis_h5", level="INFO")
class MyDataAnalysisApp(qt.QMainWindow):
"""GUI application containing the whole data analysis workflow"""
def __init__(self, parent=None):
qt.QMainWindow.__init__(self, parent=parent)
if parent is not None:
#: behave as a widget
self.setWindowFlags(qt.Qt.Widget)
else:
self.setWindowTitle("My Great Data Analysis Box")
#: logger
self._logger = _logger
def _init_gui(self):
"""build GUI"""
centralWidget = qt.QWidget(self)
#: instance of plot area
self._plot = PlotArea()
self._logger.info(f"'{self.__class__.__name__}._plot': plot area")
#: assign grid layout
gridLayout = qt.QGridLayout()
gridLayout.setContentsMargins(0, 0, 0, 0)
#: addWidget(widget, row, column, rowSpan, columnSpan[, alignment=0]))
gridLayout.addWidget(self._plot, 0, 0, 2, 2)
#: set grid layout in the central widget
centralWidget.setLayout(gridLayout)
self.setCentralWidget(centralWidget)
self.setMinimumSize(1024, 800)
def _init_data_model(self):
"""init data model"""
self._dm = RootGroup(logger=self._logger)
self._logger.info(f"'{self.__class__.__name__}._dm': data model")
def _init_data_source(self, fname=None):
"""init data source"""
self._ds = DataSourceSpecH5(fname=fname, logger=self._logger)
self._logger.info(
f"'{self.__class__.__name__}._ds': data source (file={fname})"
)
########
# MAIN #
########
def main(fname=None):
"""main with the possibility to load a filename"""
from sloth.utils.jupyter import run_from_ipython
_ipy = run_from_ipython()
from silx import sx
sx.enable_gui()
t = MyDataAnalysisApp()
# t.show()
t._init_gui()
t._init_data_model()
t._init_data_source(fname=fname)
#: now start playing yourself!
_logger.info(f"'{t.__class__.__name__}': called 't' here, ENJOY!")
if not _ipy:
input("Please, run this in IPython. Press ENTER to QUIT")
return t
if __name__ == "__main__":
t = main()
| maurov/xraysloth | examples/data_analysis_h5.py | Python | bsd-3-clause | 4,714 |
__author__ = 'tbeltramelli'
import numpy as np
import math
from pylab import *
class UMath:
@staticmethod
def normalize(range_min, range_max, x, x_min, x_max):
return range_min + (((x - x_min) * (range_max - range_min)) / (x_max - x_min))
@staticmethod
def is_in_area(x, y, width, height):
return ((x > width/4) and (x < 3 * (width/4))) and ((y > height/4) and (y < 3 * (height/4)))
@staticmethod
def get_circle_samples(center=(0, 0), radius=1, point_number=30):
s = np.linspace(0, 2 * math.pi, point_number)
return [(radius * np.cos(t) + center[0], radius * np.sin(t) + center[1], np.cos(t), np.sin(t)) for t in s]
@staticmethod
def get_line_coordinates(p1, p2):
(x1, y1) = p1
x1 = int(x1)
y1 = int(y1)
(x2, y2) = p2
x2 = int(x2)
y2 = int(y2)
points = []
is_steep = abs(y2 - y1) > abs(x2 - x1)
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
to_reverse = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
to_reverse = True
delta_x = x2 - x1
delta_y = abs(y2 - y1)
error = int(delta_x / 2)
y = y1
        y_step = 1 if y1 < y2 else -1
for x in range(x1, x2 + 1):
if is_steep:
points.append([y, x])
else:
points.append([x, y])
error -= delta_y
if error < 0:
y += y_step
error += delta_x
if to_reverse:
points.reverse()
        return np.array(points)
@staticmethod
def to_homogenious(points):
return vstack((points, ones((1, points.shape[1]))))
@staticmethod
def get_rotation_translation_matrix(rotation_factor):
rotation_1, rotation_2, translation = tuple(np.hsplit(rotation_factor, 3))
rotation_3 = cross(rotation_1.T, rotation_2.T).T
return np.hstack((rotation_1, rotation_2, rotation_3, translation))
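# --- Sanity check (not part of the original module) ------------------------
# Rasterising a short segment with the Bresenham routine above.
def _demo():
    # expected: [[0 0], [1 1], [2 1], [3 2]]
    print(UMath.get_line_coordinates((0, 0), (3, 2)))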
| tonybeltramelli/Graphics-And-Vision | Projective-Geometry/tony/com.tonybeltramelli.homography/UMath.py | Python | apache-2.0 | 2,192 |
import numpy as np
import json
from ..utils.data_utils import get_file
from .. import backend as K
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if dim_ordering == 'th':
# 'RGB'->'BGR'
x = x[:, ::-1, :, :]
# Zero-center by mean pixel
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
else:
# 'RGB'->'BGR'
x = x[:, :, :, ::-1]
# Zero-center by mean pixel
x[:, :, :, 0] -= 103.939
x[:, :, :, 1] -= 116.779
x[:, :, :, 2] -= 123.68
return x
def decode_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = np.argpartition(pred, -top)[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
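# --- Usage sketch (not part of the original module) ------------------------
# Preprocessing a random uint8-valued batch in TensorFlow dim ordering; the
# cast to float is needed because the mean subtraction happens in place.
def _demo():
    x = np.random.randint(0, 256, size=(2, 224, 224, 3)).astype('float64')
    x = preprocess_input(x, dim_ordering='tf')
    print(x.shape, x.mean())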
| jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/keras/applications/imagenet_utils.py | Python | mit | 1,644 |
"""
=================================
Box plots with custom fill colors
=================================
This plot illustrates how to create two types of box plots
(rectangular and notched), and how to fill them with custom
colors.
"""
import matplotlib.pyplot as plt
import numpy as np
# Random test data
np.random.seed(123)
all_data = [np.random.normal(0, std, 100) for std in range(1, 4)]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))
# rectangular box plot
bplot1 = axes[0].boxplot(all_data,
vert=True,
showmeans=True,
patch_artist=True)
# notch shape box plot
bplot2 = axes[1].boxplot(all_data,
notch=True,
showmeans=True,
vert=True,
patch_artist=True)
# fill with colors
colors = ['pink', 'lightblue', 'lightgreen']
for bplot in (bplot1, bplot2):
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
# adding horizontal grid lines
for ax in axes:
ax.yaxis.grid(True)
    ax.set_xticks([y + 1 for y in range(len(all_data))])
ax.set_xlabel('xlabel')
ax.set_ylabel('ylabel')
# add x-tick labels
plt.setp(axes, xticks=[y + 1 for y in range(len(all_data))],
         xticklabels=['x1', 'x2', 'x3'])
plt.show()
| dariosena/LearningPython | general/boxplot_color_demo.py | Python | gpl-3.0 | 1,415 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    # update sales cycle
for d in ['Sales Invoice', 'Sales Order', 'Quotation', 'Delivery Note']:
frappe.db.sql("""update `tab%s` set taxes_and_charges=charge""" % d)
    # update purchase cycle
for d in ['Purchase Invoice', 'Purchase Order', 'Supplier Quotation', 'Purchase Receipt']:
frappe.db.sql("""update `tab%s` set taxes_and_charges=purchase_other_charges""" % d)
frappe.db.sql("""update tabPurchase_Taxes_and_Charges set parentfield='other_charges'""")
| suyashphadtare/test | erpnext/patches/v4_0/map_charge_to_taxes_and_charges.py | Python | agpl-3.0 | 666 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo JSON schema."""
from __future__ import absolute_import, print_function, unicode_literals
from flask_babelex import lazy_gettext as _
from invenio_pidrelations.serializers.utils import serialize_relations
from invenio_pidstore.models import PersistentIdentifier
from marshmallow import Schema, ValidationError, fields, missing, \
validates_schema
from werkzeug.routing import BuildError
from zenodo.modules.stats.utils import get_record_stats
from ...models import AccessRight, ObjectType
from . import common
class StrictKeysSchema(Schema):
"""Ensure only valid keys exists."""
@validates_schema(pass_original=True)
def check_unknown_fields(self, data, original_data):
"""Check for unknown keys."""
for key in original_data:
if key not in self.fields:
raise ValidationError('Unknown field name {}'.format(key))
class ResourceTypeSchema(StrictKeysSchema):
"""Resource type schema."""
type = fields.Str(
required=True,
error_messages=dict(
required=_('Type must be specified.')
),
)
subtype = fields.Str()
openaire_subtype = fields.Str()
title = fields.Method('get_title', dump_only=True)
def get_title(self, obj):
"""Get title."""
obj = ObjectType.get_by_dict(obj)
return obj['title']['en'] if obj else missing
@validates_schema
def validate_data(self, data):
"""Validate resource type."""
obj = ObjectType.get_by_dict(data)
if obj is None:
raise ValidationError(_('Invalid resource type.'))
def dump_openaire_type(self, obj):
"""Get OpenAIRE subtype."""
acc = obj.get('access_right')
if acc:
return AccessRight.as_category(acc)
return missing
class JournalSchemaV1(StrictKeysSchema):
"""Schema for a journal."""
issue = fields.Str()
pages = fields.Str()
title = fields.Str()
volume = fields.Str()
year = fields.Str()
class MeetingSchemaV1(StrictKeysSchema):
"""Schema for a meeting."""
title = fields.Str()
acronym = fields.Str()
dates = fields.Str()
place = fields.Str()
url = fields.Str()
session = fields.Str()
session_part = fields.Str()
class ImprintSchemaV1(StrictKeysSchema):
"""Schema for imprint."""
publisher = fields.Str()
place = fields.Str()
isbn = fields.Str()
class PartOfSchemaV1(StrictKeysSchema):
    """Schema for the part_of field."""
pages = fields.Str()
title = fields.Str()
class ThesisSchemaV1(StrictKeysSchema):
"""Schema for thesis."""
university = fields.Str()
supervisors = fields.Nested(common.PersonSchemaV1, many=True)
class FunderSchemaV1(StrictKeysSchema):
"""Schema for a funder."""
doi = fields.Str()
name = fields.Str(dump_only=True)
acronyms = fields.List(fields.Str(), dump_only=True)
links = fields.Method('get_funder_url', dump_only=True)
def get_funder_url(self, obj):
"""Get grant url."""
return dict(self=common.api_link_for('funder', id=obj['doi']))
class GrantSchemaV1(StrictKeysSchema):
"""Schema for a grant."""
title = fields.Str(dump_only=True)
code = fields.Str()
program = fields.Str(dump_only=True)
acronym = fields.Str(dump_only=True)
funder = fields.Nested(FunderSchemaV1)
links = fields.Method('get_grant_url', dump_only=True)
def get_grant_url(self, obj):
"""Get grant url."""
return dict(self=common.api_link_for('grant', id=obj['internal_id']))
class CommunitiesSchemaV1(StrictKeysSchema):
"""Schema for communities."""
id = fields.Function(lambda x: x)
class ActionSchemaV1(StrictKeysSchema):
"""Schema for a actions."""
prereserve_doi = fields.Str(load_only=True)
class FilesSchema(Schema):
"""Files metadata schema."""
type = fields.String()
checksum = fields.String()
size = fields.Integer()
bucket = fields.String()
key = fields.String()
links = fields.Method('get_links')
def get_links(self, obj):
"""Get links."""
return {
'self': common.api_link_for(
'object', bucket=obj['bucket'], key=obj['key'])
}
class OwnerSchema(StrictKeysSchema):
"""Schema for owners.
Allows us to later introduce more properties for an owner.
"""
id = fields.Function(lambda x: x)
class LicenseSchemaV1(StrictKeysSchema):
"""Schema for license.
    Allows us to later introduce more properties for a license.
"""
id = fields.Str(attribute='id')
class MetadataSchemaV1(common.CommonMetadataSchemaV1):
"""Schema for a record."""
resource_type = fields.Nested(ResourceTypeSchema)
access_right_category = fields.Method(
'dump_access_right_category', dump_only=True)
license = fields.Nested(LicenseSchemaV1)
communities = fields.Nested(CommunitiesSchemaV1, many=True)
grants = fields.Nested(GrantSchemaV1, many=True)
journal = fields.Nested(JournalSchemaV1)
meeting = fields.Nested(MeetingSchemaV1)
imprint = fields.Nested(ImprintSchemaV1)
part_of = fields.Nested(PartOfSchemaV1)
thesis = fields.Nested(ThesisSchemaV1)
relations = fields.Method('dump_relations')
def dump_access_right_category(self, obj):
"""Get access right category."""
acc = obj.get('access_right')
if acc:
return AccessRight.as_category(acc)
return missing
    def dump_relations(self, obj):
        """Dump the relations to a dictionary."""
        if 'relations' in obj:
            return obj['relations']
        pid = self.context['pid']
        return serialize_relations(pid)
class RecordSchemaV1(common.CommonRecordSchemaV1):
"""Schema for records v1 in JSON."""
files = fields.Nested(
FilesSchema, many=True, dump_only=True, attribute='files')
metadata = fields.Nested(MetadataSchemaV1)
owners = fields.List(
fields.Integer, attribute='metadata.owners', dump_only=True)
revision = fields.Integer(dump_only=True)
updated = fields.Str(dump_only=True)
stats = fields.Method('dump_stats')
def dump_stats(self, obj):
"""Dump the stats to a dictionary."""
if '_stats' in obj.get('metadata', {}):
return obj['metadata'].get('_stats', {})
else:
pid = self.context.get('pid')
if isinstance(pid, PersistentIdentifier):
return get_record_stats(pid.object_uuid, False)
else:
return None
class DepositSchemaV1(RecordSchemaV1):
"""Deposit schema.
Same as the Record schema except for some few extra additions.
"""
files = None
owners = fields.Nested(
OwnerSchema, dump_only=True, attribute='metadata._deposit.owners',
many=True)
status = fields.Str(dump_only=True, attribute='metadata._deposit.status')
recid = fields.Str(dump_only=True, attribute='metadata.recid')
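# --- Usage sketch (not part of the original module) ------------------------
# StrictKeysSchema rejects unknown keys on load (marshmallow 2.x semantics,
# matching the validates_schema(pass_original=True) hook above); _PointSchema
# is a hypothetical example schema.
class _PointSchema(StrictKeysSchema):
    x = fields.Int()
    y = fields.Int()

def _demo():
    data, errors = _PointSchema().load({'x': 1, 'y': 2, 'z': 3})
    print(errors)  # e.g. {'_schema': ['Unknown field name z']}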
| slint/zenodo | zenodo/modules/records/serializers/schemas/json.py | Python | gpl-2.0 | 8,136 |
#
# A new home for the reporting code.
#
# This code is part of the LWN git data miner.
#
# Copyright 2007-13 Eklektix, Inc.
# Copyright 2007-13 Jonathan Corbet <corbet@lwn.net>
#
# This file may be distributed under the terms of the GNU General
# Public License, version 2.
#
import sys
Outfile = sys.stdout
HTMLfile = None
ListCount = 999999
def SetOutput(file):
global Outfile
Outfile = file
def SetHTMLOutput(file):
global HTMLfile
HTMLfile = file
def SetMaxList(max):
global ListCount
ListCount = max
def Write(stuff):
Outfile.write(stuff)
#
# HTML output support stuff.
#
HTMLclass = 0
HClasses = ['Even', 'Odd']
THead = '''<p>
<table cellspacing=3>
<tr><th colspan=3>%s</th></tr>
'''
def BeginReport(title):
global HTMLclass
Outfile.write('\n%s\n' % title)
if HTMLfile:
HTMLfile.write(THead % title)
HTMLclass = 0
TRow = ''' <tr class="%s">
<td>%s</td><td align="right">%d</td><td align="right">%.1f%%</td></tr>
'''
TRowStr = ''' <tr class="%s">
<td>%s</td><td align="right">%d</td><td>%s</td></tr>
'''
def ReportLine(text, count, pct):
global HTMLclass
if count == 0:
return
Outfile.write ('%-25s %4d (%.1f%%)\n' % (text, count, pct))
if HTMLfile:
HTMLfile.write(TRow % (HClasses[HTMLclass], text, count, pct))
HTMLclass ^= 1
def ReportLineStr(text, count, extra):
global HTMLclass
if count == 0:
return
Outfile.write ('%-25s %4d %s\n' % (text, count, extra))
if HTMLfile:
HTMLfile.write(TRowStr % (HClasses[HTMLclass], text, count, extra))
HTMLclass ^= 1
def EndReport():
if HTMLfile:
HTMLfile.write('</table>\n\n')
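#
# A minimal sketch of the begin/line/end reporting flow above; the file name
# and numbers are invented for illustration:
#
#     SetOutput(open('report.txt', 'w'))
#     BeginReport('Example report')
#     ReportLine('Some developer', 10, 25.0)  # skipped entirely when count == 0
#     EndReport()
#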
#
# Comparison and report generation functions.
#
def ComparePCount(h1, h2):
return len(h2.patches) - len(h1.patches)
def ReportByPCount(hlist, cscount):
hlist.sort(ComparePCount)
count = 0
BeginReport('Developers with the most changesets')
for h in hlist:
pcount = len(h.patches)
if pcount > 0:
ReportLine(h.name, pcount, (pcount*100.0)/cscount)
count += 1
if count >= ListCount:
break
EndReport()
def CompareLChanged(h1, h2):
return h2.changed - h1.changed
def ReportByLChanged(hlist, totalchanged):
hlist.sort(CompareLChanged)
count = 0
BeginReport('Developers with the most changed lines')
for h in hlist:
if h.changed > 0:
ReportLine(h.name, h.changed, (h.changed*100.0)/totalchanged)
count += 1
if count >= ListCount:
break
EndReport()
def CompareLRemoved(h1, h2):
return (h2.removed - h2.added) - (h1.removed - h1.added)
def ReportByLRemoved(hlist, totalremoved):
hlist.sort(CompareLRemoved)
count = 0
BeginReport('Developers with the most lines removed')
for h in hlist:
delta = h.added - h.removed
if delta < 0:
ReportLine(h.name, -delta, (-delta*100.0)/totalremoved)
count += 1
if count >= ListCount:
break
EndReport()
def CompareEPCount(e1, e2):
return e2.count - e1.count
def ReportByPCEmpl(elist, cscount):
elist.sort(CompareEPCount)
count = 0
BeginReport('Top changeset contributors by employer')
for e in elist:
if e.count != 0:
ReportLine(e.name, e.count, (e.count*100.0)/cscount)
count += 1
if count >= ListCount:
break
EndReport()
def CompareELChanged(e1, e2):
return e2.changed - e1.changed
def ReportByELChanged(elist, totalchanged):
elist.sort(CompareELChanged)
count = 0
BeginReport('Top lines changed by employer')
for e in elist:
if e.changed != 0:
ReportLine(e.name, e.changed, (e.changed*100.0)/totalchanged)
count += 1
if count >= ListCount:
break
EndReport()
def CompareSOBs(h1, h2):
return len(h2.signoffs) - len(h1.signoffs)
def ReportBySOBs(hlist):
hlist.sort(CompareSOBs)
totalsobs = 0
for h in hlist:
totalsobs += len(h.signoffs)
count = 0
BeginReport('Developers with the most signoffs (total %d)' % totalsobs)
for h in hlist:
scount = len(h.signoffs)
if scount > 0:
ReportLine(h.name, scount, (scount*100.0)/totalsobs)
count += 1
if count >= ListCount:
break
EndReport()
#
# Reviewer reporting.
#
def CompareRevs(h1, h2):
return len(h2.reviews) - len(h1.reviews)
def ReportByRevs(hlist):
hlist.sort(CompareRevs)
totalrevs = 0
for h in hlist:
totalrevs += len(h.reviews)
count = 0
BeginReport('Developers with the most reviews (total %d)' % totalrevs)
for h in hlist:
scount = len(h.reviews)
if scount > 0:
ReportLine(h.name, scount, (scount*100.0)/totalrevs)
count += 1
if count >= ListCount:
break
EndReport()
#
# tester reporting.
#
def CompareTests(h1, h2):
return len(h2.tested) - len(h1.tested)
def ReportByTests(hlist):
hlist.sort(CompareTests)
totaltests = 0
for h in hlist:
totaltests += len(h.tested)
count = 0
BeginReport('Developers with the most test credits (total %d)' % totaltests)
for h in hlist:
scount = len(h.tested)
if scount > 0:
ReportLine(h.name, scount, (scount*100.0)/totaltests)
count += 1
if count >= ListCount:
break
EndReport()
def CompareTestCred(h1, h2):
return h2.testcred - h1.testcred
def ReportByTestCreds(hlist):
hlist.sort(CompareTestCred)
totaltests = 0
for h in hlist:
totaltests += h.testcred
count = 0
BeginReport('Developers who gave the most tested-by credits (total %d)' % totaltests)
for h in hlist:
if h.testcred > 0:
ReportLine(h.name, h.testcred, (h.testcred*100.0)/totaltests)
count += 1
if count >= ListCount:
break
EndReport()
#
# Reporter reporting.
#
def CompareReports(h1, h2):
return len(h2.reports) - len(h1.reports)
def ReportByReports(hlist):
hlist.sort(CompareReports)
totalreps = 0
for h in hlist:
totalreps += len(h.reports)
count = 0
BeginReport('Developers with the most report credits (total %d)' % totalreps)
for h in hlist:
scount = len(h.reports)
if scount > 0:
ReportLine(h.name, scount, (scount*100.0)/totalreps)
count += 1
if count >= ListCount:
break
EndReport()
def CompareRepCred(h1, h2):
return h2.repcred - h1.repcred
def ReportByRepCreds(hlist):
hlist.sort(CompareRepCred)
totalreps = 0
for h in hlist:
totalreps += h.repcred
count = 0
BeginReport('Developers who gave the most report credits (total %d)' % totalreps)
for h in hlist:
if h.repcred > 0:
ReportLine(h.name, h.repcred, (h.repcred*100.0)/totalreps)
count += 1
if count >= ListCount:
break
EndReport()
#
# Versions.
#
def CompareVersionCounts(h1, h2):
if h1.versions and h2.versions:
return len(h2.versions) - len(h1.versions)
if h2.versions:
return 1
if h1.versions:
return -1
return 0
def MissedVersions(hv, allv):
missed = [v for v in allv if v not in hv]
missed.reverse()
return ' '.join(missed)
def ReportVersions(hlist):
hlist.sort(CompareVersionCounts)
BeginReport('Developers represented in the most kernel versions')
count = 0
allversions = hlist[0].versions
for h in hlist:
ReportLineStr(h.name, len(h.versions), MissedVersions(h.versions, allversions))
count += 1
if count >= ListCount:
break
EndReport()
def CompareESOBs(e1, e2):
return e2.sobs - e1.sobs
def ReportByESOBs(elist):
elist.sort(CompareESOBs)
totalsobs = 0
for e in elist:
totalsobs += e.sobs
count = 0
BeginReport('Employers with the most signoffs (total %d)' % totalsobs)
for e in elist:
if e.sobs > 0:
ReportLine(e.name, e.sobs, (e.sobs*100.0)/totalsobs)
count += 1
if count >= ListCount:
break
EndReport()
def CompareHackers(e1, e2):
return len(e2.hackers) - len(e1.hackers)
def ReportByEHackers(elist):
elist.sort(CompareHackers)
totalhackers = 0
for e in elist:
totalhackers += len(e.hackers)
count = 0
BeginReport('Employers with the most hackers (total %d)' % totalhackers)
for e in elist:
nhackers = len(e.hackers)
if nhackers > 0:
ReportLine(e.name, nhackers, (nhackers*100.0)/totalhackers)
count += 1
if count >= ListCount:
break
EndReport()
def DevReports(hlist, totalchanged, cscount, totalremoved):
ReportByPCount(hlist, cscount)
ReportByLChanged(hlist, totalchanged)
ReportByLRemoved(hlist, totalremoved)
ReportBySOBs(hlist)
ReportByRevs(hlist)
ReportByTests(hlist)
ReportByTestCreds(hlist)
ReportByReports(hlist)
ReportByRepCreds(hlist)
def EmplReports(elist, totalchanged, cscount):
ReportByPCEmpl(elist, cscount)
ReportByELChanged(elist, totalchanged)
ReportByESOBs(elist)
ReportByEHackers(elist)
#
# Who are the unknown hackers?
#
def IsUnknown(h):
empl = h.employer[0][0][1].name
return h.email[0] == empl or empl == '(Unknown)'
def ReportUnknowns(hlist, cscount):
#
# Trim the list to just the unknowns; try to work properly whether
# mapping to (Unknown) is happening or not.
#
ulist = [ h for h in hlist if IsUnknown(h) ]
ulist.sort(ComparePCount)
count = 0
BeginReport('Developers with unknown affiliation')
for h in ulist:
pcount = len(h.patches)
if pcount > 0:
ReportLine(h.name, pcount, (pcount*100.0)/cscount)
count += 1
if count >= ListCount:
break
EndReport()
def ReportByFileType(hacker_list):
total = {}
total_by_hacker = {}
BeginReport('Developer contributions by type')
for h in hacker_list:
by_hacker = {}
for patch in h.patches:
# Get a summary by hacker
for (filetype, (added, removed)) in patch.filetypes.iteritems():
if by_hacker.has_key(filetype):
by_hacker[filetype][patch.ADDED] += added
by_hacker[filetype][patch.REMOVED] += removed
else:
by_hacker[filetype] = [added, removed]
# Update the totals
if total.has_key(filetype):
total[filetype][patch.ADDED] += added
total[filetype][patch.REMOVED] += removed
else:
total[filetype] = [added, removed, []]
# Print a summary by hacker
print h.name
for filetype, counters in by_hacker.iteritems():
print '\t', filetype, counters
h_added = by_hacker[filetype][patch.ADDED]
h_removed = by_hacker[filetype][patch.REMOVED]
total[filetype][2].append([h.name, h_added, h_removed])
# Print the global summary
BeginReport('Contributions by type and developers')
for filetype, (added, removed, hackers) in total.iteritems():
print filetype, added, removed
for h, h_added, h_removed in hackers:
print '\t%s: [%d, %d]' % (h, h_added, h_removed)
# Print the very global summary
BeginReport('General contributions by type')
for filetype, (added, removed, hackers) in total.iteritems():
print filetype, added, removed
#
# The file access report is a special beast.
#
def FileAccessReport(name, accesses, total):
outf = open(name, 'w')
files = accesses.keys()
files.sort()
for file in files:
a = accesses[file]
outf.write('%6d %6.1f%% %s\n' % (a, (100.0*a)/total, file))
outf.close()
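#
# A minimal sketch of the expected inputs, with invented numbers:
# FileAccessReport takes a mapping from file name to access count plus a
# grand total used for the percentage column:
#
#     accesses = {'MAINTAINERS': 120, 'Makefile': 80}
#     FileAccessReport('file-access.txt', accesses, 200)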
|
cbrune/onie
|
contrib/git-stats/gitdm/reports.py
|
Python
|
gpl-2.0
| 12,188
|
# models and fields
from celery import group
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
import django.contrib.postgres.fields as psql
# vault
from jinja2 import Template
from redcap.vault import vault
from vault import VaultKeyManager
# django signals
from django.db.models.signals import pre_delete
from django.dispatch import receiver
# L10n
from django.utils.translation import ugettext_lazy as _
# crypto helpers
import keymanager.helpers as rsa
from runner.execute_helpers import exec_on_local, exec_on_remote, prepare_output
class Client(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=64)
second_name = models.CharField(max_length=64)
email = models.EmailField(null=True, default=None, blank=True)
contacts = psql.JSONField(null=True, default=None, blank=True)
def __str__(self):
return str.format("{0} {1}", self.name, self.second_name)
class Meta:
db_table = 'manager_clients'
verbose_name = _('Client')
verbose_name_plural = _('Clients')
class Project(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=64)
description = models.TextField(null=True, default=None, blank=True)
url = models.URLField(null=True, default=None, blank=True)
owner = models.ForeignKey('Client', null=True, default=None, blank=True)
class Meta:
db_table = 'manager_projects'
verbose_name = _('Project')
verbose_name_plural = _('Projects')
def __str__(self):
return self.title.__str__()
class Playbook(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=32)
playbook = models.TextField()
local = models.BooleanField(default=False)
class Meta:
db_table = 'manager_playbooks'
verbose_name = _('Playbook')
verbose_name_plural = _('Playbooks')
def __str__(self):
return self.title.__str__()
def execute(self, host: str, user: str, ssh_port: int, key: str, params: dict, password=None) -> (int, str, str):
template = Template(self.playbook)
commands = template.render(params).splitlines()
if not self.local:
res = exec_on_remote(host, user, ssh_port, key, commands, password)
else:
res = exec_on_local(commands)
return res
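# A minimal local-run sketch: Playbook.playbook is a Jinja2 template whose
# rendered lines become shell commands. The values below are invented, and the
# (int, str, str) unpacking assumes the annotated return shape:
#
#     pb = Playbook(title='demo', playbook='echo {{ msg }}', local=True)
#     rc, out, err = pb.execute(host=None, user=None, ssh_port=22, key=None,
#                               params={'msg': 'hi'})  # runs "echo hi" locally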
class Server(models.Model):
id = models.AutoField(primary_key=True)
username = models.CharField(max_length=256)
ssh_port = models.IntegerField(default=22)
domain = models.CharField(null=True, default=None, blank=True, max_length=256)
ip_v4 = models.GenericIPAddressField(null=True, default=None, blank=True)
ip_v6 = models.GenericIPAddressField(null=True, default=None, blank=True)
private_key = models.TextField(null=True, default=None, blank=True)
public_key = models.TextField(null=True, default=None, blank=True)
name = models.CharField(max_length=32)
description = models.TextField(null=True, default=None, blank=True)
def save(self, **kwargs):
        super(Server, self).save(**kwargs)
if self.private_key is None or self.public_key is None:
self.invalidate_key()
@property
def key_pass(self) -> str or None:
if self.id is not None:
return VaultKeyManager(vault).get(str.format('secret/server_{0}', self.id))['data']['key']
return None
def invalidate_key(self) -> None:
if self.id is not None:
new_key_pass = rsa.random_key(64)
self.private_key, self.public_key = rsa.opsenssh_keypair(new_key_pass)
VaultKeyManager(vault).set(str.format('secret/server_{0}', self.id), key=new_key_pass)
self.save()
def __str__(self) -> str:
return self.name.__str__()
class Meta:
db_table = 'manager_servers'
verbose_name = _('Server')
verbose_name_plural = _('Servers')
class BuildPipeline(models.Model):
playbook = models.ForeignKey('Playbook')
build_target = models.ForeignKey('BuildTarget')
order = models.PositiveIntegerField()
def __str__(self):
return self.playbook.__str__()
class Meta:
db_table = 'manager_build_pipeline'
unique_together = ('playbook', 'build_target')
verbose_name = _('Server pipeline')
verbose_name_plural = _('Server pipeline')
ordering = ('order',)
class BuildTarget(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=32)
params = psql.JSONField(null=True, default=None, blank=True)
pipeline = models.ManyToManyField('Playbook', through='BuildPipeline', through_fields=('build_target', 'playbook'))
project = models.ForeignKey('Project')
server = models.ForeignKey('Server')
def __str__(self):
return self.name.__str__()
def execute(self, params: {} = None) -> (int, str, str):
playbooks = self.pipeline.order_by('buildpipeline__order').all()
        if not playbooks:
raise ObjectDoesNotExist(str.format('playbooks for server with id {0} not found', self.server.id))
server = self.server
host = None
fail_stderr = None
output = None
success = True
if server.ip_v4 is not None:
host = server.ip_v4
elif server.ip_v6 is not None:
host = server.ip_v6
elif server.domain is not None:
host = server.domain
if params is not None:
parameters = {**self.params, **params}
else:
parameters = self.params
for playbook in playbooks:
try:
res = playbook.execute(host,
server.username,
server.ssh_port,
server.private_key,
parameters,
server.key_pass)
_success, _output, _fail_stderr = prepare_output(res)
except Exception as e:
_output = e.__str__()
_fail_stderr = e.__str__()
_success = False
ActionHistory(output=_output, server=server, playbook_id=playbook.id).save()
if not _success:
success = False
output = _output
fail_stderr = _fail_stderr
break
return success, output, fail_stderr
class Meta:
db_table = 'manager_build_targets'
verbose_name = _('Build Target')
verbose_name_plural = _('Build Targets')
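# A usage sketch: a BuildTarget chains its playbooks in pipeline order and
# stops at the first failure. The target name and params are invented:
#
#     target = BuildTarget.objects.get(name='deploy')
#     success, output, stderr = target.execute(params={'branch': 'master'})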
class ActionHistory(models.Model):
id = models.AutoField(primary_key=True)
server = models.ForeignKey('Server')
playbook = models.ForeignKey('Playbook')
output = psql.JSONField()
date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.date.__str__()
class Meta:
db_table = 'manager_servers_action_history'
verbose_name = _('Action history')
verbose_name_plural = _('Action history')
class GroupBuildPipeline(models.Model):
group = models.ForeignKey('BuildGroup')
build = models.ForeignKey('BuildTarget')
order = models.PositiveIntegerField()
def __str__(self):
return self.build.name.__str__()
class Meta:
db_table = 'manager_group_build_pipeline'
unique_together = ('group', 'build')
verbose_name = _('Group pipeline')
verbose_name_plural = _('Group pipeline')
ordering = ('order',)
class BuildGroup(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=64)
builds = models.ManyToManyField('BuildTarget', through='GroupBuildPipeline', through_fields=('group', 'build'))
parallel = models.BooleanField(default=False)
trigger_on_success = models.ForeignKey('BuildGroup', related_name='success_trigger', null=True, blank=True)
trigger_on_fail = models.ForeignKey('BuildGroup', related_name='fail_trigger', null=True, blank=True)
def __str__(self):
return self.name.__str__()
@staticmethod
def errors(res: [(int, str, str)]) -> [(bool, str, str)]:
result = []
if res is not None:
for r, i, o in res:
if r != 0:
result.append((False, i, o))
return result
def execute(self) -> [(int, str, str)]:
builds = self.builds.order_by('groupbuildpipeline__order').all()
res = []
for build in builds:
res.append(build.execute())
errors = BuildGroup.errors(res)
        if errors:
return errors
return None
class Meta:
db_table = 'manager_group'
verbose_name = _('Group')
verbose_name_plural = _('Groups')
@receiver(pre_delete)
def delete_repo(sender, instance, **kwargs):
if sender == Server:
vault.delete(str.format('secret/server_{0}', instance.id))
|
berylTechnologies/redcap
|
task_manager/models.py
|
Python
|
gpl-3.0
| 9,052
|
'''Unit test for plural6.py'''
import plural6
import unittest
class KnownValues(unittest.TestCase):
def test_sxz(self):
'words ending in S, X, and Z'
nouns = {
'bass': 'basses',
'bus': 'buses',
'walrus': 'walruses',
'box': 'boxes',
'fax': 'faxes',
'suffix': 'suffixes',
'mailbox': 'mailboxes',
'buzz': 'buzzes',
'waltz': 'waltzes'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_h(self):
'words ending in H'
nouns = {
'coach': 'coaches',
'glitch': 'glitches',
'rash': 'rashes',
'watch': 'watches',
'cheetah': 'cheetahs',
'cough': 'coughs'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_y(self):
'words ending in Y'
nouns = {
'utility': 'utilities',
'vacancy': 'vacancies',
'boy': 'boys',
'day': 'days'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
    def test_ouse(self):
'words ending in OUSE'
nouns = {
'mouse': 'mice',
'louse': 'lice'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_child(self):
'special case: child'
nouns = {
'child': 'children'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_oot(self):
'special case: foot'
nouns = {
'foot': 'feet'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_ooth(self):
'words ending in OOTH'
nouns = {
'booth': 'booths',
'tooth': 'teeth'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_f_ves(self):
'words ending in F that become VES'
nouns = {
'leaf': 'leaves',
'loaf': 'loaves'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_sis(self):
'words ending in SIS'
nouns = {
'thesis': 'theses'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_man(self):
'words ending in MAN'
nouns = {
'man': 'men',
'mailman': 'mailmen',
'human': 'humans',
'roman': 'romans'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_ife(self):
'words ending in IFE'
nouns = {
'knife': 'knives',
'wife': 'wives',
'lowlife': 'lowlifes'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_eau(self):
'words ending in EAU'
nouns = {
'tableau': 'tableaux'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_elf(self):
'words ending in ELF'
nouns = {
'elf': 'elves',
'shelf': 'shelves',
'delf': 'delfs',
'pelf': 'pelfs'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_same(self):
'words that are their own plural'
nouns = {
'sheep': 'sheep',
'deer': 'deer',
'fish': 'fish',
'moose': 'moose',
'aircraft': 'aircraft',
'series': 'series',
'haiku': 'haiku'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
def test_default(self):
'unexceptional words'
nouns = {
'papaya': 'papayas',
'whip': 'whips',
'palimpsest': 'palimpsests'
}
for singular, plural in nouns.items():
self.assertEqual(plural6.plural(singular), plural)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
ctasims/Dive-Into-Python-3
|
examples/pluraltest6.py
|
Python
|
mit
| 6,072
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around chrome.
Replaces all the child processes (renderer, GPU, plugins and utility) with the
IPC fuzzer. The fuzzer will then play back a specified testcase.
Depends on ipc_fuzzer being available on the same directory as chrome.
"""
from __future__ import print_function
import argparse
import os
import subprocess
import sys
CHROME_BINARY_FOR_PLATFORM_DICT = {
'LINUX': 'chrome',
'MAC': 'Chromium.app/Contents/MacOS/Chromium',
'WINDOWS': 'chrome.exe',
}
def GetPlatform():
platform = None
if sys.platform.startswith('win'):
platform = 'WINDOWS'
elif sys.platform.startswith('linux'):
platform = 'LINUX'
elif sys.platform == 'darwin':
platform = 'MAC'
assert platform is not None
return platform
def main():
desc = 'Wrapper to run chrome with child processes replaced by IPC fuzzers'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--out-dir',
dest='out_dir',
default='out',
help='output directory under src/ directory')
parser.add_argument(
'--build-type',
dest='build_type',
default='Release',
help='Debug vs. Release build')
parser.add_argument(
'--gdb-browser',
dest='gdb_browser',
default=False,
action='store_true',
help='run browser process inside gdb')
parser.add_argument('testcase', help='IPC file to be replayed')
parser.add_argument(
'chrome_args',
nargs=argparse.REMAINDER,
help='any additional arguments are passed to chrome')
args = parser.parse_args()
platform = GetPlatform()
chrome_binary = CHROME_BINARY_FOR_PLATFORM_DICT[platform]
fuzzer_binary = 'ipc_fuzzer_replay'
if platform == 'WINDOWS':
fuzzer_binary += '.exe'
script_path = os.path.realpath(__file__)
ipc_fuzzer_dir = os.path.join(os.path.dirname(script_path), os.pardir)
src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
out_dir = os.path.join(src_dir, args.out_dir)
build_dir = os.path.join(out_dir, args.build_type)
chrome_path = os.path.join(build_dir, chrome_binary)
if not os.path.exists(chrome_path):
print('chrome executable not found at ', chrome_path)
return 1
fuzzer_path = os.path.join(build_dir, fuzzer_binary)
if not os.path.exists(fuzzer_path):
print('fuzzer executable not found at ', fuzzer_path)
print('ensure GYP_DEFINES="enable_ipc_fuzzer=1" and build target ' +
fuzzer_binary + '.')
return 1
prefixes = {
'--renderer-cmd-prefix',
'--plugin-launcher',
'--ppapi-plugin-launcher',
'--utility-cmd-prefix',
}
chrome_command = [
chrome_path,
'--ipc-fuzzer-testcase=' + args.testcase,
'--no-sandbox',
'--disable-kill-after-bad-ipc',
'--disable-mojo-channel',
]
if args.gdb_browser:
chrome_command = ['gdb', '--args'] + chrome_command
launchers = {}
for prefix in prefixes:
launchers[prefix] = fuzzer_path
for arg in args.chrome_args:
if arg.find('=') != -1:
switch, value = arg.split('=', 1)
if switch in prefixes:
launchers[switch] = value + ' ' + launchers[switch]
continue
chrome_command.append(arg)
for switch, value in launchers.items():
chrome_command.append(switch + '=' + value)
command_line = ' '.join(['\'' + arg + '\'' for arg in chrome_command])
print('Executing: ' + command_line)
return subprocess.call(chrome_command)
if __name__ == '__main__':
sys.exit(main())
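# A sketch of a typical invocation: replay a testcase against a Release build,
# passing any extra flags straight through to chrome. The testcase path and
# extra flag below are invented for illustration:
#
#   tools/ipc_fuzzer/scripts/play_testcase.py --build-type=Release \
#       testcase.ipc --user-data-dir=/tmp/profile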
|
scheib/chromium
|
tools/ipc_fuzzer/scripts/play_testcase.py
|
Python
|
bsd-3-clause
| 3,671
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-07 13:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('events', '0001_initial'),
('howtos', '0001_initial'),
('wagtailimages', '0013_make_rendition_upload_callable'),
('articles', '0002_auto_20160907_1506'),
('organizations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='howtopageorganizationpage',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='how_to_page', to='organizations.OrganizationPage'),
),
migrations.AddField(
model_name='howtopagenewsarticlepage',
name='article',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='how_to_page', to='articles.NewsArticlePage'),
),
migrations.AddField(
model_name='howtopagenewsarticlepage',
name='how_to_page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='news_article_pages', to='howtos.HowToPage'),
),
migrations.AddField(
model_name='howtopageeventpage',
name='event',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='how_to_page', to='events.EventPage'),
),
migrations.AddField(
model_name='howtopageeventpage',
name='how_to_page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_pages', to='howtos.HowToPage'),
),
migrations.AddField(
model_name='howtopage',
name='featured_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Featured image'),
),
]
|
jeremy-c/unusualbusiness
|
unusualbusiness/howtos/migrations/0002_auto_20160907_1506.py
|
Python
|
bsd-3-clause
| 2,240
|
from django.conf import settings
from django.db import connections, models
from django.utils import translation
import multidb
from gelato.translations.models import Translation
from gelato.translations.fields import TranslatedField
isnull = """IF(!ISNULL({t1}.localized_string), {t1}.{col}, {t2}.{col})
AS {name}_{col}"""
join = """LEFT OUTER JOIN translations {t}
ON ({t}.id={model}.{name} AND {t}.locale={locale})"""
no_locale_join = """LEFT OUTER JOIN translations {t}
ON {t}.id={model}.{name}"""
trans_fields = [f.name for f in Translation._meta.fields]
def build_query(model, connection):
qn = connection.ops.quote_name
selects, joins, params = [], [], []
# The model can define a fallback locale (which may be a Field).
if hasattr(model, 'get_fallback'):
fallback = model.get_fallback()
else:
fallback = settings.LANGUAGE_CODE
if not hasattr(model._meta, 'translated_fields'):
model._meta.translated_fields = [f for f in model._meta.fields
if isinstance(f, TranslatedField)]
# Add the selects and joins for each translated field on the model.
for field in model._meta.translated_fields:
if isinstance(fallback, models.Field):
fallback_str = '%s.%s' % (qn(model._meta.db_table),
qn(fallback.column))
else:
fallback_str = '%s'
name = field.column
d = {'t1': 't1_' + name, 't2': 't2_' + name,
'model': qn(model._meta.db_table), 'name': name}
selects.extend(isnull.format(col=f, **d) for f in trans_fields)
joins.append(join.format(t=d['t1'], locale='%s', **d))
params.append(translation.get_language())
if field.require_locale:
joins.append(join.format(t=d['t2'], locale=fallback_str, **d))
if not isinstance(fallback, models.Field):
params.append(fallback)
else:
joins.append(no_locale_join.format(t=d['t2'], **d))
# ids will be added later on.
sql = """SELECT {model}.{pk}, {selects} FROM {model} {joins}
WHERE {model}.{pk} IN {{ids}}"""
s = sql.format(selects=','.join(selects), joins='\n'.join(joins),
model=qn(model._meta.db_table), pk=model._meta.pk.column)
return s, params
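# A sketch of the query shape build_query() produces for a model with a single
# translated field `name` (identifiers abbreviated, MySQL syntax as in the
# templates above):
#
#   SELECT tbl.id,
#          IF(!ISNULL(t1_name.localized_string), t1_name.id, t2_name.id) AS name_id,
#          ...one IF(...) per column of the translations table...
#   FROM tbl
#   LEFT OUTER JOIN translations t1_name
#        ON (t1_name.id=tbl.name AND t1_name.locale=%s)
#   LEFT OUTER JOIN translations t2_name ON t2_name.id=tbl.name
#   WHERE tbl.id IN {ids}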
def get_trans(items):
if not items:
return
connection = connections[multidb.get_slave()]
cursor = connection.cursor()
model = items[0].__class__
sql, params = build_query(model, connection)
item_dict = dict((item.pk, item) for item in items)
ids = ','.join(map(str, item_dict.keys()))
cursor.execute(sql.format(ids='(%s)' % ids), tuple(params))
step = len(trans_fields)
for row in cursor.fetchall():
# We put the item's pk as the first selected field.
item = item_dict[row[0]]
for index, field in enumerate(model._meta.translated_fields):
start = 1 + step * index
t = Translation(*row[start:start+step])
if t.id is not None and t.localized_string is not None:
setattr(item, field.name, t)
|
washort/gelato.models
|
gelato/translations/transformer.py
|
Python
|
bsd-3-clause
| 3,203
|
import py
from rpython.jit.metainterp.test.test_virtualizable import ImplicitVirtualizableTests
from rpython.jit.backend.arm.test.support import JitARMMixin
class TestVirtualizable(JitARMMixin, ImplicitVirtualizableTests):
def test_blackhole_should_not_reenter(self):
py.test.skip("Assertion error & llinterp mess")
|
oblique-labs/pyVM
|
rpython/jit/backend/arm/test/test_virtualizable.py
|
Python
|
mit
| 330
|
#!/usr/bin/python
from __future__ import print_function
from datetime import datetime, timedelta
from httplib import BadStatusLine # This keeps happening but is not a problem on the code's end
import httplib2
import mongokit
from optparse import OptionParser
from pymongo import MongoClient
from random import random
import time
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow
class MessageCannotHaveBoth(Exception):
pass
class ContentTypeNotAllowed(Exception):
pass
class DatabaseNameRequired(Exception):
pass
class YouTubeResponse:
""" Convert a response dict into an object to play with """
def __init__(self, **entries):
self.__dict__.update(entries)
def __repr__(self):
return "<YouTubeResponse kind: '%s'>" % self.__dict__.get("kind", "^\o_O/^")
def convert_to_new_response(convert_this, convert_to_object = False):
"""Convert all dictionaries to objects"""
if not convert_to_object:
return convert_this
if type(convert_this) == dict:
new_object = YouTubeResponse()
for key,value in convert_this.items():
value = convert_to_new_response(value, convert_to_object = convert_to_object)
new_object.__dict__.update( {key: value} )
return new_object
if type(convert_this) in (list, tuple):
temp = []
for e1 in convert_this:
e1 = convert_to_new_response(e1, convert_to_object = convert_to_object)
temp.append(e1)
if type(convert_this) == tuple:
temp = tuple(temp)
return temp
else:
return convert_this
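# A minimal sketch of what convert_to_new_response() does: nested dicts become
# attribute-style YouTubeResponse objects (the dict below is invented):
#
#     resp = convert_to_new_response({'snippet': {'title': 'demo'}},
#                                    convert_to_object=True)
#     resp.snippet.title  # -> 'demo'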
class MongoConnection(object):
""" General Handler for Mongo connections """
connections = {}
def __init__(self):
pass
def get_connection(self, database = "", host = "127.0.0.1", port = 27017, username = "", password = ""):
if not database:
raise DatabaseNameRequired()
# If the connection has already been made, don't make a new one
if self.connections.get(database):
return self.connections[database]
# MongoDB Connection
if username and password:
conn_string = "mongodb://{0}:{1}@{2}:{3}/{4}".format(username, password, host, port, database)
else:
conn_string = "mongodb://{0}:{1}/{2}".format(host, port, database)
connection = MongoClient(conn_string)
self.connections[database] = connection
return connection
mongo_connections = MongoConnection()
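# A usage sketch: connections are cached per database name on the class, so
# repeated lookups reuse the same MongoClient:
#
#     conn = mongo_connections.get_connection(database='mytube')
#     conn is mongo_connections.get_connection(database='mytube')  # -> True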
class Youtube(object):
def __init__(self, auth_host_name = "localhost", auth_host_port = [8080], noauth_local_webserver = False, logging_level = "DEBUG", client_secrets_file = "client_secrets.json", storage_file = "oauth2.json"):
""" Returns a Youtube connection through the Google APIs using the OAuth2 method """
parser = OptionParser()
(flags, args) = parser.parse_args()
flags.auth_host_name = auth_host_name
flags.auth_host_port = auth_host_port
flags.noauth_local_webserver = noauth_local_webserver # This means use the local server for authing
flags.logging_level = logging_level # "DEBUG, CRITICAL, ERROR, FATAL",
YOUTUBE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
flow = flow_from_clientsecrets(client_secrets_file, scope=YOUTUBE_SCOPE, message="Missing or Bad client secrets file.")
storage = Storage(storage_file)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, flags)
self.youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()))
# MongoDB Connection, Database, Collections setup
self.connection = mongo_connections.get_connection(database = "mytube")
self.database = self.connection.mytube
# Collections
self.channels = self.database.channels
self.videos = self.database.videos
self.user_data = self.database.user_data
# def call(resource, action, parameters, failures = 0):
# """ This is a work in progress, it's going to consume all google API calls and try to handle any strange exceptions """
# try:
# self.youtube.resource().action(**parameters)
# except BadStatusLine, e:
# if failures >= 2:
# raise # It just keeps failing
# else:
# time.wait(1)
# self.call(resource = resource, action = action, parameters = parameters, failures = failures + 1)
def get_uploads(self):
channels_response = self.youtube.channels().list(mine=True, part="contentDetails").execute()
for channel in channels_response["items"]:
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
print("Videos in list %s" % uploads_list_id)
playlistitems_list_request = self.youtube.playlistItems().list(
playlistId=uploads_list_id,
part="snippet",
maxResults=50
)
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
for playlist_item in playlistitems_list_response["items"]:
title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
print("%s (%s)" % (title, video_id))
playlistitems_list_request = self.youtube.playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
print()
def post_bulletin(self, message, video_id = None, playlist_id = None):
""" You may post any message with a video id or playlist id, not both """
if video_id and playlist_id:
raise MessageCannotHaveBoth("video_id and playlist_id")
body = dict( snippet=dict(description=message) )
if video_id:
body["contentDetails"] = dict( bulletin=dict(resourceId=dict(
kind="youtube#video",
videoId=video_id)) )
if playlist_id:
body["contentDetails"] = dict( bulletin=dict(resourceId=dict(
kind="youtube#playlist",
playlistId=playlist_id)) )
self.youtube.activities().insert( part=",".join(body.keys()), body=body ).execute()
return "Success"
def get_subscriptions(self, convert_response_to_object = False):
full_list = []
details = self.youtube.subscriptions().list(
mine=True,
part="id,snippet,contentDetails",
maxResults = 50
).execute()
full_list.extend(details["items"])
total_subscriptions = details["pageInfo"]["totalResults"]
while details.get("nextPageToken"):
details = self.youtube.subscriptions().list(
mine=True,
part="id,snippet,contentDetails",
pageToken = details["nextPageToken"],
maxResults = 50
).execute()
full_list.extend(details["items"])
return convert_to_new_response(full_list, convert_to_object = convert_response_to_object)
def get_activities_for(self, channel_id, since = None, content_type = "all", convert_response_to_object = False):
""" Docs: https://developers.google.com/youtube/v3/docs/activities
snippet.type may be :
bulletin
channelItem
comment
favorite
like
playlistItem
recommendation
social
subscription
upload
content_type = "all" # Gives back everything
"""
if content_type not in ("bulletin",
"channelItem",
"comment",
"favorite",
"like",
"playlistItem",
"recommendation",
"social",
"subscription",
"upload",
"all"):
raise ContentTypeNotAllowed(content_type)
        if since is None:
            # default to the epoch so publishedAfter below is always well-defined
            since = datetime(1970, 1, 1)
        full_list = []
details = self.youtube.activities().list(
part="id,snippet,contentDetails",
channelId = channel_id,
maxResults = 50,
publishedAfter = since.strftime("%Y-%m-%dT%T-0600")
).execute()
if content_type == "all":
full_list.extend(details["items"])
else:
for item in details["items"]:
if item["snippet"]["type"] == content_type:
full_list.append(item)
while details.get("nextPageToken"):
details = self.youtube.activities().list(
part="id,snippet,contentDetails",
channelId = channel_id,
maxResults = 50,
publishedAfter = since.strftime("%Y-%m-%dT%T-0600"),
pageToken = details["nextPageToken"]
).execute()
if content_type == "all":
full_list.extend(details["items"])
else:
for item in details["items"]:
if item["snippet"]["type"] == content_type:
full_list.append(item)
return convert_to_new_response(full_list, convert_to_object = convert_response_to_object)
def get_uploads_from_subscriptions(self, since = None):
        subs = self.get_subscriptions(convert_response_to_object=True)  # objects needed for attribute access below
for sub in subs:
# print(sub.snippet.resourceId.channelId)
sub.activities = self.get_activities_for(
sub.snippet.resourceId.channelId,
since = since,
content_type = "upload")
return subs
def get_categories(self):
user = self.user_data.find_one({"user": "Kusinwolf"})
return {"categories": user["categories"]}
def get_uploads_from_category(self, category):
user = self.user_data.find_one({"user": "Kusinwolf"})
if category not in user["categories"]:
raise Exception("Bad Category")
new_uploads = []
for sub_cat in user["subscription_to_category"]:
channel_id = sub_cat.get("channel_id")
channel_category = sub_cat.get("category")
last_checked = sub_cat.get("last_checked")
if channel_category == category:
new_uploads.extend( self.get_activities_for(
channel_id = channel_id,
since = last_checked,
content_type ="upload") )
return new_uploads
|
kusinwolf/mytube
|
Youtube.py
|
Python
|
apache-2.0
| 10,932
|
from django.conf import settings
from django.template.base import (Library, Node, Variable,
TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, TOKEN_VAR,
TemplateSyntaxError, VariableDoesNotExist, Context)
from django.utils.encoding import smart_str
from django.templatetags.cache import CacheNode
from phased.utils import pickle_context, flatten_context, backup_csrf_token, second_pass_render
register = Library()
def parse(parser):
"""
Parse to the end of a phased block. This is different than Parser.parse()
in that it does not generate Node objects; it simply yields tokens.
"""
depth = 0
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK:
if token.contents == 'phased':
depth += 1
elif token.contents == 'endphased':
depth -= 1
if depth < 0:
break
yield token
if not parser.tokens and depth >= 0:
parser.unclosed_block_tag(('endphased',))
class PhasedNode(Node):
"""
Generated by {% phased %} tag. Outputs the literal content of the phased
block with pickled context, enclosed in a delimited block that can be
parsed by the second pass rendering middleware.
"""
def __init__(self, content, var_names):
self.var_names = var_names
self.content = content
def __repr__(self):
return "<Phased Node: '%s'>" % smart_str(self.content[:25], 'ascii',
errors='replace')
def render(self, context):
# our main context
storage = Context()
# stash the whole context if needed
if getattr(settings, 'PHASED_KEEP_CONTEXT', False):
storage.update(flatten_context(context))
# but check if there are variables specifically wanted
for var_name in self.var_names:
if var_name[0] in ('"', "'") and var_name[-1] == var_name[0]:
var_name = var_name[1:-1]
try:
storage[var_name] = Variable(var_name).resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError(
'"phased" tag got an unknown variable: %r' % var_name)
storage = backup_csrf_token(context, storage)
# lastly return the pre phased template part
return u'%(delimiter)s%(content)s%(pickled)s%(delimiter)s' % {
'content': self.content,
'delimiter': settings.PHASED_SECRET_DELIMITER,
'pickled': pickle_context(storage),
}
@register.tag
def phased(parser, token):
"""
Template tag to denote a template section to render a second time via
a middleware.
Usage::
{% load phased_tags %}
{% phased with [var1] [var2] .. %}
.. some content to be rendered a second time ..
{% endphased %}
You can pass it a list of context variable names to automatically
save those variables for the second pass rendering of the template,
e.g.::
{% load phased_tags %}
{% phased with comment_count object %}
There are {{ comment_count }} comments for "{{ object }}".
{% endphased %}
Alternatively you can also set the ``PHASED_KEEP_CONTEXT`` setting to
``True`` to automatically keep the whole context for each phased block.
Note: Lazy objects such as messages and csrf tokens aren't kept.
"""
literal = ''.join({
TOKEN_BLOCK: '{%% %s %%}',
TOKEN_VAR: '{{ %s }}',
TOKEN_COMMENT: '{# %s #}',
TOKEN_TEXT: '%s',
}[token.token_type] % token.contents for token in parse(parser))
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'with':
raise TemplateSyntaxError(u"'%r' tag requires the second argument to be 'with'." % tokens[0])
if len(tokens) == 2:
raise TemplateSyntaxError(u"'%r' tag requires at least one context variable name." % tokens[0])
return PhasedNode(literal, tokens[2:])
class PhasedCacheNode(CacheNode):
def render(self, context):
"""
Template tag that acts like Django's cached tag
except that it does a second pass rendering.
Requires `RequestContext` and
`django.core.context_processors.request` to be in
TEMPLATE_CONTEXT_PROCESSORS
"""
content = super(PhasedCacheNode, self).render(context)
return second_pass_render(context['request'], content)
@register.tag
def phasedcache(parser, token):
"""
Taken from ``django.templatetags.cache`` and changed ending tag.
This will cache the contents of a template fragment for a given amount
of time and do a second pass render on the contents.
Usage::
{% load phased_tags %}
{% phasedcache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% phased %}
.. some request specific stuff ..
{% endphased %}
{% endphasedcache %}
This tag also supports varying by a list of arguments::
{% load phased_tags %}
{% phasedcache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% phased %}
.. some request specific stuff ..
{% endphased %}
{% endphasedcache %}
Each unique set of arguments will result in a unique cache entry.
The tag will take care that the phased tags are properly rendered.
It requires usage of ``RequestContext`` and
``django.core.context_processors.request`` to be in the
``TEMPLATE_CONTEXT_PROCESSORS`` setting.
"""
nodelist = parser.parse(('endphasedcache',))
parser.delete_first_token()
tokens = token.contents.split()
if len(tokens) < 3:
raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % tokens[0])
return PhasedCacheNode(nodelist, tokens[1], tokens[2], tokens[3:])
|
mab2k/django-phased
|
phased/templatetags/phased_tags.py
|
Python
|
bsd-3-clause
| 5,955
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class HybridConnectionKey(ProxyOnlyResource):
"""Hybrid Connection key contract. This has the send key name and value for a
Hybrid Connection.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar send_key_name: The name of the send key.
:vartype send_key_name: str
:ivar send_key_value: The value of the send key.
:vartype send_key_value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'send_key_name': {'readonly': True},
'send_key_value': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
}
def __init__(self, kind=None):
super(HybridConnectionKey, self).__init__(kind=kind)
self.send_key_name = None
self.send_key_value = None
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/hybrid_connection_key.py
|
Python
|
mit
| 1,924
|
# This file is part of Rubber and thus covered by the GPL
import rubber.dvip_tool
import rubber.module_interface
class Module (rubber.module_interface.Module):
def __init__ (self, document, opt):
self.dep = rubber.dvip_tool.Dvip_Tool_Dep_Node (document, 'dvips')
|
skapfer/rubber
|
src/latex_modules/dvips.py
|
Python
|
gpl-2.0
| 278
|
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-04-26 09:20:35
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-04-26 09:20:35 by Brian Cherinka
Last Modified On: 2016-04-26 09:20:35 by Brian
'''
import numpy
from decimal import Decimal
from psycopg2.extensions import register_adapter, AsIs, new_type, DECIMAL, register_type
# See:
# http://rehalcon.blogspot.com/2010/03/sqlalchemy-programmingerror-cant-adapt.html
# and
# http://initd.org/psycopg/docs/advanced.html#adapting-new-python-types-to-sql-syntax
# and
# http://pyopengl.sourceforge.net/pydoc/numpy.core.numerictypes.html
#
# http://numpy.sourceforge.net/numdoc/HTML/numdoc.htm
''' numpy data types:
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
'''
DEC2FLOAT = new_type(DECIMAL.values, 'DEC2FLOAT', lambda value,
curs: float(value) if value is not None else None)
register_type(DEC2FLOAT)
def adapt_decimal(value):
    # adapt the Decimal value itself, not the float type object
    return AsIs(float(value))
register_adapter(Decimal, adapt_decimal)
def adapt_numpy_int8(numpy_int8):
return AsIs(numpy_int8)
register_adapter(numpy.int8, adapt_numpy_int8)
def adapt_numpy_int16(numpy_int16):
return AsIs(numpy_int16)
register_adapter(numpy.int16, adapt_numpy_int16)
def adapt_numpy_int32(numpy_int32):
return AsIs(numpy_int32)
register_adapter(numpy.int32, adapt_numpy_int32)
def adapt_numpy_int64(numpy_int64):
return AsIs(numpy_int64)
register_adapter(numpy.int64, adapt_numpy_int64)
# def adapt_numpy_int128(numpy_int128):
# return AsIs(numpy_int128)
# register_adapter(numpy.int128, adapt_numpy_int128)
def adapt_numpy_uint8(numpy_uint8):
return AsIs(numpy_uint8)
register_adapter(numpy.uint8, adapt_numpy_uint8)
def adapt_numpy_uint16(numpy_uint16):
return AsIs(numpy_uint16)
register_adapter(numpy.uint16, adapt_numpy_uint16)
def adapt_numpy_uint32(numpy_uint32):
return AsIs(numpy_uint32)
register_adapter(numpy.uint32, adapt_numpy_uint32)
def adapt_numpy_uint64(numpy_uint64):
return AsIs(numpy_uint64)
register_adapter(numpy.uint64, adapt_numpy_uint64)
# def adapt_numpy_uint128(numpy_uint128):
# return AsIs(numpy_uint128)
# register_adapter(numpy.uint128, adapt_numpy_uint128)
# def adapt_numpy_float16(numpy_float16):
# return AsIs(numpy_float16)
# register_adapter(numpy.float16, adapt_numpy_float16)
def adapt_numpy_float32(numpy_float32):
return AsIs(numpy_float32)
register_adapter(numpy.float32, adapt_numpy_float32)
def adapt_numpy_float64(numpy_float64):
return AsIs(numpy_float64)
register_adapter(numpy.float64, adapt_numpy_float64)
# def adapt_numpy_float96(numpy_float96):
# return AsIs(numpy_float96)
# register_adapter(numpy.float96, adapt_numpy_float96)
# def adapt_numpy_float128(numpy_float128):
# return AsIs(numpy_float128)
# register_adapter(numpy.float128, adapt_numpy_float128)
# def adapt_numpy_float256(numpy_float256):
# return AsIs(numpy_float256)
# register_adapter(numpy.float256, adapt_numpy_float256)
# def adapt_numpy_complex32(numpy_complex32):
# return AsIs(numpy_complex32)
# register_adapter(numpy.complex32, adapt_numpy_complex32)
# def adapt_numpy_complex64(numpy_complex64):
# return AsIs(numpy_complex64)
# register_adapter(numpy.complex64, adapt_numpy_complex64)
# def adapt_numpy_complex128(numpy_complex128):
# return AsIs(numpy_complex128)
# register_adapter(numpy.complex128, adapt_numpy_complex128)
# def adapt_numpy_complex192(numpy_complex192):
# return AsIs(numpy_complex192)
# register_adapter(numpy.complex192, adapt_numpy_complex192)
# def adapt_numpy_complex256(numpy_complex256):
# return AsIs(numpy_complex256)
# register_adapter(numpy.complex256, adapt_numpy_complex256)
# def adapt_numpy_complex512(numpy_complex512):
# return AsIs(numpy_complex512)
# register_adapter(numpy.complex512, adapt_numpy_complex512)
def adapt_numpy_nan(numpy_nan):
    return AsIs("'NaN'")
register_adapter(numpy.nan, adapt_numpy_nan)
def adapt_numpy_inf(numpy_inf):
    return AsIs("'Infinity'")
register_adapter(numpy.inf, adapt_numpy_inf)
def adapt_numpy_ndarray(numpy_ndarray):
return AsIs(numpy_ndarray.tolist())
register_adapter(numpy.ndarray, adapt_numpy_ndarray)
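# A minimal usage sketch: with the adapters above registered, numpy scalars
# can be passed straight into psycopg2 queries (cursor setup omitted):
#
#     cur.execute('SELECT %s + %s', (numpy.float32(1.5), numpy.int64(2)))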
|
bretthandrews/marvin
|
python/marvin/db/NumpyAdaptors.py
|
Python
|
bsd-3-clause
| 4,343
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 05 07:55:56 2016
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import
import h5py
import numpy as np
import sklearn.decomposition as dec
from ..io.hdf_utils import checkIfMain
from ..io.hdf_utils import getH5DsetRefs, checkAndLinkAncillary
from ..io.io_hdf5 import ioHDF5
from ..io.io_utils import check_dtype, transformToTargetType
from ..io.microdata import MicroDataGroup, MicroDataset
class Decomposition(object):
"""
Pycroscopy wrapper around the sklearn.decomposition classes
"""
def __init__(self, h5_main, method_name, n_components=None, *args, **kwargs):
"""
Constructs the Decomposition object
Parameters
------------
h5_main : HDF5 dataset object
Main dataset with ancillary spectroscopic, position indices and values datasets
method_name : string / unicode
Name of the sklearn.cluster estimator
n_components : (Optional) unsigned int
Number of components for decomposition
*args and **kwargs : arguments to be passed to the estimator
"""
if n_components is not None:
kwargs['n_components'] = n_components
allowed_methods = ['FactorAnalysis','FastICA','IncrementalPCA',
'MiniBatchSparsePCA','NMF','PCA','RandomizedPCA',
'SparsePCA','TruncatedSVD']
        # check if h5_main is a valid pycroscopy main dataset
if not checkIfMain(h5_main):
raise TypeError('Supplied dataset is not a pycroscopy main dataset')
if method_name not in allowed_methods:
raise TypeError('Cannot work with {} just yet'.format(method_name))
self.h5_main = h5_main
# Instantiate the decomposition object
self.estimator = dec.__dict__[method_name].__call__(*args, **kwargs)
self.method_name = method_name
        # figure out the operation that needs to be performed to convert to real scalar
retval = check_dtype(h5_main)
self.data_transform_func, self.data_is_complex, self.data_is_compound, \
self.data_n_features, self.data_n_samples, self.data_type_mult = retval
def doDecomposition(self):
"""
        Decomposes the hdf5 dataset and writes the components and projections back to the hdf5 file
Returns
--------
h5_group : HDF5 Group reference
Reference to the group that contains the decomposition results
"""
self._fit()
self._transform()
return self._writeToHDF5(transformToTargetType(self.estimator.components_, self.h5_main.dtype),
self.projection)
def _fit(self):
"""
Fits the provided dataset
Returns
------
None
"""
# perform fit on the real dataset
if self.method_name=='NMF':
self.estimator.fit(self.data_transform_func(np.abs(self.h5_main)))
else:
self.estimator.fit(self.data_transform_func(self.h5_main))
def _transform(self, data=None):
"""
Transforms the original OR provided dataset with previously computed fit
Parameters
--------
data : (optional) HDF5 dataset
Dataset to apply the transform to.
The number of elements in the first axis of this dataset should match that of the original dataset that was fitted
Returns
------
None
"""
if data is None:
if self.method_name=='NMF':
self.projection = self.estimator.transform(self.data_transform_func(np.abs(self.h5_main)))
else:
self.projection = self.estimator.transform(self.data_transform_func(self.h5_main))
else:
if isinstance(data, h5py.Dataset):
if data.shape[0] == self.h5_main.shape[0]:
self.projection = self.estimator.transform(data)
def _writeToHDF5(self, components, projection):
"""
        Writes the components and projections to the h5 file
        Parameters
        ------------
        components : 2D numpy array
            Array of components as obtained from the fit, arranged as [component number, response]
        projection : 2D numpy array
            Projection of the original dataset onto the components
        Returns
        ---------
        h5_group : HDF5 Group reference
            Reference to the group that contains the decomposition results
"""
ds_components = MicroDataset('Components', components)# equivalent to V
ds_projections = MicroDataset('Projection', np.float32(projection)) # equivalent of U compound
decomp_ind_mat = np.transpose(np.atleast_2d(np.arange(components.shape[0])))
ds_decomp_inds = MicroDataset('Decomposition_Indices', np.uint32(decomp_ind_mat))
ds_decomp_vals = MicroDataset('Decomposition_Values', np.float32(decomp_ind_mat))
# write the labels and the mean response to h5
decomp_slices = {'Decomp': (slice(None), slice(0, 1))}
ds_decomp_inds.attrs['labels'] = decomp_slices
ds_decomp_inds.attrs['units'] = ['']
ds_decomp_vals.attrs['labels'] = decomp_slices
ds_decomp_vals.attrs['units'] = ['']
decomp_grp = MicroDataGroup(self.h5_main.name.split('/')[-1] + '-Decomposition_', self.h5_main.parent.name[1:])
decomp_grp.addChildren([ds_components, ds_projections, ds_decomp_inds, ds_decomp_vals])
decomp_grp.attrs['num_components'] = components.shape[0]
decomp_grp.attrs['num_samples'] = self.h5_main.shape[0]
decomp_grp.attrs['decomposition_algorithm'] = self.method_name
'''
Get the parameters of the estimator used and write them
as attributes of the group
'''
for parm in self.estimator.get_params().keys():
decomp_grp.attrs[parm] = self.estimator.get_params()[parm]
hdf = ioHDF5(self.h5_main.file)
h5_decomp_refs = hdf.writeData(decomp_grp)
h5_components = getH5DsetRefs(['Components'], h5_decomp_refs)[0]
        h5_projections = getH5DsetRefs(['Projection'], h5_decomp_refs)[0]  # the dataset written above is named 'Projection'
h5_decomp_inds = getH5DsetRefs(['Decomposition_Indices'], h5_decomp_refs)[0]
h5_decomp_vals = getH5DsetRefs(['Decomposition_Values'], h5_decomp_refs)[0]
checkAndLinkAncillary(h5_projections,
['Position_Indices', 'Position_Values'],
h5_main=self.h5_main)
checkAndLinkAncillary(h5_projections,
['Spectroscopic_Indices', 'Spectroscopic_Values'],
anc_refs=[h5_decomp_inds, h5_decomp_vals])
checkAndLinkAncillary(h5_components,
['Spectroscopic_Indices', 'Spectroscopic_Values'],
h5_main=self.h5_main)
checkAndLinkAncillary(h5_components,
['Position_Indices', 'Position_Values'],
anc_refs=[h5_decomp_inds, h5_decomp_vals])
# return the h5 group object
return h5_components.parent
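# A minimal usage sketch: wrap an open pycroscopy main dataset and run the
# full fit/transform/write pipeline (h5_main and the component count are
# assumptions for illustration):
#
#     decomp = Decomposition(h5_main, 'PCA', n_components=16)
#     h5_group = decomp.doDecomposition()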
|
anugrah-saxena/pycroscopy
|
pycroscopy/processing/decomposition.py
|
Python
|
mit
| 7,296
|
#!/usr/bin/env python
'''
Command to send data extracted from prometheus endpoints to monitoring systems
For example config see prometheus_metrics.yml.example in the same folder this script is
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
#If we break the few lines it will be harder to read in this case
#pylint: disable=line-too-long
import argparse
import logging
import sys
import yaml
import requests
from openshift_tools.monitoring.metric_sender import MetricSender
from prometheus_client.parser import text_string_to_metric_families
logger = logging.getLogger()
logger.setLevel(logging.INFO)
consolelog = logging.StreamHandler()
logger.addHandler(consolelog)
class PrometheusMetricSender(object):
""" class to gather all metrics from prometheus metrics endpoints """
def __init__(self):
self.args = None
self.parser = None
self.config = None
self.metric_sender = MetricSender()
def parse_args(self):
'''Parse the arguments for this script'''
self.parser = argparse.ArgumentParser(description="Script that gathers metrics from prometheus endpoints")
self.parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
self.parser.add_argument('-t', '--test', default=False,
action="store_true", help="Run the script but don't send to monitoring systems")
self.parser.add_argument('-c', '--configfile', default='/etc/openshift_tools/prometheus_metrics.yml',
help="Config file that contains metrics to be collected, defaults to prometheus_metrics.yml")
self.args = self.parser.parse_args()
@staticmethod
def call_api(rest_path):
        ''' Makes a REST call to the given URL '''
try:
response = requests.get(rest_path)
except requests.exceptions.ConnectionError:
logger.exception('Error talking to the rest endpoint given: %s', rest_path)
else:
return response.content
def read_metric(self, met):
''' read a prometheus endpoint and create data for monitoring systems'''
return_data = {}
content = self.call_api(met['url'])
if content is not None:
for metric in text_string_to_metric_families(content):
# skipping histogram and summary types unless we find a good way to add them to zabbix (unlikely)
if metric.type in ['histogram', 'summary']:
continue
elif metric.type in ['counter', 'gauge']:
if metric.name in met['metrics']:
zmetric_name = '{}.{}'.format(met['name'], metric.name.replace('_', '.'))
logger.debug('Sending: %s - %s', zmetric_name, metric.samples[0][2])
return_data[zmetric_name] = metric.samples[0][2]
else:
                        logger.debug('Skipping metric that was not requested: %s', metric.name)
else:
logger.error('Unknown metric type: %s - %s', metric.type, metric.name)
return return_data
@staticmethod
def check_endpoint(endpoint_config):
        ''' Quick check that an endpoint config has the required keys and that they are not empty.
        For example, a valid endpoint config has a name, url, and metrics listed:
        - name: 'podchecker'
          url: 'http://podchecker.projectawesome.svc.cluster.local:1234/metrics'
          metrics:
          - 'podchecker_awesome_stats'
          - 'podchecker_memory_usage'
        If any of these options is missing or empty, PrometheusMetricSender skips the endpoint.
        '''
        for item in ('name', 'url', 'metrics'):
if not endpoint_config.get(item):
return False
return True
def run(self):
''' Get data from prometheus metrics endpoints
'''
self.parse_args()
if self.args.debug:
logger.setLevel(logging.DEBUG)
try:
with open(self.args.configfile, 'r') as configfile:
self.config = yaml.safe_load(configfile)
except IOError:
logger.exception('There was a problem opening the config file')
logger.error('Exiting because of above problem')
sys.exit(1)
for target in self.config['endpoints']:
if self.check_endpoint(target):
self.metric_sender.add_metric(self.read_metric(target))
self.send_zagg_data()
def send_zagg_data(self):
        ''' Send the data to monitoring, or display it on the console when the test option is used
'''
if not self.args.test:
self.metric_sender.send_metrics()
else:
self.metric_sender.print_unique_metrics()
if __name__ == '__main__':
PMS = PrometheusMetricSender()
PMS.run()
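
# A hedged example of the YAML config this script expects, mirroring the
# check_endpoint docstring (endpoint name, URL and metric names below are
# illustrative assumptions):
#
#   endpoints:
#   - name: 'podchecker'
#     url: 'http://podchecker.projectawesome.svc.cluster.local:1234/metrics'
#     metrics:
#     - 'podchecker_awesome_stats'
#     - 'podchecker_memory_usage'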
|
rhdedgar/openshift-tools
|
scripts/monitoring/cron-send-prometheus-data.py
|
Python
|
apache-2.0
| 5,124
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015-2019 by Inteos Sp. z o.o.
# All rights reserved. See LICENSE file for details.
#
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Widgets(models.Model):
name = models.TextField(unique=True)
icon = models.CharField(max_length=32)
widgetid = models.CharField(unique=True, blank=False, max_length=32)
template = models.TextField(blank=False)
templatejs = models.TextField(blank=False)
height = models.IntegerField(null=True, default=189)
class Meta:
ordering = ('id',)
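
# Hedged usage sketch (illustrative; the field values are made up, and the
# template fields would normally hold real markup and JavaScript):
#
#   Widgets.objects.create(name='Job status', icon='fa-tasks',
#                          widgetid='jobstatus',
#                          template='<div>...</div>',
#                          templatejs='function render() { ... }')
#   Widgets.objects.all()   # ordered by id, per Meta.ordering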
|
inteos/IBAdmin
|
dashboard/models.py
|
Python
|
agpl-3.0
| 608
|
#!/usr/bin/env python3
import unittest, argparse
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbosity", default=2, type=int)
v = parser.parse_args().verbosity
loader = unittest.TestLoader()
suite = loader.discover(start_dir="tests", pattern="*.py")
runner = unittest.TextTestRunner(verbosity=v)
result = runner.run(suite)
|
mymedia2/vk-cli
|
launch_tests.py
|
Python
|
lgpl-3.0
| 343
|
import base64
import logging
from urllib import urlencode
from decimal import getcontext
from dateutil.tz import tzutc
import httplib2
from sharpy.exceptions import CheddarError, AccessDenied, BadRequest, NotFound, PreconditionFailed, CheddarFailure, NaughtyGateway, UnprocessableEntity
client_log = logging.getLogger('SharpyClient')
class Client(object):
default_endpoint = 'https://cheddargetter.com/xml'
def __init__(self, username, password, product_code, cache=None, timeout=None, endpoint=None):
'''
username - Your cheddargetter username (probably an email address)
password - Your cheddargetter password
product_code - The product code for the product you want to work with
cache - A file system path or an object which implements the httplib2
cache API (optional)
        timeout - Socket level timeout in seconds (optional)
endpoint - An alternate API endpoint (optional)
'''
self.username = username
self.password = password
self.product_code = product_code
self.endpoint = endpoint or self.default_endpoint
self.cache = cache
self.timeout = timeout
super(Client, self).__init__()
def build_url(self, path, params=None):
'''
Constructs the url for a cheddar API resource
'''
url = u'%s/%s/productCode/%s' % (
self.endpoint,
path,
self.product_code,
)
if params:
for key, value in params.items():
url = u'%s/%s/%s' % (url, key, value)
return url
def format_datetime(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%dT%H:%M:%S+00:00')
return str_dt
def format_date(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%d')
return str_dt
def make_request(self, path, params=None, data=None, method=None):
'''
Makes a request to the cheddar api using the authentication and
configuration settings available.
'''
# Setup values
url = self.build_url(path, params)
client_log.debug('Requesting: %s' % url)
method = method or 'GET'
body = None
headers = {}
if data:
method = 'POST'
body = urlencode(data)
headers = {
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
client_log.debug('Request Method: %s' % method)
client_log.debug('Request Body(Data): %s' % data)
client_log.debug('Request Body(Raw): %s' % body)
# Setup http client
h = httplib2.Http(cache=self.cache, timeout=self.timeout)
#h.add_credentials(self.username, self.password)
# Skip the normal http client behavior and send auth headers immediately
# to save an http request.
headers['Authorization'] = "Basic %s" % base64.standard_b64encode(self.username + ':' + self.password).strip()
# Make request
response, content = h.request(url, method, body=body, headers=headers)
status = response.status
client_log.debug('Response Status: %d' % status)
client_log.debug('Response Content: %s' % content)
if status != 200 and status != 302:
exception_class = CheddarError
if status == 401:
exception_class = AccessDenied
elif status == 400:
exception_class = BadRequest
elif status == 404:
exception_class = NotFound
elif status == 412:
exception_class = PreconditionFailed
elif status == 500:
exception_class = CheddarFailure
elif status == 502:
exception_class = NaughtyGateway
elif status == 422:
exception_class = UnprocessableEntity
raise exception_class(response, content)
response.content = content
return response
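
# Hedged usage sketch (illustrative; the credentials, product code and API
# path are placeholder assumptions):
#
#   client = Client('user@example.com', 'secret', 'MY_PRODUCT')
#   response = client.make_request('customers/get',
#                                  params={'code': 'CUSTOMER_CODE'})
#   print response.status, response.content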
|
SeanOC/sharpy
|
sharpy/client.py
|
Python
|
bsd-3-clause
| 4,556
|
from event import Event
class SensorEvent(Event):
def __init__(self, t, label, value):
Event.__init__(self, t, label)
self.value = value
def getValue(self):
return self.value
def __repr__(self):
return "SensorEvent["+str(self.label)+",t="+str(self.t)+",value="+str(self.value)+"]"
|
sonologic/thermo2
|
src/py/sensor_event.py
|
Python
|
gpl-2.0
| 330
|
# furElise.py
# Generates the theme from Beethoven's Fur Elise.
from music import *
# theme has some repetition, so break it up to maximize economy
# (also notice how we line up corresponding pitches and durations)
pitches1 = [E5, DS5, E5, DS5, E5, B4, D5, C5]
durations1 = [SN, SN, SN, SN, SN, SN, SN, SN]
pitches2 = [A4, REST, C4, E4, A4, B4, REST, E4]
durations2 = [EN, SN, SN, SN, SN, EN, SN, SN]
pitches3 = [GS4, B4, C5, REST, E4]
durations3 = [SN, SN, EN, SN, SN]
pitches4 = [C5, B4, A4]
durations4 = [SN, SN, EN]
# create an empty phrase, and construct theme from the above motifs
theme = Phrase()
theme.addNoteList(pitches1, durations1)
theme.addNoteList(pitches2, durations2)
theme.addNoteList(pitches3, durations3)
theme.addNoteList(pitches1, durations1) # again
theme.addNoteList(pitches2, durations2)
theme.addNoteList(pitches4, durations4)
# play it
Play.midi(theme)
|
manaris/jythonMusic
|
2. furElise.py
|
Python
|
gpl-3.0
| 908
|
#
# example from CHiLL manual page 18
#
# shift a loop
#
from chill import *
source('shift_to.c')
destination('shift_to2modified.c')
procedure('mm')
loop(0)
known('ambn > 0')
known('an > 0')
known('bm > 3')
shift_to( 1, 2, 3 )
|
ztuowen/chill-dev
|
examples/chill/testcases/shift_to2.script.py
|
Python
|
gpl-3.0
| 241
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
#
# Authors: Ruda Moura <rmoura@redhat.com>
# Lucas Meneghel Rodrigues <lmr@redhat.com>
#
"""
This module contains handy classes that can be used inside
avocado core code or plugins.
"""
class Borg:
"""
Multiple instances of this class will share the same state.
This is considered a better design pattern in Python than
more popular patterns, such as the Singleton. Inspired by
Alex Martelli's article mentioned below:
:see: http://www.aleax.it/5ep.html
"""
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
class LazyProperty(object):
"""
Lazily instantiated property.
Use this decorator when you want to set a property that will only be
evaluated the first time it's accessed. Inspired by the discussion in
the Stack Overflow thread below:
:see: http://stackoverflow.com/questions/15226721/
"""
def __init__(self, f_get):
self.f_get = f_get
self.func_name = f_get.__name__
def __get__(self, obj, cls):
if obj is None:
return None
value = self.f_get(obj)
setattr(obj, self.func_name, value)
return value
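
if __name__ == '__main__':
    # Minimal demonstration, not part of the original module: Borg subclasses
    # share a single state dict across instances, and LazyProperty computes a
    # value once and caches it as a plain instance attribute.
    class Config(Borg):
        pass

    first, second = Config(), Config()
    first.debug = True
    assert second.debug is True      # state is shared, identity is not
    assert first is not second

    class Report(object):
        @LazyProperty
        def data(self):
            print("computing once")  # printed only on the first access
            return [1, 2, 3]

    report = Report()
    assert report.data == [1, 2, 3]  # first access triggers the computation
    assert report.data == [1, 2, 3]  # now served from the cached attribute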
|
will-Do/avocado
|
avocado/utils/data_structures.py
|
Python
|
gpl-2.0
| 1,698
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
import const
from socket import gethostbyname
from BitTorrent.platform import bttime as time
from sha import sha
import re
from BitTorrent.defaultargs import common_options, rare_options
from BitTorrent.RawServer_magic import RawServer
from ktable import KTable, K
from knode import *
from kstore import KStore
from khash import newID, newIDInRange
from util import packNodes
from actions import FindNode, GetValue, KeyExpirer, StoreValue
import krpc
import sys
import os
import traceback
from BitTorrent.bencode import bencode, bdecode
from BitTorrent.defer import Deferred
from random import randrange
from kstore import sample
from threading import Event, Thread
ip_pat = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
class KhashmirDBExcept(Exception):
pass
def foo(bytes):
    # default no-op rate-limit counter callback (see the rlcount argument below)
    pass
# this is the base class, has base functionality and find node, no key-value mappings
class KhashmirBase:
_Node = KNodeBase
def __init__(self, host, port, data_dir, rawserver=None, max_ul_rate=1024, checkpoint=True, errfunc=None, rlcount=foo, config={'pause':False, 'max_rate_period':20}):
if rawserver:
self.rawserver = rawserver
else:
self.flag = Event()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.rawserver = RawServer(self.flag, d)
self.max_ul_rate = max_ul_rate
self.socket = None
self.config = config
self.setup(host, port, data_dir, rlcount, checkpoint)
def setup(self, host, port, data_dir, rlcount, checkpoint=True):
self.host = host
self.port = port
self.ddir = data_dir
self.store = KStore()
self.pingcache = {}
self.socket = self.rawserver.create_udpsocket(self.port, self.host, False)
self.udp = krpc.hostbroker(self, (self.host, self.port), self.socket, self.rawserver.add_task, self.max_ul_rate, self.config, rlcount)
self._load()
self.rawserver.start_listening_udp(self.socket, self.udp)
self.last = time()
KeyExpirer(self.store, self.rawserver.add_task)
self.refreshTable(force=1)
if checkpoint:
self.rawserver.add_task(self.findCloseNodes, 30, (lambda a: a, True))
self.rawserver.add_task(self.checkpoint, 60, (1,))
def Node(self):
n = self._Node(self.udp.connectionForAddr)
n.table = self
return n
def __del__(self):
if self.socket is not None:
self.rawserver.stop_listening_udp(self.socket)
self.socket.close()
def _load(self):
do_load = False
try:
s = open(os.path.join(self.ddir, "routing_table"), 'r').read()
dict = bdecode(s)
except:
id = newID()
else:
id = dict['id']
do_load = True
self.node = self._Node(self.udp.connectionForAddr).init(id, self.host, self.port)
self.table = KTable(self.node)
if do_load:
self._loadRoutingTable(dict['rt'])
def checkpoint(self, auto=0):
d = {}
d['id'] = self.node.id
d['rt'] = self._dumpRoutingTable()
try:
f = open(os.path.join(self.ddir, "routing_table"), 'wb')
f.write(bencode(d))
f.close()
        except Exception, e:
            # XXX should surface a real error here instead of printing
            print ">>> unable to dump routing table!", str(e)
if auto:
self.rawserver.add_task(self.checkpoint,
randrange(int(const.CHECKPOINT_INTERVAL * .9),
int(const.CHECKPOINT_INTERVAL * 1.1)),
(1,))
def _loadRoutingTable(self, nodes):
"""
load routing table nodes from database
it's usually a good idea to call refreshTable(force=1) after loading the table
"""
for rec in nodes:
n = self.Node().initWithDict(rec)
self.table.insertNode(n, contacted=0, nocheck=True)
def _dumpRoutingTable(self):
"""
save routing table nodes to the database
"""
l = []
for bucket in self.table.buckets:
for node in bucket.l:
l.append({'id':node.id, 'host':node.host, 'port':node.port, 'age':int(node.age)})
return l
def _addContact(self, host, port, callback=None):
"""
ping this node and add the contact info to the table on pong!
"""
        n = self.Node().init(const.NULL_ID, host, port)
try:
self.sendPing(n, callback=callback)
except krpc.KRPCSelfNodeError:
# our own node
pass
#######
####### LOCAL INTERFACE - use these methods!
def addContact(self, ip, port, callback=None):
"""
ping this node and add the contact info to the table on pong!
"""
        if ip_pat.match(ip):
            self._addContact(ip, port, callback)
        else:
            def go(ip=ip, port=port, callback=callback):
                ip = gethostbyname(ip)
                self.rawserver.external_add_task(self._addContact, 0, (ip, port, callback))
t = Thread(target=go)
t.start()
## this call is async!
def findNode(self, id, callback, errback=None):
""" returns the contact info for node, or the k closest nodes, from the global table """
# get K nodes out of local table/cache, or the node we want
nodes = self.table.findNodes(id, invalid=True)
l = [x for x in nodes if x.invalid]
if len(l) > 4:
            nodes = sample(l, 4) + self.table.findNodes(id, invalid=False)[:4]
d = Deferred()
if errback:
d.addCallbacks(callback, errback)
else:
d.addCallback(callback)
        if len(nodes) == 1 and nodes[0].id == id:
d.callback(nodes)
else:
# create our search state
state = FindNode(self, id, d.callback, self.rawserver.add_task)
self.rawserver.external_add_task(state.goWithNodes, 0, (nodes,))
def insertNode(self, n, contacted=1):
"""
insert a node in our local table, pinging oldest contact in bucket, if necessary
If all you have is a host/port, then use addContact, which calls this method after
        receiving the PONG from the remote node. The reason for the separation is that we can't insert
        a node into the table without its peer ID. That means, of course, that the node passed into this
        method needs to be a properly formed Node object with a valid ID.
"""
old = self.table.insertNode(n, contacted=contacted)
if old and old != n:
if not old.inPing():
self.checkOldNode(old, n, contacted)
else:
l = self.pingcache.get(old.id, [])
if len(l) < 10 or contacted:
l.append((n, contacted))
self.pingcache[old.id] = l
def checkOldNode(self, old, new, contacted=False):
## these are the callbacks used when we ping the oldest node in a bucket
def cmp(a, b):
if a[1] == 1 and b[1] == 0:
return -1
elif b[1] == 1 and a[1] == 0:
return 1
else:
return 0
def _staleNodeHandler(dict, old=old, new=new, contacted=contacted):
""" called if the pinged node never responds """
if old.fails >= 2:
l = self.pingcache.get(old.id, [])
l.sort(cmp)
if l:
n, nc = l[0]
if (not contacted) and nc:
l = l[1:] + [(new, contacted)]
new = n
contacted = nc
o = self.table.replaceStaleNode(old, new)
if o and o != new:
self.checkOldNode(o, new)
try:
self.pingcache[o.id] = self.pingcache[old.id]
del(self.pingcache[old.id])
except KeyError:
pass
else:
if l:
del(self.pingcache[old.id])
l.sort(cmp)
for node in l:
self.insertNode(node[0], node[1])
else:
l = self.pingcache.get(old.id, [])
if l:
del(self.pingcache[old.id])
self.insertNode(new, contacted)
for node in l:
self.insertNode(node[0], node[1])
def _notStaleNodeHandler(dict, old=old, new=new, contacted=contacted):
""" called when we get a pong from the old node """
self.table.insertNode(old, True)
self.insertNode(new, contacted)
l = self.pingcache.get(old.id, [])
l.sort(cmp)
for node in l:
self.insertNode(node[0], node[1])
try:
del(self.pingcache[old.id])
except KeyError:
pass
        try:
            df = old.ping(self.node.id)
        except krpc.KRPCSelfNodeError:
            pass
        else:
            df.addCallbacks(_notStaleNodeHandler, _staleNodeHandler)
def sendPing(self, node, callback=None):
"""
ping a node
"""
try:
df = node.ping(self.node.id)
except krpc.KRPCSelfNodeError:
pass
else:
## these are the callbacks we use when we issue a PING
def _pongHandler(dict, node=node, table=self.table, callback=callback):
_krpc_sender = dict['_krpc_sender']
dict = dict['rsp']
sender = {'id' : dict['id']}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
table.insertNode(n)
if callback:
callback()
def _defaultPong(err, node=node, table=self.table, callback=callback):
if callback:
callback()
df.addCallbacks(_pongHandler,_defaultPong)
def findCloseNodes(self, callback=lambda a: a, auto=False):
"""
This does a findNode on the ID one away from our own.
This will allow us to populate our table with nodes on our network closest to our own.
This is called as soon as we start up with an empty table
"""
if not self.config['pause']:
id = self.node.id[:-1] + chr((ord(self.node.id[-1]) + 1) % 256)
self.findNode(id, callback)
if auto:
if not self.config['pause']:
self.refreshTable()
self.rawserver.external_add_task(self.findCloseNodes, randrange(int(const.FIND_CLOSE_INTERVAL *0.9),
int(const.FIND_CLOSE_INTERVAL *1.1)), (lambda a: True, True))
def refreshTable(self, force=0):
"""
force=1 will refresh table regardless of last bucket access time
"""
def callback(nodes):
pass
        refresh = [bucket for bucket in self.table.buckets
                   if force
                   or (len(bucket.l) < K)
                   or len(filter(lambda a: a.invalid, bucket.l))
                   or (time() - bucket.lastAccessed > const.BUCKET_STALENESS)]
for bucket in refresh:
id = newIDInRange(bucket.min, bucket.max)
self.findNode(id, callback)
def stats(self):
"""
Returns (num_contacts, num_nodes)
        num_contacts: number of contacts in our routing table
num_nodes: number of nodes estimated in the entire dht
"""
num_contacts = reduce(lambda a, b: a + len(b.l), self.table.buckets, 0)
num_nodes = const.K * (2**(len(self.table.buckets) - 1))
return {'num_contacts':num_contacts, 'num_nodes':num_nodes}
def krpc_ping(self, id, _krpc_sender):
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
return {"id" : self.node.id}
def krpc_find_node(self, target, id, _krpc_sender):
nodes = self.table.findNodes(target, invalid=False)
nodes = map(lambda node: node.senderDict(), nodes)
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
return {"nodes" : packNodes(nodes), "id" : self.node.id}
## This class provides read-only access to the DHT, valueForKey
## you probably want to use this mixin and provide your own write methods
class KhashmirRead(KhashmirBase):
_Node = KNodeRead
def retrieveValues(self, key):
try:
l = self.store[key]
except KeyError:
l = []
return l
## also async
def valueForKey(self, key, callback, searchlocal = 1):
""" returns the values found for key in global table
callback will be called with a list of values for each peer that returns unique values
final callback will be an empty list - probably should change to 'more coming' arg
"""
nodes = self.table.findNodes(key)
# get locals
if searchlocal:
l = self.retrieveValues(key)
if len(l) > 0:
self.rawserver.external_add_task(callback, 0, (l,))
else:
l = []
# create our search state
state = GetValue(self, key, callback, self.rawserver.add_task)
self.rawserver.external_add_task(state.goWithNodes, 0, (nodes, l))
def krpc_find_value(self, key, id, _krpc_sender):
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
l = self.retrieveValues(key)
if len(l) > 0:
return {'values' : l, "id": self.node.id}
else:
nodes = self.table.findNodes(key, invalid=False)
nodes = map(lambda node: node.senderDict(), nodes)
return {'nodes' : packNodes(nodes), "id": self.node.id}
### provides a generic write method, you probably don't want to deploy something that allows
### arbitrary value storage
class KhashmirWrite(KhashmirRead):
_Node = KNodeWrite
## async, callback indicates nodes we got a response from (but no guarantee they didn't drop it on the floor)
def storeValueForKey(self, key, value, callback=None):
""" stores the value for key in the global table, returns immediately, no status
in this implementation, peers respond but don't indicate status to storing values
a key can have many values
"""
def _storeValueForKey(nodes, key=key, value=value, response=callback , table=self.table):
if not response:
# default callback
def _storedValueHandler(sender):
pass
response=_storedValueHandler
action = StoreValue(self, key, value, response, self.rawserver.add_task)
self.rawserver.external_add_task(action.goWithNodes, 0, (nodes,))
        # this call is async
self.findNode(key, _storeValueForKey)
def krpc_store_value(self, key, value, id, _krpc_sender):
t = "%0.6f" % time()
self.store[key] = value
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
return {"id" : self.node.id}
# the whole shebang, for testing
class Khashmir(KhashmirWrite):
_Node = KNodeWrite
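
# Hedged usage sketch (illustrative only; the host/port values are made up, a
# real deployment needs a reachable bootstrap node, and the run-loop call is an
# assumption about the bundled RawServer API):
#
#   k = Khashmir('127.0.0.1', 6881, '/tmp/khashmir-data')
#   k.addContact('router.bittorrent.com', 6881)
#   k.findNode(newID(), lambda nodes: sys.stdout.write(repr(nodes) + '\n'))
#   k.rawserver.listen_forever()   # assumed blocking event loop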
|
rays/ipodderx-core
|
khashmir/khashmir.py
|
Python
|
mit
| 16,801
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AlertList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the AlertList
:param Version version: Version that contains the resource
:returns: twilio.rest.monitor.v1.alert.AlertList
:rtype: twilio.rest.monitor.v1.alert.AlertList
"""
super(AlertList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Alerts'.format(**self._solution)
def stream(self, log_level=values.unset, start_date=values.unset,
end_date=values.unset, limit=None, page_size=None):
"""
Streams AlertInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode log_level: Only show alerts for this log-level
:param date start_date: Only include alerts that occurred on or after this date
:param date end_date: Only include alerts that occurred on or before this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.monitor.v1.alert.AlertInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
log_level=log_level,
start_date=start_date,
end_date=end_date,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, log_level=values.unset, start_date=values.unset,
end_date=values.unset, limit=None, page_size=None):
"""
Lists AlertInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode log_level: Only show alerts for this log-level
:param date start_date: Only include alerts that occurred on or after this date
:param date end_date: Only include alerts that occurred on or before this date
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List containing up to limit results
:rtype: list[twilio.rest.monitor.v1.alert.AlertInstance]
"""
return list(self.stream(
log_level=log_level,
start_date=start_date,
end_date=end_date,
limit=limit,
page_size=page_size,
))
def page(self, log_level=values.unset, start_date=values.unset,
end_date=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of AlertInstance records from the API.
Request is executed immediately
:param unicode log_level: Only show alerts for this log-level
:param date start_date: Only include alerts that occurred on or after this date
:param date end_date: Only include alerts that occurred on or before this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertPage
"""
params = values.of({
'LogLevel': log_level,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return AlertPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AlertInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AlertPage(self._version, response, self._solution)
def get(self, sid):
"""
        Constructs an AlertContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.monitor.v1.alert.AlertContext
:rtype: twilio.rest.monitor.v1.alert.AlertContext
"""
return AlertContext(self._version, sid=sid, )
def __call__(self, sid):
"""
        Constructs an AlertContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.monitor.v1.alert.AlertContext
:rtype: twilio.rest.monitor.v1.alert.AlertContext
"""
return AlertContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Monitor.V1.AlertList>'
class AlertPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the AlertPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.monitor.v1.alert.AlertPage
:rtype: twilio.rest.monitor.v1.alert.AlertPage
"""
super(AlertPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AlertInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.monitor.v1.alert.AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertInstance
"""
return AlertInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Monitor.V1.AlertPage>'
class AlertContext(InstanceContext):
""" """
def __init__(self, version, sid):
"""
Initialize the AlertContext
:param Version version: Version that contains the resource
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.monitor.v1.alert.AlertContext
:rtype: twilio.rest.monitor.v1.alert.AlertContext
"""
super(AlertContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Alerts/{sid}'.format(**self._solution)
def fetch(self):
"""
        Fetch an AlertInstance
:returns: Fetched AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return AlertInstance(self._version, payload, sid=self._solution['sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Monitor.V1.AlertContext {}>'.format(context)
class AlertInstance(InstanceResource):
""" """
def __init__(self, version, payload, sid=None):
"""
Initialize the AlertInstance
:returns: twilio.rest.monitor.v1.alert.AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertInstance
"""
super(AlertInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'alert_text': payload.get('alert_text'),
'api_version': payload.get('api_version'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_generated': deserialize.iso8601_datetime(payload.get('date_generated')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'error_code': payload.get('error_code'),
'log_level': payload.get('log_level'),
'more_info': payload.get('more_info'),
'request_method': payload.get('request_method'),
'request_url': payload.get('request_url'),
'request_variables': payload.get('request_variables'),
'resource_sid': payload.get('resource_sid'),
'response_body': payload.get('response_body'),
'response_headers': payload.get('response_headers'),
'sid': payload.get('sid'),
'url': payload.get('url'),
'request_headers': payload.get('request_headers'),
'service_sid': payload.get('service_sid'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AlertContext for this AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertContext
"""
if self._context is None:
self._context = AlertContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def alert_text(self):
"""
:returns: The text of the alert
:rtype: unicode
"""
return self._properties['alert_text']
@property
def api_version(self):
"""
:returns: The API version used when the alert was generated
:rtype: unicode
"""
return self._properties['api_version']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_generated(self):
"""
:returns: The date and time when the alert was generated specified in ISO 8601 format
:rtype: datetime
"""
return self._properties['date_generated']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def error_code(self):
"""
:returns: The error code for the condition that generated the alert
:rtype: unicode
"""
return self._properties['error_code']
@property
def log_level(self):
"""
:returns: The log level
:rtype: unicode
"""
return self._properties['log_level']
@property
def more_info(self):
"""
:returns: The URL of the page in our Error Dictionary with more information about the error condition
:rtype: unicode
"""
return self._properties['more_info']
@property
def request_method(self):
"""
:returns: The method used by the request that generated the alert
:rtype: unicode
"""
return self._properties['request_method']
@property
def request_url(self):
"""
:returns: The URL of the request that generated the alert
:rtype: unicode
"""
return self._properties['request_url']
@property
def request_variables(self):
"""
:returns: The variables passed in the request that generated the alert
:rtype: unicode
"""
return self._properties['request_variables']
@property
def resource_sid(self):
"""
:returns: The SID of the resource for which the alert was generated
:rtype: unicode
"""
return self._properties['resource_sid']
@property
def response_body(self):
"""
:returns: The response body of the request that generated the alert
:rtype: unicode
"""
return self._properties['response_body']
@property
def response_headers(self):
"""
:returns: The response headers of the request that generated the alert
:rtype: unicode
"""
return self._properties['response_headers']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def url(self):
"""
:returns: The absolute URL of the Alert resource
:rtype: unicode
"""
return self._properties['url']
@property
def request_headers(self):
"""
:returns: The request headers of the request that generated the alert
:rtype: unicode
"""
return self._properties['request_headers']
@property
def service_sid(self):
"""
:returns: The SID of the service or resource that generated the alert
:rtype: unicode
"""
return self._properties['service_sid']
def fetch(self):
"""
        Fetch an AlertInstance
:returns: Fetched AlertInstance
:rtype: twilio.rest.monitor.v1.alert.AlertInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Monitor.V1.AlertInstance {}>'.format(context)
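
# Hedged usage sketch (not part of the generated file; the credentials are
# placeholders and client.monitor.alerts is assumed to route to the AlertList
# defined above):
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   for alert in client.monitor.alerts.list(log_level='error', limit=5):
#       print(alert.sid, alert.alert_text)
#   one = client.monitor.alerts('NOXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()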
|
tysonholub/twilio-python
|
twilio/rest/monitor/v1/alert.py
|
Python
|
mit
| 15,815
|
# -*-coding:Utf-8 -*-
# Compatibility 2.7-3.4
from __future__ import absolute_import
from __future__ import unicode_literals
import flask
from app import app
@app.errorhandler(404)
def page_not_found(error):
return "ERROR: You must fill the requested fields", 404, \
{"content-type": "text/plain; charset=utf-8"}
|
VeryTastyTomato/passhport
|
passhportd/app/views_mod/__init__.py
|
Python
|
agpl-3.0
| 329
|
import os
import unittest
import cStringIO
import operator
import itertools
import functools
import amara
from amara.writers.struct import *
from amara.writers import lookup
from amara import bindery
from amara.bindery.util import property_str_getter
from amara.test import file_finder
FILE = file_finder(__file__)
def merge_folders(folder1, folder2):
#Yes, the list must be copied to avoid mutate-while-iterate bugs
for child in folder2.xml_select('*'):
#No need to copy title element
if child.xml_qname == u'title': continue
elif child.xml_qname == u'folder':
for a_folder in folder1.folder:
if unicode(child.title) == unicode(a_folder.title):
merge_folders(a_folder, child)
break
else:
folder1.xml_append(child)
else:
folder1.xml_append(child)
return
def xbel_merge(xbel1, xbel2):
for child in xbel2.xml_select('*'):
if child.xml_qname == u'folder':
for a_folder in xbel1.folder:
if unicode(child.title) == unicode(a_folder.title):
merge_folders(a_folder, child)
break
else:
xbel1.xml_append(child)
elif child.xml_qname == u'bookmark':
xbel1.xml_append(child)
return
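
# Both merge helpers above lean on Python's for/else: the else branch runs
# only when the loop completes without break, i.e. when no folder with a
# matching title was found. A minimal standalone illustration of the idiom:
#
#   for existing in folders:
#       if existing.title == incoming.title:
#           merge(existing, incoming)
#           break
#   else:
#       folders.append(incoming)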
def normalize_whitespace(doc):
"""Normalize DOM tree and remove whitespace.
Necessary to ensure that the pretty-printed XML tree
looks correct.
"""
doc.xml_normalize() # Merge text nodes where possible
for text in list(doc.xml_select('descendant::text()')):
# If text node is all whitespace or empty, remove it.
if not text.xml_value.strip():
text.xml_parent.xml_remove(text)
title_getter = functools.partial(property_str_getter, 'title')
def merge(f1, f2):
folders = sorted(itertools.chain(f1.folder or [], f2.folder or []),
key=title_getter)
folder_groups = itertools.groupby(folders, title_getter)
for ftitle, folders in folder_groups:
main = folders.next()
rest = list(folders)
for f in rest:
merge(main, f)
if main.xml_parent != f1:
f1.xml_append(main)
#All elements that are not folder, title, or info
for e in f2.xml_select(u'*[not(self::folder or self::title or self::info)]'):
f1.xml_append(e)
return
class TestIterators(unittest.TestCase):
def setUp(self):
self.XML ="""\
<env>
<a id="1"/>
<b id="1.1"/>
<c id="1.2"/>
<a id="2"/>
<b id="2.1"/>
<c id="2.2"/>
<a id="3"/>
<b id="3.1"/>
<c id="3.2"/>
</env>
"""
def test_iterator(self):
doc = amara.parse(self.XML)
output = cStringIO.StringIO()
XML_groupby="""\
<?xml version="1.0" encoding="utf-8"?>
<env>
<a-wrapper>
<a id="1"/>
<a id="2"/>
<a id="3"/>
</a-wrapper>
<b-wrapper>
<b id="1.1"/>
<b id="2.1"/>
<b id="3.1"/>
</b-wrapper>
<c-wrapper>
<c id="1.2"/>
<c id="2.2"/>
<c id="3.2"/>
</c-wrapper>
</env>"""
leaves = sorted(doc.xml_select(u'/env/*'), key=operator.attrgetter('xml_name'))
w = structwriter(indent=u"yes", stream=output)
w.feed(
ROOT(
E(u'env',
( E(ename + u'-wrapper',
( E(ename, e.xml_attributes.copy(), e.xml_children) for e in elems )
) for ename, elems in itertools.groupby(leaves, lambda x: x.xml_qname) ),
)
))
self.assertEqual(output.getvalue(), XML_groupby)
def test_combined(self):
#ATOM1 = 'http://zepheira.com/news/atom/entries/'
#ATOM2 = 'http://ma.gnolia.com/atom/full/people/Uche'
ATOM1 = FILE('zepheira_atom.xml') #local download for testing
ATOM2 = FILE('magnolia_uche.xml') #local download for testing
output = cStringIO.StringIO()
combined_output = open(FILE('entries_combined.txt')).read() #local file for testing
doc1 = bindery.parse(ATOM1)
doc2 = bindery.parse(ATOM2)
combined = itertools.chain(*[doc.feed.entry for doc in (doc1, doc2)])
for node in sorted(combined, key=operator.attrgetter('updated')):
print >> output, node.title
self.assertEqual(output.getvalue(), combined_output)
def test_xbel_1(self):
#BM1 = 'http://hg.4suite.org/amara/trunk/raw-file/bb6c40828b2d/demo/7days/bm1.xbel'
#BM2 = 'http://hg.4suite.org/amara/trunk/raw-file/bb6c40828b2d/demo/7days/bm2.xbel'
doc1 = bindery.parse(FILE('bm1.xbel'))
doc2 = bindery.parse(FILE('bm2.xbel'))
xbel_merge(doc1.xbel, doc2.xbel)
normalize_whitespace(doc1)
output = doc1.xml_encode(lookup("xml-indent")) + '\n'
self.assertEqual(output, open(FILE('merged-1.xbel')).read())
def test_xbel_2(self):
#BM1 = 'http://hg.4suite.org/amara/trunk/raw-file/bb6c40828b2d/demo/7days/bm1.xbel'
#BM2 = 'http://hg.4suite.org/amara/trunk/raw-file/bb6c40828b2d/demo/7days/bm2.xbel'
doc1 = bindery.parse(FILE('bm1.xbel'))
doc2 = bindery.parse(FILE('bm2.xbel'))
merge(doc1.xbel, doc2.xbel)
normalize_whitespace(doc1)
output = doc1.xml_encode(lookup("xml-indent")) + '\n'
self.assertEqual(output, open(FILE('merged-2.xbel')).read())
if __name__ == '__main__':
unittest.main()
|
zepheira/amara
|
test/sevendays/test_three.py
|
Python
|
apache-2.0
| 5,515
|
T = int(raw_input())
arr = []
while T:
    T -= 1
    a, b = map(int, raw_input().split())
    arr.append(a + b)
arr2 = sorted(arr)
fin = []
# for each sum in input order, record its 1-based rank in the sorted list
for i in range(len(arr)):
    for j in range(len(arr2)):
        if arr[i] == arr2[j]:
            fin.append(j + 1)
            break
print reduce(lambda x, y: str(x) + " " + str(y), fin)
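
# Hedged example run (the original problem statement is not included here;
# assuming each pair's sum is ranked 1-based within the sorted list of sums):
#   input:  3        -> sums: [3, 10, 1]
#           1 2
#           5 5
#           0 1
#   output: 2 3 1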
|
Dawny33/Code
|
Hackerrank/101 Hack Sept/order.py
|
Python
|
gpl-3.0
| 346
|
import sys, os
import unittest
# A list of demos that depend on user-interface of *any* kind. Tests listed
# here are not suitable for unattended testing.
ui_demos = """GetSaveFileName print_desktop win32cred_demo win32gui_demo
win32gui_dialog win32gui_menu win32gui_taskbar
win32rcparser_demo winprocess win32console_demo""".split()
# Other demos known as 'bad' (or at least highly unlikely to work)
# cerapi: no CE module is built (CE via pywin32 appears dead)
# desktopmanager: hangs (well, hangs for 60secs or so...)
bad_demos = "cerapi desktopmanager win32comport_demo".split()
argvs = {
"rastest": ("-l",),
}
ok_exceptions = {
    "RegCreateKeyTransacted": ("NotImplementedError",),
}
class TestRunner:
def __init__(self, argv):
self.argv = argv
def __call__(self):
# subprocess failed in strange ways for me??
fin, fout, ferr = os.popen3(" ".join(self.argv))
fin.close()
output = fout.read() + ferr.read()
fout.close()
rc = ferr.close()
if rc:
base = os.path.basename(self.argv[1])
raise AssertionError, "%s failed with exit code %s. Output is:\n%s" % (base, rc, output)
def get_demo_tests():
import win32api
ret = []
demo_dir = os.path.abspath(os.path.join(os.path.dirname(win32api.__file__), "Demos"))
assert os.path.isdir(demo_dir), demo_dir
for name in os.listdir(demo_dir):
base, ext = os.path.splitext(name)
if ext != ".py" or base in ui_demos or base in bad_demos:
continue
if base in ok_exceptions:
print "Ack - can't handle test %s - can't catch specific exceptions" % (base,)
continue
argv = (sys.executable, os.path.join(demo_dir, base+".py")) + \
argvs.get(base, ())
ret.append(unittest.FunctionTestCase(TestRunner(argv), description="win32/demos/" + name))
return ret
def import_all():
# Some hacks for import order - dde depends on win32ui
try:
import win32ui
except ImportError:
pass # 'what-ev-a....'
import win32api
dir = os.path.dirname(win32api.__file__)
num = 0
is_debug = os.path.basename(win32api.__file__).endswith("_d")
for name in os.listdir(dir):
base, ext = os.path.splitext(name)
if (ext==".pyd") and \
name != "_winxptheme.pyd" and \
(is_debug and base.endswith("_d") or \
not is_debug and not base.endswith("_d")):
try:
__import__(base)
except ImportError:
print "FAILED to import", name
raise
num += 1
def suite():
# Loop over all .py files here, except me :)
try:
me = __file__
except NameError:
me = sys.argv[0]
me = os.path.abspath(me)
files = os.listdir(os.path.dirname(me))
suite = unittest.TestSuite()
suite.addTest(unittest.FunctionTestCase(import_all))
for file in files:
base, ext = os.path.splitext(file)
if ext=='.py' and os.path.basename(me) != file:
try:
mod = __import__(base)
except ImportError, why:
print "FAILED to import test module"
print why
continue
if hasattr(mod, "suite"):
test = mod.suite()
else:
test = unittest.defaultTestLoader.loadTestsFromModule(mod)
suite.addTest(test)
for test in get_demo_tests():
suite.addTest(test)
return suite
class CustomLoader(unittest.TestLoader):
def loadTestsFromModule(self, module):
return suite()
if __name__=='__main__':
unittest.TestProgram(testLoader=CustomLoader())(argv=sys.argv)
|
leighpauls/k2cro4
|
third_party/python_26/Lib/site-packages/win32/test/testall.py
|
Python
|
bsd-3-clause
| 3,777
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service action implementations"""
import argparse
import six
from eclcli.common import command
from eclcli.common import exceptions
from eclcli.common import utils
from eclcli.i18n import _ # noqa
from eclcli.identity import common
class CreateService(command.ShowOne):
"""Create new service"""
def get_parser(self, prog_name):
parser = super(CreateService, self).get_parser(prog_name)
parser.add_argument(
'type_or_name',
metavar='<type>',
help=_('New service type (compute, image, identity, volume, etc)'),
)
type_or_name_group = parser.add_mutually_exclusive_group()
type_or_name_group.add_argument(
'--type',
metavar='<type>',
help=argparse.SUPPRESS,
)
type_or_name_group.add_argument(
'--name',
metavar='<name>',
help=_('New service name'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('New service description'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
type_or_name = parsed_args.type_or_name
name = parsed_args.name
type = parsed_args.type
# If only a single positional is present, it's a <type>.
# This is not currently legal so it is considered a new case.
if not type and not name:
type = type_or_name
# If --type option is present then positional is handled as <name>;
# display deprecation message.
elif type:
name = type_or_name
self.log.warning(_('The argument --type is deprecated, use service'
' create --name <service-name> type instead.'))
        # If the --name option is present, the positional is handled as <type>.
        # Making --type optional is new, but backward-compatible
elif name:
type = type_or_name
service = identity_client.services.create(
name,
type,
parsed_args.description)
info = {}
info.update(service._info)
return zip(*sorted(six.iteritems(info)))
class DeleteService(command.Command):
"""Delete service"""
def get_parser(self, prog_name):
parser = super(DeleteService, self).get_parser(prog_name)
parser.add_argument(
'service',
metavar='<service>',
help=_('Service to delete (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
service = common.find_service(identity_client, parsed_args.service)
identity_client.services.delete(service.id)
return
class ListService(command.Lister):
"""List services"""
def get_parser(self, prog_name):
parser = super(ListService, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_('List additional fields in output'),
)
return parser
def take_action(self, parsed_args):
if parsed_args.long:
columns = ('ID', 'Name', 'Type', 'Description')
else:
columns = ('ID', 'Name', 'Type')
data = self.app.client_manager.identity.services.list()
return (
columns,
(utils.get_item_properties(s, columns) for s in data),
)
class ShowService(command.ShowOne):
"""Display service details"""
def get_parser(self, prog_name):
parser = super(ShowService, self).get_parser(prog_name)
parser.add_argument(
'service',
metavar='<service>',
help=_('Service to display (type, name or ID)'),
)
parser.add_argument(
'--catalog',
action='store_true',
default=False,
help=_('Show service catalog information'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
auth_ref = self.app.client_manager.auth_ref
if parsed_args.catalog:
endpoints = auth_ref.service_catalog.get_endpoints(
service_type=parsed_args.service)
for (service, service_endpoints) in six.iteritems(endpoints):
if service_endpoints:
info = {"type": service}
info.update(service_endpoints[0])
return zip(*sorted(six.iteritems(info)))
msg = _("No service catalog with a type, name or ID of '%s' "
"exists.") % (parsed_args.service)
raise exceptions.CommandError(msg)
else:
service = common.find_service(identity_client, parsed_args.service)
info = {}
info.update(service._info)
return zip(*sorted(six.iteritems(info)))
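
# Hedged CLI usage sketch (the executable name 'ecl' and the service values
# are assumptions; each subcommand maps onto one of the classes above):
#
#   $ ecl service create --name nova --description 'Compute service' compute
#   $ ecl service list --long
#   $ ecl service show compute --catalog
#   $ ecl service delete nova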
|
nttcom/eclcli
|
eclcli/identity/v2_0/service.py
|
Python
|
apache-2.0
| 5,681
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Pyromaths
# A Python program that generates typical middle-school mathematics exercise
# sheets, along with their answer keys, in LaTeX.
# Copyright (C) 2014 -- Jérôme Ortais (jerome.ortais@pyromaths.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from random import randrange, shuffle
from pyromaths.classes.Fractions import Fraction
from pyromaths.outils import Priorites3
from pyromaths.outils.Arithmetique import pgcd
def id_rem():
"""Génère un exercice de développement des 3 identités remarquables avec une situation piège.
Dans un premier temps, on n'utilise que des nombres entiers, puis des fractions, puis l'opposé
d'une expression littérale.
"""
l = [randrange(1, 11) for dummy in range(14)]
while pgcd(l[8], l[9]) != 1 or pgcd(l[10], l[11]) != 1 or (l[9] == 1 and l[11] == 1):
        # Create two irreducible rationals that are not both integers.
l = [randrange(1, 11) for dummy in range(14)]
lpoly = [id_rem1(l[0], l[1]), id_rem2(l[2], l[3]), id_rem3(l[4], l[5]), id_rem4(l[6], l[7])]
shuffle(lpoly)
lid = [id_rem1, id_rem2, id_rem3, id_rem4]
lpoly2 = [lid.pop(randrange(4))(Fraction(l[8], l[9]), Fraction(l[10], l[11]))]
lpoly2.append('-' + lid.pop(randrange(3))(l[12], l[13]))
shuffle(lpoly2)
lpoly.extend(lpoly2)
expr = [Priorites3.texify([Priorites3.splitting(lpoly[i])]) for i in range(6)]
exo = ["\\exercice", u"Développer chacune des expressions littérales suivantes :"]
exo.append("\\begin{multicols}{2}")
exo.append('\\\\\n'.join(['$%s=%s$' % (chr(i + 65), expr[i][0]) for i in range(6)]))
exo.append("\\end{multicols}")
cor = ["\\exercice*", u"Développer chacune des expressions littérales suivantes :"]
cor.append("\\begin{multicols}{2}")
for i in range(6):
dev = Priorites3.texify(Priorites3.priorites(lpoly[i]))
dev.insert(0, expr[i][0])
cor.append('\\\\\n'.join(['$%s=%s$' % (chr(i + 65), dev[j]) for j in range(len(dev) - 1)]))
cor.append('\\\\')
cor.append('\\fbox{$%s=%s$}\\\\\n' % (chr(i + 65), dev[-1]))
cor.append("\\end{multicols}")
return exo, cor
def id_rem1(a, b, details=2):
"""Construit un Polynome de la forme (ax+b)^2
Renvoie une chaine"""
return 'Polynome([[%r, 1], [%r, 0]], details=%s)**2' % (a, b, details)
def id_rem2(a, b, details=2):
"""Construit un Polynome de la forme (ax-b)^2
Renvoie une chaine"""
return 'Polynome([[%r, 1], [%r, 0]], details=%s)**2' % (a, -b, details)
def id_rem3(a, b, details=2):
"""Construit un Polynome de la forme (ax+b)(ax-b) ou (ax-b)(ax+b)
Renvoie une chaine"""
sgn = randrange(2)
return 'Polynome([[%r, 1], [%r, 0]], details=%s) * Polynome([[%r, 1], [%r, 0]], details=%s)' % (a, (-1) ** sgn * b, details, a, (-1) ** (sgn + 1) * b, details)
def id_rem4(a, b, details=2):
"""Construit un Polynome de la forme (ax+b)(bx-a) ou (ax-b)(bx+a)
Renvoie une chaine"""
sgn = randrange(2)
return 'Polynome([[%r, 1], [%r, 0]], details=%s) * Polynome([[%r, 1], [%r, 0]], details=%s)' % (a, (-1) ** sgn * b, details, b, (-1) ** (sgn + 1) * a, details)
id_rem.description = u'Identités remarquables'
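
# Hedged illustration of the string builders above (the output format follows
# from the %r / %s interpolation; evaluating the string requires pyromaths'
# Polynome class):
#
#   >>> id_rem1(3, 5)
#   'Polynome([[3, 1], [5, 0]], details=2)**2'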
|
JeromeO/Pyromaths
|
src/pyromaths/ex/troisiemes/developpements.py
|
Python
|
gpl-2.0
| 3,907
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, models
class StockMove(models.Model):
_inherit = "stock.move"
def _filter_anglo_saxon_moves(self, product):
res = super(StockMove, self)._filter_anglo_saxon_moves(product)
res += self.filtered(lambda m: m.bom_line_id.bom_id.product_tmpl_id.id == product.product_tmpl_id.id)
return res
def _generate_analytic_lines_data(self, unit_amount, amount):
vals = super()._generate_analytic_lines_data(unit_amount, amount)
if self.raw_material_production_id.analytic_account_id:
vals['name'] = _('[Raw] %s', self.product_id.display_name)
vals['ref'] = self.raw_material_production_id.display_name
vals['category'] = 'manufacturing_order'
return vals
def _get_analytic_account(self):
account = self.raw_material_production_id.analytic_account_id
if account:
return account
return super()._get_analytic_account()
def _get_src_account(self, accounts_data):
if not self.unbuild_id:
return super()._get_src_account(accounts_data)
else:
return self.location_dest_id.valuation_out_account_id.id or accounts_data['stock_input'].id
def _get_dest_account(self, accounts_data):
if not self.unbuild_id:
return super()._get_dest_account(accounts_data)
else:
return self.location_id.valuation_in_account_id.id or accounts_data['stock_output'].id
def _is_returned(self, valued_type):
if self.unbuild_id:
return True
return super()._is_returned(valued_type)
|
jeremiahyan/odoo
|
addons/mrp_account/models/stock_move.py
|
Python
|
gpl-3.0
| 1,716
|
import unittest
import logging
import time
from mock import Mock, MagicMock, patch
from django.conf import settings
from django.test import TestCase
from xmodule.course_module import CourseDescriptor
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import Location
from xmodule.timeparse import parse_time
from xmodule.x_module import XModule, XModuleDescriptor
import courseware.access as access
from .factories import CourseEnrollmentAllowedFactory
class AccessTestCase(TestCase):
def test__has_global_staff_access(self):
u = Mock(is_staff=False)
self.assertFalse(access._has_global_staff_access(u))
u = Mock(is_staff=True)
self.assertTrue(access._has_global_staff_access(u))
def test__has_access_to_location(self):
location = Location('i4x://edX/toy/course/2012_Fall')
self.assertFalse(access._has_access_to_location(None, location,
'staff', None))
u = Mock()
u.is_authenticated.return_value = False
self.assertFalse(access._has_access_to_location(u, location,
'staff', None))
u = Mock(is_staff=True)
self.assertTrue(access._has_access_to_location(u, location,
'instructor', None))
# A user has staff access if they are in the staff group
u = Mock(is_staff=False)
g = Mock()
g.name = 'staff_edX/toy/2012_Fall'
u.groups.all.return_value = [g]
self.assertTrue(access._has_access_to_location(u, location,
'staff', None))
# A user has staff access if they are in the instructor group
g.name = 'instructor_edX/toy/2012_Fall'
self.assertTrue(access._has_access_to_location(u, location,
'staff', None))
# A user has instructor access if they are in the instructor group
g.name = 'instructor_edX/toy/2012_Fall'
self.assertTrue(access._has_access_to_location(u, location,
'instructor', None))
# A user does not have staff access if they are
        # not in either the staff or the instructor group
g.name = 'student_only'
self.assertFalse(access._has_access_to_location(u, location,
'staff', None))
# A user does not have instructor access if they are
# not in the instructor group
g.name = 'student_only'
self.assertFalse(access._has_access_to_location(u, location,
'instructor', None))
def test__has_access_string(self):
u = Mock(is_staff=True)
self.assertFalse(access._has_access_string(u, 'not_global', 'staff', None))
u._has_global_staff_access.return_value = True
self.assertTrue(access._has_access_string(u, 'global', 'staff', None))
self.assertRaises(ValueError, access._has_access_string, u, 'global', 'not_staff', None)
def test__has_access_descriptor(self):
# TODO: override DISABLE_START_DATES and test the start date branch of the method
u = Mock()
d = Mock()
d.start = time.gmtime(time.time() - 86400) # make sure the start time is in the past
# Always returns true because DISABLE_START_DATES is set in test.py
self.assertTrue(access._has_access_descriptor(u, d, 'load'))
self.assertRaises(ValueError, access._has_access_descriptor, u, d, 'not_load_or_staff')
def test__has_access_course_desc_can_enroll(self):
u = Mock()
yesterday = time.gmtime(time.time() - 86400)
tomorrow = time.gmtime(time.time() + 86400)
c = Mock(enrollment_start=yesterday, enrollment_end=tomorrow)
# User can enroll if it is between the start and end dates
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# User can enroll if authenticated and specifically allowed for that course
# even outside the open enrollment period
u = Mock(email='test@edx.org', is_staff=False)
u.is_authenticated.return_value = True
c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/2012_Fall')
allowed = CourseEnrollmentAllowedFactory(email=u.email, course_id=c.id)
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# Staff can always enroll even outside the open enrollment period
u = Mock(email='test@edx.org', is_staff=True)
u.is_authenticated.return_value = True
c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/Whenever')
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# TODO:
# Non-staff cannot enroll outside the open enrollment period if not specifically allowed
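        # A minimal sketch of that remaining check (an assumption, not part of
        # the original suite; it presumes the factory row created above does
        # not match this new user's email):
        # u = Mock(email='someone_else@edx.org', is_staff=False)
        # u.is_authenticated.return_value = True
        # self.assertFalse(access._has_access_course_desc(u, c, 'enroll'))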
|
elimence/edx-platform
|
lms/djangoapps/courseware/tests/test_access.py
|
Python
|
agpl-3.0
| 5,047
|
# -*- coding: utf-8 -*-
# jobs/base
# *********
#
# Base class for implementing scheduled tasks
import time
from twisted.internet import task, defer, reactor, threads
from globaleaks.handlers.base import TimingStatsHandler
from globaleaks.utils.mailutils import send_exception_email, extract_exception_traceback_and_send_email
from globaleaks.utils.utility import log, datetime_null
test_reactor = None
class GLJob(task.LoopingCall):
name = "unnamed"
interval = 60
low_time = -1
high_time = -1
mean_time = -1
start_time = -1
def operation(self):
raise NotImplementedError('GLJob does not implement operation')
    # The minimum execution time (seconds) a job must exceed before an
    # exception is recorded. If the job still has not finished, another
    # exception is generated every monitor_interval after the first one.
    monitor_interval = 5 * 60
    last_monitor_check_failed = 0  # Epoch start
def __init__(self):
        # LoopingCall.__init__ returns None, so there is nothing to assign here
        task.LoopingCall.__init__(self, self.run)
self.clock = reactor if test_reactor is None else test_reactor
    def _errback(self, failure):
error = "Job %s died with runtime %.4f [low: %.4f, high: %.4f]" % \
(self.name, self.mean_time, self.low_time, self.high_time)
log.err(error)
send_exception_email(error)
def start(self, interval):
task.LoopingCall.start(self, interval).addErrback(self._errback)
def get_start_time(self):
return 0
def schedule(self):
delay = self.get_start_time()
if delay < 1:
delay = 1
self.clock.callLater(delay, self.start, self.interval)
def stats_collection_begin(self):
self.start_time = time.time()
def stats_collection_end(self):
current_run_time = time.time() - self.start_time
# discard empty cycles from stats
if self.mean_time == -1:
            self.mean_time = current_run_time
else:
self.mean_time = (self.mean_time * 0.7) + (current_run_time * 0.3)
if self.low_time == -1 or current_run_time < self.low_time:
self.low_time = current_run_time
if self.high_time == -1 or current_run_time > self.high_time:
self.high_time = current_run_time
@defer.inlineCallbacks
def run(self):
self.stats_collection_begin()
try:
yield threads.deferToThread(self.operation)
except Exception as e:
log.err("Exception while performing scheduled operation %s: %s" % \
(type(self).__name__, e))
extract_exception_traceback_and_send_email(e)
self.stats_collection_end()
class GLJobsMonitor(GLJob):
name = "jobs monitor"
interval = 2
def __init__(self, jobs_list):
GLJob.__init__(self)
self.jobs_list = jobs_list
def operation(self):
current_time = time.time()
error_msg = ""
for job in self.jobs_list:
execution_time = 0
if job.running:
execution_time = current_time - job.start_time
time_from_last_failed_check = current_time - job.last_monitor_check_failed
if (execution_time > job.monitor_interval
and time_from_last_failed_check > job.monitor_interval):
job.last_monitor_check_failed = current_time
if execution_time < 60:
error = "Job %s is taking more than %d seconds to execute" % (job.name, execution_time)
elif execution_time < 3600:
minutes = int(execution_time / 60)
error = "Job %s is taking more than %d minutes to execute" % (job.name, minutes)
else:
hours = int(execution_time / 3600)
error = "Job %s is taking more than %d hours to execute" % (job.name, hours)
error_msg += '\n' + error
log.err(error)
if error_msg != "":
            send_exception_email(error_msg)
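# A minimal sketch of a concrete job built on this base class (illustrative
# only; the class name, interval and operation body are assumptions, not part
# of this module):
class ExampleCleanupJob(GLJob):
    name = "example cleanup"
    interval = 3600  # run once per hour
    def operation(self):
        # Blocking work goes here; GLJob.run() defers it to a thread and
        # records timing statistics around it.
        pass
# Scheduling it: ExampleCleanupJob().schedule() arranges the first run after
# get_start_time() seconds (at least 1), then every `interval` seconds.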
|
vodkina/GlobaLeaks
|
backend/globaleaks/jobs/base.py
|
Python
|
agpl-3.0
| 4,133
|
"""Support for monitoring a Neurio energy sensor."""
import logging
from datetime import timedelta
import requests.exceptions
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_API_KEY, POWER_WATT,
ENERGY_KILO_WATT_HOUR)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_API_SECRET = 'api_secret'
CONF_SENSOR_ID = 'sensor_id'
ACTIVE_NAME = 'Energy Usage'
DAILY_NAME = 'Daily Energy Usage'
ACTIVE_TYPE = 'active'
DAILY_TYPE = 'daily'
ICON = 'mdi:flash'
MIN_TIME_BETWEEN_DAILY_UPDATES = timedelta(seconds=150)
MIN_TIME_BETWEEN_ACTIVE_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_API_SECRET): cv.string,
vol.Optional(CONF_SENSOR_ID): cv.string,
})
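# Example configuration.yaml entry for this platform (illustrative; the keys
# follow the schema above, the values are placeholders):
#
# sensor:
#   - platform: neurio_energy
#     api_key: YOUR_API_KEY
#     api_secret: YOUR_API_SECRET
#     sensor_id: YOUR_SENSOR_ID  # optional; auto-detected when omitted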
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Neurio sensor."""
api_key = config.get(CONF_API_KEY)
api_secret = config.get(CONF_API_SECRET)
sensor_id = config.get(CONF_SENSOR_ID)
data = NeurioData(api_key, api_secret, sensor_id)
@Throttle(MIN_TIME_BETWEEN_DAILY_UPDATES)
def update_daily():
"""Update the daily power usage."""
data.get_daily_usage()
@Throttle(MIN_TIME_BETWEEN_ACTIVE_UPDATES)
def update_active():
"""Update the active power usage."""
data.get_active_power()
update_daily()
update_active()
# Active power sensor
add_entities([NeurioEnergy(data, ACTIVE_NAME, ACTIVE_TYPE, update_active)])
# Daily power sensor
add_entities([NeurioEnergy(data, DAILY_NAME, DAILY_TYPE, update_daily)])
class NeurioData:
"""Stores data retrieved from Neurio sensor."""
def __init__(self, api_key, api_secret, sensor_id):
"""Initialize the data."""
import neurio
self.api_key = api_key
self.api_secret = api_secret
self.sensor_id = sensor_id
self._daily_usage = None
self._active_power = None
self._state = None
neurio_tp = neurio.TokenProvider(key=api_key, secret=api_secret)
self.neurio_client = neurio.Client(token_provider=neurio_tp)
if not self.sensor_id:
user_info = self.neurio_client.get_user_information()
_LOGGER.warning("Sensor ID auto-detected: %s", user_info[
"locations"][0]["sensors"][0]["sensorId"])
self.sensor_id = user_info[
"locations"][0]["sensors"][0]["sensorId"]
@property
def daily_usage(self):
"""Return latest daily usage value."""
return self._daily_usage
@property
def active_power(self):
"""Return latest active power value."""
return self._active_power
def get_active_power(self):
"""Return current power value."""
try:
sample = self.neurio_client.get_samples_live_last(self.sensor_id)
self._active_power = sample['consumptionPower']
except (requests.exceptions.RequestException, ValueError, KeyError):
_LOGGER.warning("Could not update current power usage")
return None
def get_daily_usage(self):
"""Return current daily power usage."""
kwh = 0
start_time = dt_util.start_of_local_day() \
.astimezone(dt_util.UTC).isoformat()
end_time = dt_util.utcnow().isoformat()
_LOGGER.debug('Start: %s, End: %s', start_time, end_time)
try:
history = self.neurio_client.get_samples_stats(
self.sensor_id, start_time, 'days', end_time)
except (requests.exceptions.RequestException, ValueError, KeyError):
_LOGGER.warning("Could not update daily power usage")
return None
for result in history:
kwh += result['consumptionEnergy'] / 3600000
self._daily_usage = round(kwh, 2)
class NeurioEnergy(Entity):
"""Implementation of a Neurio energy sensor."""
def __init__(self, data, name, sensor_type, update_call):
"""Initialize the sensor."""
self._name = name
self._data = data
self._sensor_type = sensor_type
self.update_sensor = update_call
self._state = None
if sensor_type == ACTIVE_TYPE:
self._unit_of_measurement = POWER_WATT
elif sensor_type == DAILY_TYPE:
self._unit_of_measurement = ENERGY_KILO_WATT_HOUR
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data, update state."""
self.update_sensor()
if self._sensor_type == ACTIVE_TYPE:
self._state = self._data.active_power
elif self._sensor_type == DAILY_TYPE:
self._state = self._data.daily_usage
|
MartinHjelmare/home-assistant
|
homeassistant/components/neurio_energy/sensor.py
|
Python
|
apache-2.0
| 5,417
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: Alexey V. Polurotov
# e-mail: niimailtah@gmail.com
# Common nick: Niimailtah
# ----------------------------------------------------------------------------
# https://projecteuler.net/problem=14
# Longest Collatz sequence
# Problem 14
#
# The following iterative sequence is defined for the set of positive integers:
#
# n → n/2 (n is even)
# n → 3n + 1 (n is odd)
#
# Using the rule above and starting with 13, we generate the following sequence:
#
# 13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
# It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms.
# Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
#
# Which starting number, under one million, produces the longest chain?
#
# NOTE: Once the chain starts the terms are allowed to go above one million.
def seq(n):
result = [n]
while n != 1:
if n % 2 == 0:
n = int(n / 2)
else:
n = 3 * n + 1
result.append(n)
return result
max_length = 1
start_number = 1
for current_number in range(1, 10**6):
chain_length = len(seq(current_number))
    if chain_length > max_length:
        max_length = chain_length
        start_number = current_number
        # report progress only when a new longest chain is found
        print(max_length, start_number)
print(start_number)
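# A memoized variant (an illustrative alternative, not part of the original
# solution): caching chain lengths avoids recomputing shared tails, which is
# what makes the plain sweep up to 10**6 above slow.
def chain_length(n, cache={1: 1}):
    path = []
    while n not in cache:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = cache[n]
    for m in reversed(path):
        length += 1
        cache[m] = length
    return length
# max(range(1, 10**6), key=chain_length) reproduces the answer found above.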
|
niimailtah/projecteuler.net
|
sources/problem014.py
|
Python
|
gpl-2.0
| 1,406
|
# -*- coding: utf-8 -*-
import pygtk
pygtk.require("2.0")
import gtk
import time
import thread
import os
from datetime import datetime
from Timetableasy import app
connection_status = {
0 : {
'stock' : 'gtk-disconnect',
'tooltip' : 'Vous êtes actuellement déconnecté.'
},
1 : {
'stock' : 'gtk-connect',
'tooltip' : 'Vous êtes actuellement connecté au serveur.'
},
2 : {
'stock' : 'gtk-harddisk',
'tooltip' : 'Vous êtes actuellement en mode hors-ligne. Vous pouvez visualiser toutes vos informations.'
}
}
actions = {
1 : {
'msg' : 'Terminé',
'stock' : None,
'file' : None
},
2 : {
'msg' : 'Connecté',
'stock' : None,
'file' : None
},
3 : {
'msg' : 'Déconnecté',
'stock' : None,
'file' : None
},
4 : {
'msg' : 'Connexion en cours...',
'stock' : None,
'file' : None
},
5 : {
'msg' : 'Déconnexion en cours...',
'stock' : None,
'file' : None
},
6 : {
'msg' : 'Identification requise',
'stock' : 'gtk-dialog-warning',
'file' : None
},
7 : {
'msg' : 'Passé en mode hors-ligne',
'stock' : None,
'file' : None
},
8 : {
'msg' : 'Passé en mode en-ligne',
'stock' : None,
'file' : None
},
9 : {
'msg' : 'Impossible de contacter le server',
'stock' : 'gtk-dialog-warning',
'file' : None
},
}
class Status_Bar(object):
def __init__(self, statusbar, image_object, icon_object, label_date):
self.statusbar = statusbar
self.image = image_object
self.icon = icon_object
self.date = label_date
self.date.hide()
self.time = None
self.animation = gtk.gdk.PixbufAnimation('graphics/images/ajax-loader.gif')
self.add_action('icon', 6)
self.set_connection_status(0)
thread.start_new_thread(self.display_date, ())
def add_action(self, icon_type, action_id, specific_msg = None, specific_icon = None):
if (icon_type == 'progress'):
self.time = datetime.fromtimestamp(time.time())
self.statusbar.push(action_id, actions[action_id]['msg'])
self.image.set_from_animation(self.animation)
elif (icon_type == 'icon'):
if (specific_msg == None and specific_icon == None):
if (self.time != None):
                    diff_time = datetime.fromtimestamp(time.time()) - self.time
                    elapsed_ms = diff_time.seconds * 1000 + diff_time.microseconds // 1000
                    status_text = actions[action_id]['msg'] + ' (effectué en : ' + str(elapsed_ms) + ' ms)'
self.time = None
else:
status_text = actions[action_id]['msg']
self.statusbar.push(action_id, status_text)
if (actions[action_id]['stock'] != None):
self.image.set_from_stock(actions[action_id]['stock'], gtk.ICON_SIZE_MENU)
elif (actions[action_id]['file'] != None):
self.image.set_from_file(os.path.normpath('graphics/images/' + actions[action_id]['file']))
else:
self.image.set_from_stock('gtk-info', gtk.ICON_SIZE_MENU)
else:
self.statusbar.push(0, specific_msg)
"""
s//stock_id = from stock
f//filepath = from file
"""
icon = specific_icon.split('//', 1)
if (icon[0] == 's'):
self.image.set_from_stock(icon[1], gtk.ICON_SIZE_MENU)
elif (icon[0] == 'f'):
if (os.path.isfile(icon[1])):
self.image.set_from_file(os.path.normpath('graphics/images/' + icon[1]))
else:
self.image.set_from_stock('gtk-info', gtk.ICON_SIZE_MENU)
else:
self.image.set_from_stock('gtk-info', gtk.ICON_SIZE_MENU)
def set_connection_status(self, status_id):
self.icon.set_from_stock(connection_status[status_id]['stock'], gtk.ICON_SIZE_MENU)
self.icon.set_tooltip_text(connection_status[status_id]['tooltip'])
def check_date_display(self):
if (app.settings.display_date):
self.date.show()
else:
self.date.hide()
def display_date(self):
while 1:
self.date.set_text(time.strftime('%a %d %b %Y, %H:%M:%S',time.localtime()))
time.sleep(1)
|
SBillion/timetableasy
|
src/Status_Bar.py
|
Python
|
agpl-3.0
| 3,849
|
"""A streaming dataflow pipeline to count pub/sub messages.
"""
import argparse
import logging
from datetime import datetime
import apache_beam as beam
from apache_beam.options.pipeline_options import (
GoogleCloudOptions,
PipelineOptions,
SetupOptions,
StandardOptions,
)
from apache_beam.transforms import window # pylint: disable=unused-import
class CountFn(beam.CombineFn):
"""Counter function to accumulate statistics"""
def create_accumulator(self):
return 0
def add_input(self, count, element):
del element
return count + 1
def merge_accumulators(self, accumulators):
return sum(accumulators)
def extract_output(self, count):
return count
def run(argv=None):
"""Build and run the pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--project", help=("Google Cloud Project ID"), required=True
)
parser.add_argument("--region", help=("Google Cloud region"), required=True)
parser.add_argument(
"--input_topic",
help=("Google Cloud PubSub topic name "),
required=True,
)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
pipeline_options.view_as(StandardOptions).streaming = True
pipeline_options.view_as(GoogleCloudOptions).region = known_args.region
pipeline_options.view_as(GoogleCloudOptions).project = known_args.project
p = beam.Pipeline(options=pipeline_options)
topic = f"projects/{known_args.project}/topics/{known_args.input_topic}"
# this table needs to exist
table_spec = f"{known_args.project}:taxifare.traffic_realtime"
def to_bq_format(count):
"""BigQuery writer requires rows to be stored as python dictionary"""
return {
"trips_last_5min": count,
"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
pipeline = ( # noqa F841 pylint: disable=unused-variable
p
| "read_from_pubsub"
>> beam.io.ReadFromPubSub(topic=topic).with_output_types(bytes)
| "window" >> None # TODO: Your code goes here.
| "count" >> beam.CombineGlobally(CountFn()).without_defaults()
| "format_for_bq" >> beam.Map(to_bq_format)
| "write_to_bq"
>> beam.io.WriteToBigQuery(
table_spec,
# WRITE_TRUNCATE not supported for streaming
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
)
)
result = p.run() # noqa F841 pylint: disable=unused-variable
# result.wait_until_finish() #only do this if running with DirectRunner
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
run()
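# One possible way to fill in the "window" TODO above (an assumption, not an
# official solution): a 5-minute sliding window keeps the globally combined
# count aligned with the "trips_last_5min" column written to BigQuery, e.g.
#
#     | "window" >> beam.WindowInto(window.SlidingWindows(size=300, period=60))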
|
GoogleCloudPlatform/asl-ml-immersion
|
notebooks/building_production_ml_systems/labs/taxicab_traffic/streaming_count.py
|
Python
|
apache-2.0
| 2,896
|
../../../../../../share/pyshared/Crypto/SelfTest/Cipher/test_ARC2.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/Crypto/SelfTest/Cipher/test_ARC2.py
|
Python
|
gpl-3.0
| 68
|
from method_decorator import method_decorator
__version__ = '0.0.1'
class virtualmethod(method_decorator):
"""
Decorator to prevent base class methods from being called directly.
"""
def __call__(self, *args, **kwargs):
        if self.cls and self.__name__ in self.cls.__dict__:
            raise TypeError("Virtual method %s must be called from a subclass." % self.__name__)
        return method_decorator.__call__(self, *args, **kwargs)
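# Example usage (illustrative; assumes method_decorator records the class the
# wrapped method was retrieved from as self.cls, as the check above expects):
#
# class Shape(object):
#     @virtualmethod
#     def area(self):
#         pass
#
# class Circle(Shape):
#     def area(self):
#         return 3.14159 * self.radius ** 2
#
# Shape().area()   # raises TypeError: must be called from a subclass
# Circle().area()  # fine: the subclass override is called as usual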
|
bgreenlee/virtualmethod
|
virtualmethod/core.py
|
Python
|
apache-2.0
| 475
|
# coding=utf8
"""
asm.py - (dis)assembly features.
(c) 2014 Samuel Groß
"""
from willie import web
from willie.module import commands, nickname_commands, example
from random import choice
from binascii import hexlify, unhexlify
import string
import re
import os
from subprocess import Popen, PIPE
@commands('disas', 'disas64', 'disassemble', 'disassemble64')
@example('.disas 66556689e590c9c3')
def disassemble(bot, trigger):
"""Disassemble x86 machine code."""
if not trigger.group(2):
return bot.reply('Nothing to disassemble')
try:
arg = trigger.group(2)
# remove all 0x
while "0x" in arg:
arg = arg.replace("0x","")
# remove everything except hex
arg = re.sub(r"[^a-fA-F0-9]", r"", arg)
code = unhexlify(arg)
except Exception:
return bot.say('Invalid hex sequence')
bits = 64 if '64' in trigger.group(1) else 32
filename = '/tmp/' + ''.join( choice(string.ascii_lowercase) for i in range(10)) + '.bin'
with open(filename, 'wb') as f:
f.write(code)
result = Popen(['ndisasm', '-b', str(bits), '-o', '0x1000', filename], stdout=PIPE).stdout.read()
os.remove(filename)
for line in result.split('\n'):
bot.say(line)
@commands('as', 'as64', 'assemble', 'assemble64')
@example('.as push ebp; mov ebp, esp; jmp 0x14')
def assemble(bot, trigger):
"""Assemble x86 instructions."""
code = trigger.group(2)
if not code:
return bot.reply('Nothing to assemble')
bits = 64 if '64' in trigger.group(1) else 32
filename = '/tmp/' + ''.join(choice(string.ascii_lowercase) for i in range(10)) + '.asm'
with open(filename, 'w') as f:
f.write('BITS %i\n' % bits + re.sub(r';\s*', ';\n', code))
p = Popen(['nasm', '-f', 'bin', '-o', filename[:-4], filename], stderr=PIPE)
p.wait()
os.remove(filename)
for line in p.stderr.read().split('\n'):
bot.say(line)
if p.returncode == 0:
with open(filename[:-4], 'rb') as f:
raw = f.read()
hex = hexlify(raw)
if hex:
bot.say(hex)
os.remove(filename[:-4])
def x86jmp(bot, instr):
"""Display information about a x86 conditional jump."""
if instr not in jxx:
return bot.say('I can\'t find anything about that instruction, sorry')
bot.say('%s : %s' % (instr, jxx[instr]))
def x86instr(bot, instr):
"""Display information about any x86 instruction thats no a conditional jump."""
raw = web.get('http://www.felixcloutier.com/x86/')
match = re.search('<tr><td><a href="./(?P<page>[A-Z:]*).html">%s</a></td><td>(?P<desc>[^<]*)</td></tr>' % instr, raw)
if not match:
return bot.say('I can\'t find anything about that instruction, sorry')
bot.say('%s : %s -- %s' % (instr, match.group('desc'), 'http://www.felixcloutier.com/x86/%s' % match.group('page')))
@commands('x86', 'instr', 'instruction')
def instruction(bot, trigger):
"""Display information about an x86 instruction."""
instr = trigger.group(2)
if not instr:
return bot.reply('Give me an instruction')
instr = instr.strip().upper()
if 'J' == instr[0] and not instr == 'JMP':
return x86jmp(bot, instr)
x86instr(bot, instr)
jxx = {
'JA' : 'Jump if above (CF=0 and ZF=0)',
'JAE' : 'Jump if above or equal (CF=0)',
'JB' : 'Jump if below (CF=1)',
'JBE' : 'Jump if below or equal (CF=1 or ZF=1)',
'JC' : 'Jump if carry (CF=1)',
'JCXZ' : 'Jump if CX register is 0',
'JECXZ': 'Jump if ECX register is 0',
'JRCXZ': 'Jump if RCX register is 0',
'JE' : 'Jump if equal (ZF=1)',
'JG' : 'Jump if greater (ZF=0 and SF=OF)',
'JGE' : 'Jump if greater or equal (SF=OF)',
'JL' : 'Jump if less (SF!=OF)',
'JLE' : 'Jump if less or equal (ZF=1 or SF!=OF)',
'JNA' : 'Jump if not above (CF=1 or ZF=1)',
'JNAE' : 'Jump if not above or equal (CF=1)',
'JNB' : 'Jump if not below (CF=0)',
'JNBE' : 'Jump if not below or equal (CF=0 and ZF=0)',
'JNC' : 'Jump if not carry (CF=0)',
'JNE' : 'Jump if not equal (ZF=0)',
'JNG' : 'Jump if not greater (ZF=1 or SF!=OF)',
'JNGE' : 'Jump if not greater or equal (SF!=OF)',
'JNL' : 'Jump if not less (SF=OF)',
'JNLE' : 'Jump if not less or equal (ZF=0 and SF=OF)',
'JNO' : 'Jump if not overflow (OF=0)',
'JNP' : 'Jump if not parity (PF=0)',
'JNS' : 'Jump if not sign (SF=0)',
'JNZ' : 'Jump if not zero (ZF=0)',
'JO' : 'Jump if overflow (OF=1)',
'JP' : 'Jump if parity (PF=1)',
'JPE' : 'Jump if parity even (PF=1)',
'JPO' : 'Jump if parity odd (PF=0)',
'JS' : 'Jump if sign (SF=1)'
}
|
saelo/willie-modules
|
asm.py
|
Python
|
mit
| 4,715
|
#!/usr/bin/env python
import glob
import sys
import os
import vtktools
import numpy
import pylab
import re
def get_filelist(sample, start):
def key(s):
return int(s.split('_')[-1].split('.')[0])
list = glob.glob("*.vtu")
list = [l for l in list if 'check' not in l]
vtu_nos = [float(s.split('_')[-1].split('.')[0]) for s in list]
vals = zip(vtu_nos, list)
vals.sort()
unzip = lambda l:tuple(apply(zip,l))
vtu_nos, list = unzip(vals)
shortlist = []
for file in list:
try:
os.stat(file)
except:
            print "No such file: %s" % file
sys.exit(1)
##### Start at the (start+1)th file.
##### Add every nth file by taking integer multiples of n; limit at 10 vtus max.
vtu_no = float(file.split('_')[-1].split('.')[0])
if ((max(vtu_nos)-start)/sample > 10):
sample=int((max(vtu_nos)-start)/10)
if vtu_no > start:
if (vtu_no%sample==0):
shortlist.append(file)
##### Append final file if a large number of files remain.
elif vtu_no==len(vtu_nos)-1 and (max(vtu_nos)-sample/4.0)>vtu_no:
shortlist.append(file)
return shortlist
#### taken from http://www.codinghorror.com/blog/archives/001018.html #######
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
##############################################################################
# There are shorter and more elegant version of the above, but this works
# on CX1, where this test might be run...
###################################################################
# Reattachment length:
def reattachment_length(filelist):
print "Calculating reattachment point locations using change of x-velocity sign\n"
nums=[]; results=[]; files = []
##### check for no files
if (len(filelist) == 0):
print "No files!"
sys.exit(1)
for file in filelist:
try:
os.stat(file)
except:
print "No such file: %s" % file
sys.exit(1)
files.append(file)
sort_nicely(files)
for file in files:
##### Read in data from vtu
datafile = vtktools.vtu(file)
##### Get time for plot:
t = min(datafile.GetScalarField("Time"))
print file, ', elapsed time = ', t
##### points near bottom surface, 0 < x < 20
pts=[]; no_pts = 82; offset = 0.1
x = 5.0
for i in range(1, no_pts):
pts.append((x, offset, 0.0))
x += 0.25
pts = numpy.array(pts)
##### Get x-velocity on bottom boundary
uvw = datafile.ProbeData(pts, "Velocity")
u = []
u = uvw[:,0]
points = []
for i in range(len(u)-1):
##### Hack to ignore division by zero entries in u.
##### All u should be nonzero away from boundary!
if((u[i] / u[i+1]) < 0. and not numpy.isinf(u[i] / u[i+1])):
##### interpolate between nodes. Correct for origin not at step.
p = pts[i][0] + (pts[i+1][0]-pts[i][0]) * (0.0-u[i]) / (u[i+1]-u[i]) -5.0
##### Ignore spurious corner points
if(p>1.0):
points.append(p)
##### Append actual reattachment point and time:
results.append([points[0],t])
return results
#########################################################################
# Velocity profiles:
def meanvelo(filelist,x,y):
print "\nRunning velocity profile script on files at times...\n"
##### check for no files
    if (len(filelist) == 0):
print "No files!"
sys.exit(1)
##### create array of points. Correct for origin not at step.
pts=[]
for i in range(len(x)):
for j in range(len(y)):
pts.append([x[i]+5.0, y[j], 0.0])
pts=numpy.array(pts)
##### Create output array of correct shape
profiles=numpy.zeros([len(filelist), x.size, y.size], float)
time = numpy.zeros([len(filelist)], float)
filecount = 0
for file in filelist:
datafile = vtktools.vtu(file)
# Get time
t = min(datafile.GetScalarField("Time"))
print file, ', elapsed time = ', t
time[filecount] = t
##### Get x-velocity
uvw = datafile.ProbeData(pts, "Velocity")
umax = max(abs(datafile.GetVectorField("Velocity")[:,0]))
(ilen, jlen) = uvw.shape
u = uvw[:,0]/umax
u=u.reshape([x.size,y.size])
profiles[filecount,:,:] = u
filecount += 1
print "\n...Finished writing data files.\n"
return profiles, time
#########################################################################
def plot_length(Re,type,mesh,reattachment_length):
##### Plot time series of reattachment length using pylab(matplotlib)
plot1 = pylab.figure()
pylab.title("Time series of reattachment length: Re="+str(Re)+", "+str(type)+"-Re BCs, "+str(mesh)+" mesh")
pylab.xlabel('Time (s)')
pylab.ylabel('Reattachment Length (L/h)')
pylab.plot(reattachment_length[:,1], reattachment_length[:,0], marker = 'o', markerfacecolor='white', markersize=6, markeredgecolor='black', linestyle="solid")
pylab.savefig("../reatt_len_2D_"+str(Re)+"_"+str(type)+"_"+str(mesh)+".pdf")
return
def plot_meanvelo(Re,type,mesh,profiles,xarray,yarray,time):
##### Plot velocity profiles at different points behind step, and at 3 times using pylab(matplotlib)
plot1 = pylab.figure(figsize = (16.5, 8.5))
pylab.suptitle("Evolution of U-velocity: Re="+str(Re)+", "+str(type)+"-Re BCs, "+str(mesh)+" mesh", fontsize=20)
size = 15
ax = pylab.subplot(141)
shift=0.0
leg_end = []
for i in range(len(time)):
ax.plot(profiles[i,0,:]+shift,yarray, linestyle="solid")
shift+=0.0
leg_end.append("%.1f secs"%time[i])
pylab.legend((leg_end), loc="lower right")
ax.set_title('(a) x/h='+str(xarray[0]), fontsize=16)
#ax.grid("True")
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(size)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(size)
bx = pylab.subplot(142, sharex=ax, sharey=ax)
shift=0.0
for i in range(len(time)):
bx.plot(profiles[i,1,:]+shift,yarray, linestyle="solid")
shift+=0.0
    bx.set_title('(b) x/h='+str(xarray[1]), fontsize=16)
#bx.grid("True")
for tick in bx.xaxis.get_major_ticks():
tick.label1.set_fontsize(size)
pylab.setp(bx.get_yticklabels(), visible=False)
cx = pylab.subplot(143, sharex=ax, sharey=ax)
shift=0.0
for i in range(len(time)):
cx.plot(profiles[i,2,:]+shift,yarray, linestyle="solid")
shift+=0.0
    cx.set_title('(c) x/h='+str(xarray[2]), fontsize=16)
#bx.grid("True")
for tick in cx.xaxis.get_major_ticks():
tick.label1.set_fontsize(size)
pylab.setp(cx.get_yticklabels(), visible=False)
dx = pylab.subplot(144, sharex=ax, sharey=ax)
shift=0.0
for i in range(len(time)):
dx.plot(profiles[i,3,:]+shift,yarray, linestyle="solid")
shift+=0.0
    dx.set_title('(d) x/h='+str(xarray[3]), fontsize=16)
#bx.grid("True")
for tick in dx.xaxis.get_major_ticks():
tick.label1.set_fontsize(size)
pylab.setp(dx.get_yticklabels(), visible=False)
pylab.axis([-0.2, 1., 0., 1.94])
bx.set_xlabel('Normalised U-velocity (U/Umax)', fontsize=24)
ax.set_ylabel('z/h', fontsize=24)
pylab.savefig("../velo_profiles_2d_"+str(Re)+"_"+str(type)+"_"+str(mesh)+".pdf")
return
#########################################################################
def main():
##### Which run is being processed?
Re = sys.argv[1]
type = sys.argv[2]
mesh = sys.argv[3]
print "Re, bc type, mesh: ", Re, type, mesh
##### Only process every nth file by taking integer multiples of n:
filelist = get_filelist(sample=30, start=10)
##### Call reattachment_length function
reatt_length = numpy.array(reattachment_length(filelist))
av_length = sum(reatt_length[:,0]) / len(reatt_length[:,0])
numpy.save("reatt_len_2D_"+str(Re)+"_"+str(mesh), reatt_length)
print "\nTime-averaged reattachment length (in step heights): ", av_length
plot_length(Re,type,mesh,reatt_length)
##### Points to generate profiles:
xarray = numpy.array([2.0, 4.0, 6.0, 10.0])
yarray = numpy.array([0.01,0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,1.91,1.92,1.93,1.94])
##### Call meanvelo function
profiles, time = meanvelo(filelist, xarray, yarray)
numpy.save("velo_profiles_2d_"+str(Re)+"_"+str(mesh), profiles)
plot_meanvelo(Re,type,mesh,profiles,xarray,yarray,time)
pylab.show()
print "\nAll done.\n"
if __name__ == "__main__":
sys.exit(main())
|
FluidityProject/multifluids
|
examples/backward_facing_step_2d/postprocessor_2d.py
|
Python
|
lgpl-2.1
| 8,741
|
#!/usr/bin/env python3
"""
Copyright (c) 2013 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
"""
This file demonstrates using PyMata to control a stepper motor. It requires the use of the FirmataPlus
Arduino sketch included with this release.
It is based upon the following tutorial: https://learn.adafruit.com/adafruit-arduino-lesson-16-stepper-motors/overview
"""
from pymata_aio.pymata3 import PyMata3
# version report callback function
def vr(data):
# print('hello')
print('version')
print(data)
# create a PyMata instance
firmata = PyMata3(2)
# send the arduino a firmata reset
firmata.send_reset()
# configure the stepper to use pins 8, 9, 10 and 11 and specify 180 steps per revolution
firmata.stepper_config(180, [8, 9, 10, 11])
# allow time for config to complete
firmata.sleep(.5)
# move motor #0 500 steps forward at a speed of 20
firmata.stepper_step(20, 500)
# firmata.sleep(4)
# move motor #0 500 steps reverse at a speed of 20
# firmata.stepper_step(20, -500)
# close firmata
firmata.shutdown()
|
MrYsLab/pymata-aio
|
test/stepper.py
|
Python
|
agpl-3.0
| 1,746
|
'''
Created on 2016-02-23
@author: Darren
'''
'''
Given an image represented by an NxN matrix,
where each pixel in the image is 4 bytes, write a method to rotate the image by 90 degrees.
Can you do this in place?
'''
'''
* clockwise rotate
* first reverse up to down, then swap the symmetry
* 1 2 3 7 8 9 7 4 1
* 4 5 6 => 4 5 6 => 8 5 2
* 7 8 9 1 2 3 9 6 3
* anticlockwise rotate
* first reverse left to right, then swap the symmetry
* 1 2 3 3 2 1 3 6 9
* 4 5 6 => 6 5 4 => 2 5 8
* 7 8 9 9 8 7 1 4 7
'''
def rotate(matrix):
matrix=matrix[::-1]
for i in range(len(matrix)):
for j in range(i+1,len(matrix)):
matrix[i][j],matrix[j][i]=matrix[j][i],matrix[i][j]
return matrix
def rotate2(A):
n = len(A)
for i in range(n//2):
for j in range(n-n//2):
A[i][j], A[n-1-j][i], A[n-1-i][n-1-j], A[j][n-1-i] = \
A[n-1-j][i], A[n-1-i][n-1-j], A[j][n-1-i], A[i][j]
return A
matrix=[[1,2,3],[4,5,6],[7,8,9]]
print(rotate(matrix))
matrix=[[1,2,3],[4,5,6],[7,8,9]]
print(rotate2(matrix))
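# For completeness, the anticlockwise rotation sketched in the comment block
# above, using the same reverse-then-transpose idea (added for illustration):
def rotate_anticlockwise(matrix):
    # first reverse left to right, then swap along the main diagonal
    matrix = [row[::-1] for row in matrix]
    for i in range(len(matrix)):
        for j in range(i + 1, len(matrix)):
            matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
    return matrix
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(rotate_anticlockwise(matrix))  # [[3, 6, 9], [2, 5, 8], [1, 4, 7]]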
|
darrencheng0817/AlgorithmLearning
|
Python/CTCI/1_6.py
|
Python
|
mit
| 1,138
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for SDK stages."""
from __future__ import print_function
import json
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import commands
from chromite.cbuildbot.stages import sdk_stages
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import portage_util
class SDKBuildToolchainsStageTest(generic_stages_unittest.AbstractStageTest):
"""Tests SDK toolchain building."""
def setUp(self):
# This code has its own unit tests, so no need to go testing it here.
self.run_mock = self.PatchObject(commands, 'RunBuildScript')
def ConstructStage(self):
return sdk_stages.SDKBuildToolchainsStage(self._run)
def testNormal(self):
"""Basic run through the main code."""
self._Prepare('chromiumos-sdk')
self.RunStage()
self.assertEqual(self.run_mock.call_count, 2)
# Sanity check args passed to RunBuildScript.
for call in self.run_mock.call_args_list:
buildroot, cmd = call[0]
self.assertTrue(isinstance(buildroot, basestring))
self.assertTrue(isinstance(cmd, (tuple, list)))
for ele in cmd:
self.assertTrue(isinstance(ele, basestring))
class SDKPackageStageTest(generic_stages_unittest.AbstractStageTest):
"""Tests SDK package and Manifest creation."""
fake_packages = [('cat1/package', '1'), ('cat1/package', '2'),
('cat2/package', '3'), ('cat2/package', '4')]
fake_json_data = {}
fake_chroot = None
def setUp(self):
# Replace SudoRunCommand, since we don't care about sudo.
self.PatchObject(cros_build_lib, 'SudoRunCommand',
wraps=cros_build_lib.RunCommand)
# Prepare a fake chroot.
self.fake_chroot = os.path.join(self.build_root, 'chroot/build/amd64-host')
osutils.SafeMakedirs(self.fake_chroot)
osutils.Touch(os.path.join(self.fake_chroot, 'file'))
for package, v in self.fake_packages:
cpv = portage_util.SplitCPV('%s-%s' % (package, v))
key = '%s/%s' % (cpv.category, cpv.package)
self.fake_json_data.setdefault(key, []).append([v, {}])
def ConstructStage(self):
return sdk_stages.SDKPackageStage(self._run)
def testTarballCreation(self):
"""Tests whether we package the tarball and correctly create a Manifest."""
self._Prepare('chromiumos-sdk')
fake_tarball = os.path.join(self.build_root, 'built-sdk.tar.xz')
fake_manifest = os.path.join(self.build_root,
'built-sdk.tar.xz.Manifest')
self.PatchObject(portage_util, 'ListInstalledPackages',
return_value=self.fake_packages)
self.RunStage()
# Check tarball for the correct contents.
output = cros_build_lib.RunCommand(
['tar', '-I', 'xz', '-tvf', fake_tarball],
capture_output=True).output.splitlines()
# First line is './', use it as an anchor, count the chars, and strip as
# much from all other lines.
stripchars = len(output[0]) - 1
tar_lines = [x[stripchars:] for x in output]
self.assertNotIn('/build/amd64-host/', tar_lines)
self.assertIn('/file', tar_lines)
# Verify manifest contents.
real_json_data = json.loads(osutils.ReadFile(fake_manifest))
self.assertEqual(real_json_data['packages'],
self.fake_json_data)
class SDKTestStageTest(generic_stages_unittest.AbstractStageTest):
"""Tests SDK test phase."""
def setUp(self):
# This code has its own unit tests, so no need to go testing it here.
self.run_mock = self.PatchObject(cros_build_lib, 'RunCommand')
def ConstructStage(self):
return sdk_stages.SDKTestStage(self._run)
def testNormal(self):
"""Basic run through the main code."""
self._Prepare('chromiumos-sdk')
self.RunStage()
if __name__ == '__main__':
cros_test_lib.main()
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/chromite/cbuildbot/stages/sdk_stages_unittest.py
|
Python
|
bsd-3-clause
| 4,152
|
#!/usr/bin/python
from __future__ import print_function
import sys
lines = [l.rstrip().replace('\t', ' '*8) for l in sys.stdin.readlines()]
print('TITLE')
print(lines[0])
print()
print('SYNOPSIS')
for i,line in enumerate(lines[2:]):
if line.lstrip().startswith('-'):
optStart = i+2
break
print(line)
print('''DESCRIPTION
This program is part of the OpenImageIO (http://www.openimageio.org) tool suite.
Detailed documentation is available in pdf format with the OpenImageIO
distribution.
''')
print('OPTIONS')
for line in lines[optStart:]:
if not line.startswith(' '):
print()
print(line)
elif not line.lstrip().startswith('-'):
print(line.lstrip())
else:
print(line)
print()
|
jeremyselan/oiio
|
src/doc/help2man_preformat.py
|
Python
|
bsd-3-clause
| 749
|
import os.path as op
from nose.tools import eq_, ok_
from flask_admin.contrib import fileadmin
from flask_admin import Admin
from flask import Flask
from . import setup
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def create_view():
app, admin = setup()
class MyFileAdmin(fileadmin.FileAdmin):
editable_extensions = ('txt',)
path = op.join(op.dirname(__file__), 'files')
view = MyFileAdmin(path, '/files/', name='Files')
admin.add_view(view)
return app, admin, view
def test_file_admin():
app, admin, view = create_view()
client = app.test_client()
# index
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# edit
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/edit/?path=dummy.txt', data=dict(
content='new_string'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
ok_('new_string' in rv.data.decode('utf-8'))
# rename
rv = client.get('/admin/myfileadmin/rename/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy.txt', data=dict(
name='dummy_renamed.txt',
path='dummy.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
ok_('path=dummy.txt' not in rv.data.decode('utf-8'))
# upload
rv = client.get('/admin/myfileadmin/upload/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/upload/', data=dict(
upload=(StringIO(""), 'dummy.txt'),
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
# delete
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# mkdir
rv = client.get('/admin/myfileadmin/mkdir/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/mkdir/', data=dict(
name='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' in rv.data.decode('utf-8'))
# rename - directory
rv = client.get('/admin/myfileadmin/rename/?path=dummy_dir')
eq_(rv.status_code, 200)
ok_('dummy_dir' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy_dir', data=dict(
name='dummy_renamed_dir',
path='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' not in rv.data.decode('utf-8'))
# delete - directory
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
def test_modal_edit():
# bootstrap 2 - test edit_modal
app_bs2 = Flask(__name__)
admin_bs2 = Admin(app_bs2, template_mode="bootstrap2")
class EditModalOn(fileadmin.FileAdmin):
edit_modal = True
editable_extensions = ('txt',)
class EditModalOff(fileadmin.FileAdmin):
edit_modal = False
editable_extensions = ('txt',)
path = op.join(op.dirname(__file__), 'files')
edit_modal_on = EditModalOn(path, '/files/', endpoint='edit_modal_on')
edit_modal_off = EditModalOff(path, '/files/', endpoint='edit_modal_off')
admin_bs2.add_view(edit_modal_on)
admin_bs2.add_view(edit_modal_off)
client_bs2 = app_bs2.test_client()
# bootstrap 2 - ensure modal window is added when edit_modal is enabled
rv = client_bs2.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 2 - test edit modal disabled
rv = client_bs2.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
# bootstrap 3
app_bs3 = Flask(__name__)
admin_bs3 = Admin(app_bs3, template_mode="bootstrap3")
admin_bs3.add_view(edit_modal_on)
admin_bs3.add_view(edit_modal_off)
client_bs3 = app_bs3.test_client()
# bootstrap 3 - ensure modal window is added when edit_modal is enabled
rv = client_bs3.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 3 - test modal disabled
rv = client_bs3.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
|
Widiot/simpleblog
|
venv/lib/python3.5/site-packages/flask_admin/tests/fileadmin/test_fileadmin.py
|
Python
|
mit
| 5,579
|
# -*- coding: utf-8 -*-
import re
import sys
import duralex.alinea_lexer as alinea_lexer
import duralex.tree
from duralex.tree import *
def debug(node, tokens, i, msg):
if '--debug' in sys.argv:
print(' ' * get_node_depth(node) + msg + ' ' + str(tokens[i:i+8]))
def is_number(token):
    return re.compile(r'\d+').match(token)
def is_space(token):
    return re.compile(r'^\s+$').match(token)
def parse_int(s):
return int(re.search(r'\d+', s).group())
def parse_roman_number(n):
romans_map = zip(
(1000, 900, 500, 400 , 100, 90 , 50 , 40 , 10 , 9 , 5 , 4 , 1),
( 'M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
)
n = n.upper()
i = res = 0
for d, r in romans_map:
while n[i:i + len(r)] == r:
res += d
i += len(r)
return res
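# Worked examples of the mapping above: parse_roman_number(u'XIV') accumulates
# 10 + 4 = 14, and parse_roman_number(u'Ier') ignores the 'er' suffix and
# returns 1.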
def is_roman_number(token):
return re.compile(r"[IVXCLDM]+(er)?").match(token)
def is_number_word(word):
return word_to_number(word) >= 0
def word_to_number(word):
words = [
[u'un', u'une', u'premier', u'première'],
[u'deux', u'deuxième', u'second', u'seconde'],
[u'trois', u'troisième'],
[u'quatre', u'quatrième'],
[u'cinq', u'cinquième'],
[u'six', u'sixième'],
[u'sept', u'septième'],
[u'huit', u'huitième'],
[u'neuf', u'neuvième'],
[u'dix', u'dixième'],
[u'onze', u'onzième'],
[u'douze', u'douzième'],
[u'treize', u'treizième'],
[u'quatorze', u'quatorzième'],
[u'quinze', u'quinzième'],
[u'seize', u'seizième'],
]
word = word.lower()
word = word.replace(u'È', u'è')
for i in range(0, len(words)):
if word in words[i]:
return i + 1
return -1
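# e.g. word_to_number(u'première') == 1 and word_to_number(u'seconde') == 2;
# any word outside the table above yields -1.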
def month_to_number(month):
return alinea_lexer.TOKEN_MONTH_NAMES.index(month) + 1
def parse_section_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_SECTION_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_section_reference')
# la section {order}
if tokens[i].lower() == u'la' and tokens[i + 2] == u'section':
        node['order'] = parse_int(tokens[i + 4])
i += 6
# de la section {order}
elif tokens[i] == u'de' and tokens[i + 2] == u'la' and tokens[i + 4] == u'section':
        node['order'] = parse_int(tokens[i + 6])
i += 8
else:
remove_node(parent, node)
return i
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_section_reference end')
return i
def parse_subsection_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_SUBSECTION_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_subsection_reference')
# de la sous-section {order}
if tokens[i].lower() == u'la' and tokens[i + 2] == u'sous-section':
        node['order'] = parse_int(tokens[i + 4])
i += 6
# de la sous-section {order}
elif tokens[i] == u'de' and tokens[i + 2] == u'la' and tokens[i + 4] == u'sous-section':
        node['order'] = parse_int(tokens[i + 6])
i += 8
else:
remove_node(parent, node)
return i
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_subsection_reference end')
return i
def parse_chapter_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_CHAPTER_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_chapter_reference')
# du chapitre {order}
# le chapitre {order}
if tokens[i].lower() in [u'du', u'le'] and tokens[i + 2] == u'chapitre' and is_roman_number(tokens[i + 4]):
        node['order'] = parse_roman_number(tokens[i + 4])
i += 6
else:
remove_node(parent, node)
return i
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_chapter_reference end')
return i
def parse_paragraph_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_PARAGRAPH_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_paragraph_reference')
# du paragraphe {order}
# le paragraphe {order}
if tokens[i].lower() in [u'du', u'le'] and tokens[i + 2] == u'paragraphe':
        node['order'] = parse_int(tokens[i + 4])
i += 6
else:
remove_node(parent, node)
return i
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_paragraph_reference end')
return i
def parse_subparagraph_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_subparagraph_definition')
node = create_node(parent, {
'type': TYPE_SUBPARAGRAPH_DEFINITION,
'children': [],
})
j = i
# un sous-paragraphe[s] [{order}] [ainsi rédigé]
if is_number_word(tokens[i]) and tokens[i + 2].startswith(u'sous-paragraphe'):
count = word_to_number(tokens[i])
i += 4
# [{order}]
if is_number(tokens[i]):
node['order'] = parse_int(tokens[i])
# ainsi rédigé
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
else:
remove_node(parent, node)
debug(parent, tokens, i, 'parse_subparagraph_definition none')
return j
debug(parent, tokens, i, 'parse_subparagraph_definition end')
return i
def parse_law_reference(tokens, i, parent):
if i >= len(tokens):
return i
j = i
node = create_node(parent, {
'type': TYPE_LAW_REFERENCE,
'id': '',
'children': [],
})
debug(parent, tokens, i, 'parse_law_reference')
# de l'ordonnance
# l'ordonnance
if i + 4 < len(tokens) and (tokens[i + 2] == u'ordonnance' or tokens[i + 4] == u'ordonnance'):
node['lawType'] = 'ordonnance'
i = alinea_lexer.skip_to_token(tokens, i, u'ordonnance') + 2
# de la loi
# la loi
elif i + 4 < len(tokens) and ((tokens[i] == u'la' and tokens[i + 2] == u'loi') or (tokens[i] == u'de' and tokens[i + 4] == u'loi')):
i = alinea_lexer.skip_to_token(tokens, i, u'loi') + 2
# de la même loi
elif tokens[i].lower() == u'de' and tokens[i + 2] == u'la' and tokens[i + 4] == u'même' and tokens[i + 6] == u'loi':
i += 8
law_refs = filter_nodes(
get_root(parent),
lambda n: 'type' in n and n['type'] == TYPE_LAW_REFERENCE
)
        # the last one in order of traversal is the previous one in order of syntax
# don't forget the current node is in the list too => -2 instead of -1
law_ref = copy_node(law_refs[-2], False)
push_node(parent, law_ref)
remove_node(parent, node)
node = law_ref
else:
remove_node(parent, node)
return i
if i < len(tokens) and tokens[i] == u'organique':
node['lawType'] = 'organic'
i += 2
if node['id'] == '':
i = alinea_lexer.skip_to_token(tokens, i, u'n°') + 1
# If we didn't find the "n°" token, the reference is incomplete and we forget about it.
if i >= len(tokens):
remove_node(parent, node)
return j
i = alinea_lexer.skip_spaces(tokens, i)
node['id'] = tokens[i]
# skip {id} and the following space
i += 2
if i < len(tokens) and tokens[i] == u'du':
node['lawDate'] = tokens[i + 6] + u'-' + str(month_to_number(tokens[i + 4])) + u'-' + tokens[i + 2]
# skip {lawDate} and the following space
i += 7
i = alinea_lexer.skip_spaces(tokens, i)
if i < len(tokens) and tokens[i] == u'modifiant':
j = alinea_lexer.skip_to_token(tokens, i, 'code')
if j < len(tokens):
i = parse_code_reference(tokens, j, node)
# les mots
i = parse_one_of(
[
parse_word_reference,
],
tokens,
i,
node
)
debug(parent, tokens, i, 'parse_law_reference end')
return i
def parse_multiplicative_adverb(tokens, i, node):
if i >= len(tokens):
return i
    # check the longest adverbs first; list.sort() returns None, so use sorted()
    adverbs = sorted(alinea_lexer.TOKEN_MULTIPLICATIVE_ADVERBS, key=lambda s: -len(s))
    for adverb in adverbs:
if tokens[i].endswith(adverb):
            node['is' + adverb.title()] = True
# skip {multiplicativeAdverb} and the following space
i += 1
i = alinea_lexer.skip_spaces(tokens, i)
return i
return i
def parse_definition(tokens, i, parent):
if i >= len(tokens):
return i
i = parse_one_of(
[
parse_article_definition,
parse_alinea_definition,
parse_mention_definition,
parse_header1_definition,
parse_header2_definition,
parse_header3_definition,
parse_sentence_definition,
parse_word_definition,
parse_title_definition,
parse_subparagraph_definition
],
tokens,
i,
parent
)
return i
def parse_sentence_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_sentence_definition')
j = i
# {count} phrases
if is_number_word(tokens[i]) and tokens[i + 2].startswith(u'phrase'):
count = word_to_number(tokens[i])
i += 4
# ainsi rédigé
# est rédigé
# est ainsi rédigé
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
# we expect {count} definitions => {count} quotes
# but they don't always match, so for now we parse all of the available contents
# FIXME: issue a warning because the expected count doesn't match?
i = alinea_lexer.skip_spaces(tokens, i)
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(
parse_quote,
tokens,
i,
lambda : create_node(parent, {'type': TYPE_SENTENCE_DEFINITION, 'children': []})
)
else:
create_node(parent, {'type': TYPE_SENTENCE_DEFINITION, 'count': count})
else:
debug(parent, tokens, i, 'parse_sentence_definition none')
return j
debug(parent, tokens, i, 'parse_sentence_definition end')
return i
def parse_word_definition(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_WORD_DEFINITION,
})
debug(parent, tokens, i, 'parse_word_definition')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le mot
# les mots
# des mots
if tokens[i].lower() in [u'le', u'les', u'des'] and tokens[i + 2].startswith(u'mot'):
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
# i = alinea_lexer.skip_spaces(tokens, i)
# le nombre
# le chiffre
# le taux
elif tokens[i].lower() == u'le' and tokens[i + 2] in [u'nombre', u'chiffre', u'taux']:
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, node)
# "
elif tokens[i] == alinea_lexer.TOKEN_DOUBLE_QUOTE_OPEN:
i = parse_for_each(parse_quote, tokens, i, node)
i = alinea_lexer.skip_spaces(tokens, i)
# la référence
# les références
elif tokens[i].lower() in [u'la', u'les'] and tokens[i + 2].startswith(u'référence'):
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_word_definition none')
remove_node(parent, node)
return j
debug(parent, tokens, i, 'parse_word_definition end')
return i
def parse_article_definition(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_ARTICLE_DEFINITION,
'children': [],
})
debug(parent, tokens, i, 'parse_article_definition')
# un article
if tokens[i].lower() == u'un' and tokens[i + 2] == u'article':
i += 4
# l'article
elif tokens[i].lower() == u'l' and tokens[i + 2] == u'article':
i += 4
else:
debug(parent, tokens, i, 'parse_article_definition none')
remove_node(parent, node)
return i
i = parse_article_id(tokens, i, node)
i = alinea_lexer.skip_spaces(tokens, i)
if i < len(tokens) and tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
debug(parent, tokens, i, 'parse_article_definition end')
return i
def parse_alinea_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_alinea_definition')
# {count} alinéa(s)
if is_number_word(tokens[i]) and tokens[i + 2].startswith(u'alinéa'):
count = word_to_number(tokens[i])
i += 4
# ainsi rédigé
# est rédigé
# est ainsi rédigé
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
# we expect {count} definitions => {count} quotes
# but they don't always match, so for now we parse all of the available contents
# FIXME: issue a warning because the expected count doesn't match?
i = alinea_lexer.skip_spaces(tokens, i)
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(
parse_quote,
tokens,
i,
lambda: create_node(parent, {'type': TYPE_ALINEA_DEFINITION, 'children': []})
)
else:
node = create_node(parent, {'type': TYPE_ALINEA_DEFINITION, 'count': count})
else:
debug(parent, tokens, i, 'parse_alinea_definition none')
return i
debug(parent, tokens, i, 'parse_alinea_definition end')
return i
def parse_mention_definition(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_MENTION_DEFINITION,
})
debug(parent, tokens, i, 'parse_mention_definition')
# la mention
if tokens[i].lower() == u'la' and tokens[i + 2] == u'mention':
i += 4
else:
debug(parent, tokens, i, 'parse_mention_definition none')
remove_node(parent, node)
return i
# :
if tokens[i] == ':':
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
debug(parent, tokens, i, 'parse_mention_definition end')
return i
def parse_header1_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_header1_definition')
# un {romanPartNumber}
if tokens[i].lower() == u'un' and is_roman_number(tokens[i + 2]):
node = create_node(parent, {
'type': TYPE_HEADER1_DEFINITION,
'order': parse_roman_number(tokens[i + 2]),
})
i += 4
i = alinea_lexer.skip_spaces(tokens, i)
if i + 2 < len(tokens) and tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, node)
# des {start} à {end}
elif (tokens[i].lower() == u'des' and is_roman_number(tokens[i + 2])
and tokens[i + 4] == u'à' and is_roman_number(tokens[i + 6])):
start = parse_roman_number(tokens[i + 2])
end = parse_roman_number(tokens[i + 6])
i += 8
# ainsi rédigés
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_for_each(
parse_quote,
tokens,
i,
lambda : create_node(parent, {'type': TYPE_HEADER1_DEFINITION, 'order': start + len(parent['children']), 'children': []})
)
else:
debug(parent, tokens, i, 'parse_header1_definition end')
return i
return i
def parse_header2_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_header2_definition')
# un ... ° ({articlePartRef})
if tokens[i].lower() == u'un' and ''.join(tokens[i + 2:i + 5]) == u'...' and tokens[i + 6] == u'°':
node = create_node(parent, {
'type': TYPE_HEADER2_DEFINITION,
})
        # FIXME: should we simply ignore the 'order' field altogether?
node['order'] = '...'
i += 8
i = alinea_lexer.skip_spaces(tokens, i)
if tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_quote(tokens, i, node)
# un {order}° ({orderLetter}) ({multiplicativeAdverb}) ({articlePartRef})
elif tokens[i].lower() == u'un' and re.compile(u'\d+°').match(tokens[i + 2]):
node = create_node(parent, {
'type': TYPE_HEADER2_DEFINITION,
})
node['order'] = parse_int(tokens[i + 2])
i += 4
if re.compile(u'[A-Z]').match(tokens[i]):
node['subOrder'] = tokens[i]
i += 2
i = parse_multiplicative_adverb(tokens, i, node)
i = parse_article_part_reference(tokens, i, node)
i = alinea_lexer.skip_spaces(tokens, i)
if i < len(tokens) and tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_quote(tokens, i, node)
# des {start}° à {end}°
elif (tokens[i].lower() == u'des' and re.compile(u'\d+°').match(tokens[i + 2])
and tokens[i + 4] == u'à' and re.compile(u'\d+°').match(tokens[i + 6])):
start = parse_int(tokens[i + 2])
end = parse_int(tokens[i + 6])
i += 8
# ainsi rédigés
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_for_each(
parse_quote,
tokens,
i,
lambda : create_node(parent, {'type': TYPE_HEADER2_DEFINITION, 'order': start + len(parent['children']), 'children': []})
)
else:
debug(parent, tokens, i, 'parse_header2_definition end')
return i
return i
def parse_header3_definition(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_header3_definition')
# un {orderLetter}
if tokens[i].lower() == u'un' and re.compile(u'^[a-z]$').match(tokens[i + 2]):
node = create_node(parent, {
'type': TYPE_HEADER3_DEFINITION,
'order': ord(str(tokens[i + 2])) - ord('a') + 1,
})
i += 4
i = alinea_lexer.skip_spaces(tokens, i)
if i < len(tokens) and tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_quote(tokens, i, node)
# des {orderLetter} à {orderLetter}
elif (tokens[i].lower() == u'des' and re.compile(u'^[a-z]$').match(tokens[i + 2])
and tokens[i + 4] == u'à' and re.compile(u'^[a-z]$').match(tokens[i + 6])):
start = ord(str(tokens[i + 2])) - ord('a') + 1
end = ord(str(tokens[i + 6])) - ord('a') + 1
i += 8
# ainsi rédigés
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
i = alinea_lexer.skip_to_quote_start(tokens, i + 4)
i = parse_for_each(
parse_quote,
tokens,
i,
lambda : create_node(parent, {'type': TYPE_HEADER3_DEFINITION, 'order': start + len(parent['children']), 'children': []})
)
else:
debug(parent, tokens, i, 'parse_header3_definition end')
return i
return i
def parse_article_id(tokens, i, node):
node['id'] = ''
# article {articleId}
if i < len(tokens) and tokens[i] == 'L' and tokens[i + 1] == '.':
        while i < len(tokens) and not re.compile(r'\d+(-\d+)?').match(tokens[i]):
            node['id'] += tokens[i]
            i += 1
    if i < len(tokens) and re.compile(r'\d+(-\d+)?').match(tokens[i]):
node['id'] += tokens[i]
# skip {articleId} and the following space
i += 1
i = alinea_lexer.skip_spaces(tokens, i)
# {articleId} {articleLetter}
# FIXME: handle the {articleLetter}{multiplicativeAdverb} case?
if i < len(tokens) and re.compile('^[A-Z]$').match(tokens[i]):
node['id'] += ' ' + tokens[i]
# skip {articleLetter} and the following space
i += 1
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_multiplicative_adverb(tokens, i, node)
if not node['id'] or is_space(node['id']):
del node['id']
return i
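# Illustrative, standalone sketch (not part of the original module): the lexer
# is assumed to emit alternating word/space tokens, so an id such as
# "L. 123-4" arrives as [u'L', u'.', u' ', u'123-4']. A minimal model of the
# id-assembly loop above, for demonstration only:
def _demo_parse_article_id(tokens):
    import re
    i, article_id = 0, ''
    while i < len(tokens) and not re.match(r'\d+(-\d+)?', tokens[i]):
        article_id += tokens[i]
        i += 1
    if i < len(tokens):
        article_id += tokens[i]
    return article_id
assert _demo_parse_article_id([u'L', u'.', u' ', u'123-4']) == u'L. 123-4'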
def parse_title_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_TITLE_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_title_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le titre {order}
# du titre {order}
if tokens[i].lower() in [u'le', u'du'] and tokens[i + 2] == u'titre' and is_roman_number(tokens[i + 4]):
node['order'] = parse_roman_number(tokens[i + 4])
i += 6
i = parse_multiplicative_adverb(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_title_reference none')
remove_node(parent, node)
return j
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_title_reference end')
return i
def parse_title_definition(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_TITLE_DEFINITION,
'children': [],
})
debug(parent, tokens, i, 'parse_title_definition')
# un titre {order}
if tokens[i].lower() == u'un' and tokens[i + 2] == u'titre' and is_roman_number(tokens[i + 4]):
node['order'] = parse_roman_number(tokens[i + 4])
i += 6
i = parse_multiplicative_adverb(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_title_definition none')
remove_node(parent, node)
return i
i = alinea_lexer.skip_spaces(tokens, i)
if tokens[i] == u'ainsi' and tokens[i + 2] == u'rédigé':
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
debug(parent, tokens, i, 'parse_title_definition end')
return i
def parse_code_part_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_CODE_PART_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_code_part_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# la {order} partie [{codeReference}]
if tokens[i] == u'la' and is_number_word(tokens[i + 2]) and tokens[i + 4] == u'partie':
node['order'] = word_to_number(tokens[i + 2])
i += 6
i = parse_code_reference(tokens, i, node)
# de la {order} partie [{codeReference}]
elif tokens[i] == u'de' and tokens[i + 2] == u'la' and is_number_word(tokens[i + 4]) and tokens[i + 6] == u'partie':
node['order'] = word_to_number(tokens[i + 4])
i += 8
i = parse_code_reference(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_code_part_reference none')
remove_node(parent, node)
return j
debug(parent, tokens, i, 'parse_code_part_reference end')
return i
def parse_book_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_BOOK_REFERENCE,
'children': [],
})
debug(parent, tokens, i, 'parse_book_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le livre {order}
# du livre {order}
if tokens[i].lower() in [u'le', u'du'] and tokens[i + 2] == u'livre' and is_roman_number(tokens[i + 4]):
node['order'] = parse_roman_number(tokens[i + 4])
i += 6
else:
debug(parent, tokens, i, 'parse_book_reference none')
remove_node(parent, node)
return j
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_book_reference end')
return i
def parse_scope(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_scope')
node = None
# la fin de
if tokens[i] == u'la' and tokens[i + 2] == u'fin' and tokens[i + 4] in [u'de', u'du']:
i += 4
parent['scope'] = 'end'
debug(parent, tokens, i, 'parse_scope end')
return i
def parse_bill_article_reference(tokens, i, parent):
if i >= len(tokens):
return i
debug(parent, tokens, i, 'parse_bill_article_reference')
# cet article
if tokens[i] == u'cet' and tokens[i + 2] == u'article':
i += 4
article_refs = filter_nodes(
get_root(parent),
lambda n: 'type' in n and n['type'] == TYPE_BILL_ARTICLE_REFERENCE
)
# the last one in order of traversal is the previous one in order of syntax
article_ref = copy_node(article_refs[-1])
push_node(parent, article_ref)
debug(parent, tokens, i, 'parse_bill_article_reference end')
return i
def parse_article_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_ARTICLE_REFERENCE,
})
debug(parent, tokens, i, 'parse_article_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# de l'article
# à l'article
if tokens[i].lower() in [u'de', u'à'] and tokens[i + 2] == u'l' and tokens[i + 4] == u'article':
i += 5
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_article_id(tokens, i, node)
# l'article
elif tokens[i].lower() == u'l' and tokens[i + 2].startswith(u'article'):
i += 3
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_article_id(tokens, i, node)
# les articles
# des articles
elif tokens[i].lower() in [u'des', u'les'] and tokens[i + 2].startswith(u'article'):
i += 3
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_article_id(tokens, i, node)
i = alinea_lexer.skip_spaces(tokens, i)
nodes = []
while tokens[i] == u',':
i += 2
nodes.append(create_node(parent, {'type':TYPE_ARTICLE_REFERENCE}))
i = parse_article_id(tokens, i, nodes[-1])
i = alinea_lexer.skip_spaces(tokens, i)
if tokens[i] == u'et':
i += 2
nodes.append(create_node(parent, {'type':TYPE_ARTICLE_REFERENCE}))
i = parse_article_id(tokens, i, nodes[-1])
# i = parse_article_part_reference(tokens, i, node)
# de la loi
# de l'ordonnance
# du code
# les mots
# l'alinéa
i = parse_one_of(
[
parse_law_reference,
parse_code_reference,
parse_word_reference,
parse_alinea_reference
],
tokens,
i,
node
)
        # if there are descendant *-reference nodes parsed by the previous call
        # to parse_one_of, we must make sure they apply to all of the
        # article-reference nodes we just created
if len(node['children']) != 0:
for n in nodes:
for c in node['children']:
push_node(n, copy_node(c))
return i
# elif tokens[i] == u'un' and tokens[i + 2] == u'article':
# i += 4
# Article {articleNumber}
elif tokens[i].lower().startswith(u'article'):
i += 1
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_article_id(tokens, i, node)
# le même article
# du même article
elif tokens[i].lower() in [u'le', u'du'] and tokens[i + 2] == u'même' and tokens[i + 4] == u'article':
i += 6
article_refs = filter_nodes(
get_root(parent),
lambda n: 'type' in n and n['type'] == TYPE_ARTICLE_REFERENCE
)
# the last one in order of traversal is the previous one in order of syntax
# don't forget the current node is in the list too => -2 instead of -1
article_ref = copy_node(article_refs[-2])
push_node(parent, article_ref)
remove_node(parent, node)
else:
remove_node(parent, node)
return j
# i = parse_article_part_reference(tokens, i, node)
# de la loi
# de l'ordonnance
# du code
# les mots
# l'alinéa
i = parse_one_of(
[
parse_law_reference,
parse_code_reference,
parse_word_reference,
parse_alinea_reference
],
tokens,
i,
node
)
# i = parse_quote(tokens, i, node)
debug(parent, tokens, i, 'parse_article_reference end')
return i
def parse_position(tokens, i, node):
if i >= len(tokens):
return i
j = i
# i = alinea_lexer.skip_to_next_word(tokens, i)
# après
if tokens[i].lower() == u'après':
node['position'] = 'after'
i += 2
# avant
elif tokens[i].lower() == u'avant':
node['position'] = 'before'
i += 2
# au début
elif tokens[i].lower() == u'au' and tokens[i + 2] == u'début':
node['position'] = 'beginning'
i += 4
# à la fin du {article}
elif tokens[i].lower() == u'à' and tokens[i + 2] == u'la' and tokens[i + 4] == u'fin':
node['position'] = 'end'
i += 6
else:
return j
return i
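# Illustrative, standalone sketch: the positional phrases matched above map
# onto four position values. A table-driven summary (demo only; the real
# parser walks the word/space token stream instead of whole phrases):
_DEMO_POSITIONS = {
    u'après': 'after',
    u'avant': 'before',
    u'au début': 'beginning',
    u'à la fin': 'end',
}
assert _DEMO_POSITIONS[u'avant'] == 'before'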
def parse_alinea_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_ALINEA_REFERENCE,
})
debug(parent, tokens, i, 'parse_alinea_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le {order} alinéa
# du {order} alinéa
# au {order} alinéa
if tokens[i].lower() in [u'du', u'le', u'au'] and is_number_word(tokens[i + 2]) and tokens[i + 4].startswith(u'alinéa'):
node['order'] = word_to_number(tokens[i + 2])
i += 6
# l'alinéa
elif tokens[i].lower() == u'l' and tokens[i + 2].startswith(u'alinéa'):
node['order'] = parse_int(tokens[i + 4])
i += 6
# de l'alinéa
    elif tokens[i] == u'de' and tokens[i + 2].lower() == u'l' and tokens[i + 4].startswith(u'alinéa'):
i += 6
# {order} {partType}
elif is_number_word(tokens[i].lower()) and tokens[i + 2].startswith(u'alinéa'):
node['order'] = word_to_number(tokens[i])
i += 4
# aux {count} {position} alinéas
# elif tokens[i].lowers() == u'aux' and is_number_word(tokens[i + 2]) and tokens[i + 6] == u'alinéas':
# le même alinéa
elif tokens[i].lower() in [u'le'] and tokens[i + 2] == u'même' and tokens[i + 4] == u'alinéa':
i += 6
alinea_refs = filter_nodes(
get_root(parent),
lambda n: 'type' in n and n['type'] == TYPE_ALINEA_REFERENCE
)
        # the last one in order of traversal is the previous one in order of syntax
# don't forget the current node is in the list too => -2 instead of -1
alinea_ref = copy_node(alinea_refs[-2])
push_node(parent, alinea_ref)
remove_node(parent, node)
# du dernier alinéa
# au dernier alinéa
# le dernier alinéa
elif tokens[i].lower() in [u'du', u'au', u'le'] and tokens[i + 2] == u'dernier' and tokens[i + 4] == u'alinéa':
node['order'] = -1
i += 6
# à l'avant dernier alinéa
elif tokens[i].lower() == u'à' and tokens[i + 4] == u'avant' and tokens[i + 6] == u'dernier' and tokens[i + 8] == u'alinéa':
node['order'] = -2
i += 10
# l'avant-dernier alinéa
elif tokens[i].lower() == u'l' and tokens[i + 2] == u'avant-dernier' and tokens[i + 4] == u'alinéa':
node['order'] = -2
i += 6
# à l'avant-dernier alinéa
elif tokens[i].lower() == u'à' and tokens[i + 2] == u'l' and tokens[i + 4] == u'avant-dernier' and tokens[i + 6] == u'alinéa':
node['order'] = -2
i += 10
# alinéa {order}
elif tokens[i].lower() == u'alinéa' and is_number(tokens[i + 2]):
node['order'] = parse_int(tokens[i + 2])
i += 4
# les alinéas
# des alinéas
elif tokens[i].lower() in [u'les', u'des'] and tokens[i + 2] == u'alinéas':
node['order'] = parse_int(tokens[i + 4])
i += 5
i = alinea_lexer.skip_spaces(tokens, i)
nodes = []
while tokens[i] == u',':
nodes.append(create_node(parent, {
'type': TYPE_ALINEA_REFERENCE,
'order': parse_int(tokens[i + 2])
}))
i += 3
i = alinea_lexer.skip_spaces(tokens, i)
if tokens[i] == u'et':
i += 2
nodes.append(create_node(parent, {
'type': TYPE_ALINEA_REFERENCE,
'order': parse_int(tokens[i])
}))
i += 2
i = parse_article_part_reference(tokens, i, node)
if len(node['children']) != 0:
for n in nodes:
for c in node['children']:
push_node(n, copy_node(c))
return i
else:
debug(parent, tokens, i, 'parse_alinea_reference none')
remove_node(parent, node)
return j
i = parse_article_part_reference(tokens, i, node)
# i = parse_quote(tokens, i, node)
debug(parent, tokens, i, 'parse_alinea_reference end')
return i
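# Illustrative sketch: "dernier" and "avant-dernier" are stored as the
# negative orders -1 and -2, which resolve like Python's negative list
# indices (a demo of the convention, mirroring the branches above):
_demo_alineas = [u'al. 1', u'al. 2', u'al. 3', u'al. 4']
assert _demo_alineas[-1] == u'al. 4'  # le dernier alinéa
assert _demo_alineas[-2] == u'al. 3'  # l'avant-dernier alinéa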
def parse_sentence_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_SENTENCE_REFERENCE,
})
debug(parent, tokens, i, 'parse_sentence_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# une phrase
# la phrase
if tokens[i].lower() in [u'la', u'une'] and tokens[i + 2] == 'phrase':
i += 4
# de la {partNumber} phrase
elif tokens[i].lower() == u'de' and tokens[i + 2] == u'la' and is_number_word(tokens[i + 4]) and tokens[i + 6] == u'phrase':
node['order'] = word_to_number(tokens[i + 4])
i += 8
# la {partNumber} phrase
elif tokens[i].lower() == u'la' and is_number_word(tokens[i + 2]) and tokens[i + 4] == u'phrase':
node['order'] = word_to_number(tokens[i + 2])
i += 6
# à la {partNumber} phrase
# À la {partNumber} phrase
elif (tokens[i] == u'à' or tokens[i] == u'À') and tokens[i + 2].lower() == u'la' and is_number_word(tokens[i + 4]) and tokens[i + 6] == u'phrase':
node['order'] = word_to_number(tokens[i + 4])
i += 8
# la dernière phrase
elif tokens[i].lower() == u'la' and tokens[i + 2] == u'dernière' and tokens[i + 4] == u'phrase':
node['order'] = -1
i += 6
# les {n} première phrases
elif tokens[i].lower() == u'les' and is_number_word(tokens[i + 2]) and tokens[i + 4] == u'premières' and tokens[i + 6] == u'phrases':
node['order'] = [0, word_to_number(tokens[i + 2])]
i += 8
else:
debug(parent, tokens, i, 'parse_sentence_reference none')
remove_node(parent, node)
return j
i = parse_article_part_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_sentence_reference end')
fix_incomplete_references(parent, node)
return i
def fix_incomplete_references(parent, node):
if len(parent['children']) >= 2:
for child in parent['children']:
if child['type'] == TYPE_INCOMPLETE_REFERENCE:
# set the actual reference type
child['type'] = node['type']
# copy all the child of the fully qualified reference node
for c in node['children']:
push_node(child, copy_node(c))
def parse_back_reference(tokens, i, parent):
if i >= len(tokens):
return i
if tokens[i] == u'Il':
refs = filter_nodes(
get_root(parent),
lambda n: is_reference(n)
)
for j in reversed(range(0, len(refs))):
if get_node_depth(refs[j]) <= get_node_depth(parent):
push_node(parent, copy_node(refs[j]))
break
i += 2
return i
def parse_incomplete_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_INCOMPLETE_REFERENCE,
})
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
if tokens[i].lower() == u'à' and tokens[i + 2] in [u'le', u'la'] and is_number_word(tokens[i + 4]):
node['order'] = word_to_number(tokens[i + 4])
i += 6
elif tokens[i].lower() in [u'le', u'la'] and is_number_word(tokens[i + 2]):
node['order'] = word_to_number(tokens[i + 2])
i += 4
elif j == i:
remove_node(parent, node)
return j
return i
def parse_word_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_WORD_REFERENCE
})
debug(parent, tokens, i, 'parse_word_reference')
j = i
i = alinea_lexer.skip_to_next_word(tokens, i)
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le mot
# les mots
# des mots
if tokens[i].lower() in [u'le', u'les', u'des'] and tokens[i + 2].startswith(u'mot'):
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_for_each(parse_quote, tokens, i, node)
i = alinea_lexer.skip_to_next_word(tokens, i)
i = parse_reference(tokens, i, node)
# le nombre
# le chiffre
# le taux
elif tokens[i].lower() == u'le' and tokens[i + 2] in [u'nombre', u'chiffre', u'taux']:
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, node)
# la référence
# les références
elif tokens[i].lower() in [u'la', u'les'] and tokens[i + 2].startswith(u'référence'):
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_word_reference none')
remove_node(parent, node)
return j
debug(parent, tokens, i, 'parse_word_reference end')
return i
def parse_header2_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_HEADER2_REFERENCE
})
debug(parent, tokens, i, 'parse_header2_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le {order}° ({multiplicativeAdverb}) ({articlePartRef})
# du {order}° ({multiplicativeAdverb}) ({articlePartRef})
# au {order}° ({multiplicativeAdverb}) ({articlePartRef})
if tokens[i].lower() in [u'le', u'du', u'au'] and re.compile(u'\d+°').match(tokens[i + 2]):
node['order'] = parse_int(tokens[i + 2])
i += 4
i = parse_multiplicative_adverb(tokens, i, node)
i = parse_article_part_reference(tokens, i, node)
# le même {order}° ({multiplicativeAdverb}) ({articlePartRef})
# du même {order}° ({multiplicativeAdverb}) ({articlePartRef})
# au même {order}° ({multiplicativeAdverb}) ({articlePartRef})
elif tokens[i].lower() in [u'le', u'du', u'au'] and tokens[i + 2] == u'même' and re.compile(u'\d+°').match(tokens[i + 4]):
node['order'] = parse_int(tokens[i + 4])
i += 6
i = parse_multiplicative_adverb(tokens, i, node)
i = parse_article_part_reference(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_header2_reference none')
remove_node(parent, node)
return j
# i = parse_quote(tokens, i, node)
debug(parent, tokens, i, 'parse_header2_reference end')
return i
def parse_header3_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_HEADER3_REFERENCE
})
debug(parent, tokens, i, 'parse_header3_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le {orderLetter} ({articlePartRef})
# du {orderLetter} ({articlePartRef})
# au {orderLetter} ({articlePartRef})
if tokens[i].lower() in [u'le', u'du', u'au'] and re.compile(u'^[a-z]$').match(tokens[i + 2]):
node['order'] = ord(str(tokens[i + 2])) - ord('a') + 1
i += 4
i = parse_multiplicative_adverb(tokens, i, node)
i = parse_article_part_reference(tokens, i, node)
# le même {orderLetter} ({articlePartRef})
# du même {orderLetter} ({articlePartRef})
# au même {orderLetter} ({articlePartRef})
elif tokens[i].lower() in [u'le', u'du', u'au'] and tokens[i + 2] == u'même' and re.compile(u'^[a-z]$').match(tokens[i + 4]):
node['order'] = ord(str(tokens[i + 4])) - ord('a') + 1
i += 6
i = parse_multiplicative_adverb(tokens, i, node)
i = parse_article_part_reference(tokens, i, node)
else:
debug(parent, tokens, i, 'parse_header3_reference none')
remove_node(parent, node)
return j
# i = parse_quote(tokens, i, node)
debug(parent, tokens, i, 'parse_header3_reference end')
return i
def parse_header1_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_HEADER1_REFERENCE,
})
debug(parent, tokens, i, 'parse_header1_reference')
j = i
i = parse_position(tokens, i, node)
i = parse_scope(tokens, i, node)
# le {romanPartNumber}
# du {romanPartNumber}
# un {romanPartNumber}
if tokens[i].lower() in [u'le', u'du', u'un'] and is_roman_number(tokens[i + 2]):
node['order'] = parse_roman_number(tokens[i + 2])
i += 4
else:
debug(parent, tokens, i, 'parse_header1_reference end')
remove_node(parent, node)
return j
i = parse_article_part_reference(tokens, i, node)
# i = parse_quote(tokens, i, node)
debug(parent, tokens, i, 'parse_header1_reference end')
return i
def parse_article_part_reference(tokens, i, parent):
if i >= len(tokens):
return i
# i = alinea_lexer.skip_to_next_word(tokens, i)
i = parse_one_of(
[
parse_alinea_reference,
parse_sentence_reference,
parse_word_reference,
parse_article_reference,
parse_header1_reference,
parse_header2_reference,
parse_header3_reference,
],
tokens,
i,
parent
)
return i
def parse_quote(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_QUOTE,
'words': '',
})
debug(parent, tokens, i, 'parse_quote')
i = alinea_lexer.skip_spaces(tokens, i)
# "
if tokens[i] == alinea_lexer.TOKEN_DOUBLE_QUOTE_OPEN:
i += 1
# # est rédigé(es)
# # ainsi rédigé(es)
# # est ainsi rédigé(es)
# elif (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
# or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
# i = alinea_lexer.skip_to_quote_start(tokens, i + 2) + 1
else:
remove_node(parent, node)
return i
while i < len(tokens) and tokens[i] != alinea_lexer.TOKEN_DOUBLE_QUOTE_CLOSE and tokens[i] != alinea_lexer.TOKEN_NEW_LINE:
node['words'] += tokens[i]
i += 1
node['words'] = node['words'].strip()
    # skip alinea_lexer.TOKEN_DOUBLE_QUOTE_CLOSE
i += 1
i = alinea_lexer.skip_spaces(tokens, i)
debug(parent, tokens, i, 'parse_quote end')
return i
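# Illustrative, standalone sketch of the quote loop above: accumulate tokens
# between an opening and a closing quote marker, stopping at a newline. The
# marker values here are placeholders; the real ones live in alinea_lexer.
def _demo_collect_quote(tokens, open_q=u'«', close_q=u'»', nl=u'\n'):
    if not tokens or tokens[0] != open_q:
        return u''
    words = u''
    for t in tokens[1:]:
        if t in (close_q, nl):
            break
        words += t
    return words.strip()
assert _demo_collect_quote([u'«', u' ', u'Art', u'.', u' ', u'1', u'»']) == u'Art. 1'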
# Parse the verb to determine the corresponding action (one of 'add',
# 'delete', 'edit', 'replace', 'rename' or 'ratified'); see the illustrative
# sketch after this function.
def parse_edit(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_EDIT
})
debug(parent, tokens, i, 'parse_edit')
# Supprimer {reference}
if tokens[i] == u'Supprimer':
i += 2
node['editType'] = 'delete'
i = parse_reference(tokens, i, node)
return i
r = i
# i = parse_for_each(parse_reference, tokens, i, node)
i = parse_reference_list(tokens, i, node)
# if we did not parse a reference
i = alinea_lexer.skip_spaces(tokens, i)
# if we didn't find any reference as a subject and the subject/verb are not reversed
if len(node['children']) == 0 and tokens[i] != 'Est' and tokens[i] != 'Sont':
remove_node(parent, node)
debug(parent, tokens, i, 'parse_edit none')
return i
# i = r
i = alinea_lexer.skip_tokens(tokens, i, lambda t: t.lower() not in [u'est', u'sont', u'devient'] and not t == u'.')
if i + 2 >= len(tokens):
remove_node(parent, node)
debug(parent, tokens, i, 'parse_edit eof')
return r
# sont supprimés
# sont supprimées
# est supprimé
# est supprimée
# est abrogé
# est abrogée
# sont abrogés
# sont abrogées
if i + 2 < len(tokens) and (tokens[i + 2].startswith(u'supprimé') or tokens[i + 2].startswith(u'abrogé')):
node['editType'] = 'delete'
i = alinea_lexer.skip_to_end_of_line(tokens, i)
# est ainsi rédigé
# est ainsi rédigée
# est ainsi modifié
# est ainsi modifiée
elif i + 4 < len(tokens) and (tokens[i + 4].startswith(u'rédigé') or tokens[i + 4].startswith(u'modifié')):
node['editType'] = 'edit'
i = alinea_lexer.skip_to_end_of_line(tokens, i)
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_definition(tokens, i, node)
# est remplacé par
# est remplacée par
# sont remplacés par
# sont remplacées par
elif i + 2 < len(tokens) and (tokens[i + 2].startswith(u'remplacé')):
node['editType'] = 'replace'
i += 6
i = parse_definition(tokens, i, node)
i = alinea_lexer.skip_to_end_of_line(tokens, i)
# remplacer
elif tokens[i].lower() == u'remplacer':
node['editType'] = 'replace'
i += 2
# i = parse_definition(tokens, i, node)
i = parse_reference(tokens, i, node)
i = alinea_lexer.skip_to_end_of_line(tokens, i)
if tokens[i].lower() == 'par':
i += 2
i = parse_definition(tokens, i, node)
i = alinea_lexer.skip_to_end_of_line(tokens, i)
# est inséré
# est insérée
# sont insérés
# sont insérées
# est ajouté
# est ajoutée
# sont ajoutés
# sont ajoutées
elif i + 2 < len(tokens) and (tokens[i + 2].startswith(u'inséré') or tokens[i + 2].startswith(u'ajouté')):
node['editType'] = 'add'
i += 4
i = parse_definition(tokens, i, node)
i = alinea_lexer.skip_to_end_of_line(tokens, i)
# est ainsi rétabli
elif i + 4 < len(tokens) and tokens[i + 4].startswith(u'rétabli'):
node['editType'] = 'add'
i = alinea_lexer.skip_to_end_of_line(tokens, i)
i = alinea_lexer.skip_spaces(tokens, i)
i = parse_definition(tokens, i, node)
# est complété par
elif i + 2 < len(tokens) and tokens[i + 2] == u'complété':
node['editType'] = 'add'
i += 6
# i = parse_definition(tokens, i, node)
i = parse_definition_list(tokens, i, node)
# i = alinea_lexer.skip_to_end_of_line(tokens, i)
# devient
elif tokens[i] == u'devient':
node['editType'] = 'rename'
i += 2
i = parse_definition(tokens, i, node)
# est ratifié:
elif i + 2 < len(tokens) and (tokens[i].lower() == u'est' and tokens[i + 2] == u'ratifié'):
node['editType']= 'ratified'
i += 4
else:
i = r
debug(parent, tokens, i, 'parse_edit remove')
remove_node(parent, node)
i = parse_raw_article_content(tokens, i, parent)
i = alinea_lexer.skip_to_end_of_line(tokens, i)
return i
    # We've parsed pretty much everything we could handle. At this point,
    # there should be no meaningful content left. But there might be trailing
    # spaces or punctuation (often "." or ";"), so we skip to the end of
    # the line.
i = alinea_lexer.skip_to_end_of_line(tokens, i)
debug(parent, tokens, i, 'parse_edit end')
return i
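# Illustrative sketch: the verb stems recognised by parse_edit and the
# editType each one selects (summary table for demonstration; parse_edit
# itself also parses the surrounding references and definitions):
_DEMO_EDIT_TYPES = {
    u'supprimé': 'delete', u'abrogé': 'delete',
    u'rédigé': 'edit', u'modifié': 'edit',
    u'remplacé': 'replace',
    u'inséré': 'add', u'ajouté': 'add', u'rétabli': 'add', u'complété': 'add',
    u'devient': 'rename',
    u'ratifié': 'ratified',
}
assert _DEMO_EDIT_TYPES[u'abrogé'] == 'delete'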
def parse_raw_article_content(tokens, i, parent):
node = create_node(parent, {
'type': 'raw-content',
'content': ''
})
debug(parent, tokens, i, 'parse_raw_article_content')
while i < len(tokens) and tokens[i] != alinea_lexer.TOKEN_NEW_LINE:
node['content'] += tokens[i]
i += 1
if node['content'] == '' or is_space(node['content']):
remove_node(parent, node)
debug(parent, tokens, i, 'parse_raw_article_content end')
return i
def parse_code_name(tokens, i, node):
while i < len(tokens) and tokens[i] != u',' and tokens[i] != u'est':
node['id'] += tokens[i]
i += 1
node['id'] = node['id'].strip()
return i
# Parse a reference to a specific or aforementioned code.
# References to a specific code are specified by using the exact name of that code (cf parse_code_name).
# References to an aforementioned code will be in the form of "le même code".
def parse_code_reference(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_CODE_REFERENCE,
'id': '',
})
debug(parent, tokens, i, 'parse_code_reference')
# code
if tokens[i] == u'code':
i = parse_code_name(tokens, i, node)
# le code
# du code
elif tokens[i].lower() in [u'le', u'du'] and tokens[i + 2] == 'code':
i = parse_code_name(tokens, i + 2, node)
# le même code
# du même code
elif tokens[i].lower() in [u'le', u'du'] and tokens[i + 2] == u'même' and tokens[i + 4] == 'code':
remove_node(parent, node)
codeRefs = filter_nodes(
get_root(parent),
lambda n: 'type' in n and n['type'] == TYPE_CODE_REFERENCE
)
        # the last one in order of traversal is the previous one in order of syntax
node = copy_node(codeRefs[-1])
node['children'] = []
push_node(parent, node)
# skip "le même code "
i += 6
if node['id'] == '' or is_space(node['id']):
remove_node(parent, node)
else:
i = parse_reference(tokens, i, node)
debug(parent, tokens, i, 'parse_code_reference end')
return i
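# Illustrative, standalone sketch: "le même code" re-uses the most recently
# parsed code-reference node instead of naming the code again. A minimal
# model of that back-reference behaviour (demo data, not the real tree):
_demo_seen_codes = [{'type': 'code-reference', 'id': u'code civil'}]
_demo_clone = dict(_demo_seen_codes[-1], children=[])  # copy node, reset children
assert _demo_clone['id'] == u'code civil'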
def parse_definition_list(tokens, i, parent):
if i >= len(tokens):
return i
i = parse_definition(tokens, i, parent)
i = alinea_lexer.skip_spaces(tokens, i)
if ((i + 2 < len(tokens) and tokens[i] == u',' and tokens[i + 2] in [u'à', u'au'])
or (i + 2 < len(tokens) and tokens[i] == u'et')):
i = parse_definition_list(tokens, i + 2, parent)
i = alinea_lexer.skip_spaces(tokens, i)
# est rédigé(es)
# ainsi rédigé(es)
# est ainsi rédigé(es)
if (i + 2 < len(tokens) and tokens[i + 2].startswith(u'rédigé')
or (i + 4 < len(tokens) and tokens[i + 4].startswith(u'rédigé'))):
i += 6
def_nodes = filter_nodes(parent, lambda x: duralex.tree.is_definition(x))
for def_node in def_nodes:
i = alinea_lexer.skip_to_quote_start(tokens, i)
i = parse_quote(tokens, i, def_node)
return i
# Parse multiple references separated by commas or the word "et".
# All the parsed references will be siblings in parent['children'] and resolve_fully_qualified_references + sort_references
# will take care of reworking the tree to make sure each reference in the list is complete and consistent.
def parse_reference_list(tokens, i, parent):
if i >= len(tokens):
return i
i = parse_reference(tokens, i, parent)
i = alinea_lexer.skip_spaces(tokens, i)
if ((i + 2 < len(tokens) and tokens[i] == u',' and tokens[i + 2] in [u'à', u'au'])
or (i + 2 < len(tokens) and tokens[i] == u'et')):
i = parse_reference_list(tokens, i + 2, parent)
i = alinea_lexer.skip_spaces(tokens, i)
return i
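# Illustrative, standalone sketch: enumerations such as "les articles 1, 2
# et 3" are handled by recursing on "," and "et" separators, producing one
# sibling reference per item. A string-level demo of that shape:
def _demo_split_enumeration(items):
    head, _, tail = items.partition(u' et ')
    parts = [p.strip() for p in head.split(u',')]
    return parts + ([tail.strip()] if tail else [])
assert _demo_split_enumeration(u'1, 2 et 3') == [u'1', u'2', u'3']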
def parse_one_of(fns, tokens, i, parent):
# i = alinea_lexer.skip_to_next_word(tokens, i)
if i >= len(tokens):
return i
for fn in fns:
j = fn(tokens, i, parent)
if j != i:
return j
i = j
return i
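# Illustrative, standalone sketch of the combinator contract used above:
# every sub-parser returns the input index unchanged on failure, so the
# first parser that advances the index wins (demo parsers, plain lists):
def _demo_one_of(fns, tokens, i):
    for fn in fns:
        j = fn(tokens, i)
        if j != i:
            return j
    return i
_demo_num = lambda tokens, i: i + 1 if tokens[i].isdigit() else i
_demo_word = lambda tokens, i: i + 1 if tokens[i].isalpha() else i
assert _demo_one_of([_demo_num, _demo_word], [u'abc'], 0) == 1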
def parse_reference(tokens, i, parent):
# node = create_node(parent, {'type':'reference'})
node = parent
j = i
i = parse_one_of(
[
parse_law_reference,
parse_code_reference,
parse_code_part_reference,
parse_section_reference,
parse_subsection_reference,
parse_chapter_reference,
parse_title_reference,
parse_book_reference,
parse_article_reference,
parse_article_part_reference,
parse_paragraph_reference,
parse_back_reference,
parse_incomplete_reference,
parse_alinea_reference,
parse_word_reference,
parse_bill_article_reference,
],
tokens,
i,
node
)
# if len(node['children']) == 0:
# remove_node(parent, node)
# return j
return i
# {romanNumber}.
# ex: I., II.
def parse_header1(tokens, i, parent):
if i >= len(tokens):
return i
i = alinea_lexer.skip_spaces(tokens, i)
node = create_node(parent, {
'type': TYPE_HEADER1,
})
debug(parent, tokens, i, 'parse_header1')
# skip '{romanNumber}.'
if is_roman_number(tokens[i]) and tokens[i + 1] == u'.':
debug(parent, tokens, i, 'parse_header1 found article header-1')
node['order'] = parse_roman_number(tokens[i])
i = alinea_lexer.skip_to_next_word(tokens, i + 2)
else:
remove_node(parent, node)
node = parent
j = i
i = parse_edit(tokens, i, node)
i = parse_for_each(parse_header2, tokens, i, node)
if len(node['children']) == 0:
i = parse_raw_article_content(tokens, i, node)
i = parse_for_each(parse_header2, tokens, i, node)
if len(node['children']) == 0 and parent != node:
remove_node(parent, node)
debug(parent, tokens, i, 'parse_header1 end')
return i
# {number}°
# ex: 1°, 2°
def parse_header2(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_HEADER2,
})
debug(parent, tokens, i, 'parse_header2')
i = alinea_lexer.skip_spaces(tokens, i)
if i < len(tokens) and re.compile(u'\d+°').match(tokens[i]):
debug(parent, tokens, i, 'parse_header2 found article header-2')
node['order'] = parse_int(tokens[i])
# skip {number}°
i += 2
i = alinea_lexer.skip_to_next_word(tokens, i)
else:
remove_node(parent, node)
node = parent
j = i
i = parse_edit(tokens, i, node)
i = parse_for_each(parse_header3, tokens, i, node)
if len(node['children']) == 0 and 'order' in node:
i = parse_raw_article_content(tokens, i, node)
i = parse_for_each(parse_header3, tokens, i, node)
if node != parent and len(node['children']) == 0:
remove_node(parent, node)
debug(parent, tokens, i, 'parse_header2 end')
return i
# {number})
# ex: a), b), a (nouveau))
def parse_header3(tokens, i, parent):
if i >= len(tokens):
return i
node = create_node(parent, {
'type': TYPE_HEADER3,
})
debug(parent, tokens, i, 'parse_header3')
i = alinea_lexer.skip_spaces(tokens, i)
if i >= len(tokens):
remove_node(parent, node)
return i
match = re.compile('([a-z]+)').match(tokens[i])
if match and (tokens[i + 1] == u')' or (tokens[i + 2] == u'(' and tokens[i + 5] == u')')):
node['order'] = ord(match.group()[0].encode('utf-8')) - ord('a') + 1
        # skip '{number}) ' or '{number} (nouveau))'
if tokens[i + 1] == u')':
i += 3
else:
i += 7
# i = parse_edit(tokens, i, node)
else:
remove_node(parent, node)
node = parent
j = i
i = parse_edit(tokens, i, node)
if len(node['children']) == 0 and 'order' in node:
i = parse_raw_article_content(tokens, i, node)
if node != parent and len(node['children']) == 0:
remove_node(parent, node)
debug(parent, tokens, i, 'parse_header3 end')
return i
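# Illustrative sketch: the three header levels recognised above nest as
# "I." (roman) > "1°" (numbered) > "a)" (lettered). Demo patterns only;
# the real code matches on already-tokenized input:
import re as _demo_re
assert _demo_re.match(r'^[IVXLCDM]+\.$', u'II.')
assert _demo_re.match(u'^\\d+°$', u'3°')
assert _demo_re.match(r'^[a-z]\)$', u'b)')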
def parse_for_each(fn, tokens, i, parent):
n = parent() if callable(parent) else parent
test = fn(tokens, i, n)
if (test == i or len(n['children']) == 0) and callable(parent):
remove_node(n['parent'], n)
while test != i:
i = test
n = parent() if callable(parent) else parent
test = fn(tokens, i, n)
if (test == i or len(n['children']) == 0) and callable(parent):
remove_node(n['parent'], n)
return i
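# Illustrative, standalone sketch of parse_for_each: apply a parser
# repeatedly until it stops advancing (the node-factory and pruning details
# of the real function are omitted in this demo):
def _demo_for_each(fn, tokens, i):
    j = fn(tokens, i)
    while j != i:
        i = j
        j = fn(tokens, i)
    return i
_demo_digit = lambda t, i: i + 1 if i < len(t) and t[i].isdigit() else i
assert _demo_for_each(_demo_digit, [u'1', u'2', u'a'], 0) == 2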
def parse_bill_articles(data, parent):
if 'articles' in data:
for article_data in data['articles']:
parse_bill_article(article_data, parent)
elif 'alineas' in data:
parse_bill_article(data, parent)
return data
def parse_bill_article(data, parent):
node = create_node(parent, {
'type': TYPE_BILL_ARTICLE,
'order': 1,
'isNew': False
})
node['order'] = data['order']
if 'alineas' in data:
parse_json_alineas(data['alineas'], node)
def parse_json_alineas(data, parent):
    text = alinea_lexer.TOKEN_NEW_LINE.join(value for key, value in sorted(data.items()))
    parent['content'] = text  # .decode('utf-8')
return parse_alineas(text, parent)
def parse_alineas(data, parent):
tokens = alinea_lexer.tokenize(data.strip())
parse_for_each(parse_header1, tokens, 0, parent)
if len(parent['children']) == 0:
parse_raw_article_content(tokens, 0, parent)
def parse(data, tree):
# tree = create_node(tree, {'type': 'articles'})
parse_bill_articles(data, tree)
return tree
|
Legilibre/duralex
|
duralex/alinea_parser.py
|
Python
|
mit
| 59,689
|
# coding: utf-8
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from composition import views
urlpatterns = [
url(r'^$', views.CompositionList.as_view()),
url(r'^(?P<pk>[0-9]+)/$', views.CompositionDetail.as_view()),
url(r'^image/(?P<pk>[0-9]+)/$', views.CompositionImage.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
lbjworld/article-search
|
article-manager/site/composition/urls.py
|
Python
|
mit
| 409
|
"""
Take 9: predicting country_destination with XGBoost.
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1].values
y = training['country_destination'].values
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
# LabelEncoder
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(y_train);
y_train = le.transform(y_train);
y_valid = le.transform(y_valid);
# Train classifier
import xgboost as xgb
xg_train = xgb.DMatrix(x_train,label=y_train);
xg_valid = xgb.DMatrix(x_valid,label=y_valid);
# setup parameters for xgboost
param = {}
# use softmax multi-class classification
param['objective'] = 'multi:softmax' # can be 'multi:softmax' or 'multi:softprob'
# scale weight of positive examples
param['eta'] = 0.9
param['max_depth'] = 100
param['gamma'] = 0.1
param['silent'] = 0 # 1 means silent mode
param['nthread'] = 5
param['min_child_weight'] = 0.1 # 1 is the default; the larger, the more conservative
param['num_class'] = len(np.unique(y_train).tolist());
param['booster'] = 'gbtree' # default is 'gbtree'
param['subsample'] = 1.0 # default is 1.0
param['base_score'] = 0.5 # default is 0.5
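# Illustrative aside (demo only, not executed below): with 'multi:softprob'
# the booster emits per-class probabilities instead of hard labels; depending
# on the xgboost version the output may need reshaping to (n, num_class).
# p_demo = dict(param, objective='multi:softprob')
# probs = xgb.train(p_demo, xg_train, 2).predict(xg_valid)
# hard = probs.reshape(-1, param['num_class']).argmax(axis=1)  # ~ softmax output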
# Train & Get validation data
num_round = 10
clf = xgb.train(param, xg_train, num_round);
#clf = xgb.cv(param, xg_train, num_round);
# get predictions
y_preds = clf.predict( xg_valid );
# Run Predictions
from sklearn.metrics import confusion_matrix, accuracy_score
print( confusion_matrix(y_valid,y_preds) );
print( "Accuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f = open('xgboost_take9.txt', 'w')
f.write( str(confusion_matrix(y_valid,y_preds)) );
f.write( "\nAccuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f.write( str(param) );
f.close()
# Now on to final submission
xg_test = xgb.DMatrix(testing.iloc[:,1:].values);
y_final = le.inverse_transform( clf.predict(xg_test).reshape([62096,]).astype(int) );
y_final = pd.DataFrame(y_final);
numbahs = testing['id']
df = pd.concat([numbahs,y_final],axis=1)
df.columns = ['id','country']
df.to_csv("xgboost_take9.csv",index=False)
# Save model
clf.save_model('xgb_take9.model');
|
valexandersaulys/airbnb_kaggle_contest
|
prototype_alpha/xgboost_take9.py
|
Python
|
gpl-2.0
| 2,282
|
from . import test_customize
from . import test_sale_process
from . import test_website_sale_cart_recovery
from . import test_website_sale_mail
from . import test_website_sale_pricelist
from . import test_website_sale_product_attribute_value_config
from . import test_website_sale_image
|
t3dev/odoo
|
addons/website_sale/tests/__init__.py
|
Python
|
gpl-3.0
| 287
|
#!/usr/bin/env python
import sys
def convert_str(infile, outfile):
f = open(infile, 'r')
lines = f.readlines()
f.close()
f = open(outfile, 'w')
f.writelines(['"%s\\n"\n' % i.rstrip() for i in lines])
f.close()
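# Illustrative sketch (demo only): each input line becomes a quoted C string
# literal with an escaped newline, ready to be #include'd as shader source:
#   'void main()'  ->  '"void main()\n"'
def _demo_quote_line(line):
    return '"%s\\n"\n' % line.rstrip()
assert _demo_quote_line('void main()\n') == '"void main()\\n"\n'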
def main():
convert_str('fountain.vert', 'fountain.vert.inc')
convert_str('fountain.frag', 'fountain.frag.inc')
if __name__ == '__main__':
main()
|
fountainment/FountainEngineImproved
|
fountain/render/convert_shader.py
|
Python
|
mit
| 396
|
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_image(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 42, 44
graphEntity.__init__(self, x, y)
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
try:
self.image_gf0 = PhotoImage(file='genOOCSMP.gif')
h = drawing.create_image(self.translate([1.0, 1.0]), tags = self.tag, image=self.image_gf0 )
self.gf0 = GraphicalForm(drawing, h, "gf0")
self.graphForms.append(self.gf0)
        except Exception:
            # the icon file may be missing; fall back to drawing nothing
            pass
def postCondition (self, actionID, * params):
return None
def preCondition (self, actionID, * params):
return None
new_class = graph_image
|
Balannen/LSMASOMM
|
atom3/Kernel/GraphicalObjects/graph_image.py
|
Python
|
gpl-3.0
| 1,116
|
import logging
try:
from typing import Union, Optional
except ImportError:
pass
import rope.base.utils as base_utils
from rope.base.evaluate import ScopeNameFinder
from rope.base.exceptions import AttributeNotFoundError
from rope.base.pyobjects import PyClass, PyDefinedObject, PyFunction, PyObject
from rope.base.utils import pycompat
def get_super_func(pyfunc):
if not isinstance(pyfunc.parent, PyClass):
return
for cls in get_mro(pyfunc.parent)[1:]:
try:
superfunc = cls.get_attribute(pyfunc.get_name()).get_object()
except AttributeNotFoundError:
pass
else:
if isinstance(superfunc, PyFunction):
return superfunc
def get_super_assignment(pyname):
"""
:type pyname: rope.base.pynamesdef.AssignedName
:type: rope.base.pynamesdef.AssignedName
"""
try:
pyclass, attr_name = get_class_with_attr_name(pyname)
except TypeError:
return
else:
for super_pyclass in get_mro(pyclass)[1:]:
if attr_name in super_pyclass:
return super_pyclass[attr_name]
def get_class_with_attr_name(pyname):
"""
:type pyname: rope.base.pynamesdef.AssignedName
:return: rope.base.pyobjectsdef.PyClass, str
:rtype: tuple
"""
lineno = get_lineno_for_node(pyname.assignments[0].ast_node)
holding_scope = pyname.module.get_scope().get_inner_scope_for_line(lineno)
pyobject = holding_scope.pyobject
if isinstance(pyobject, PyClass):
pyclass = pyobject
elif isinstance(pyobject, PyFunction) and isinstance(pyobject.parent, PyClass):
pyclass = pyobject.parent
else:
return
for name, attr in pyclass.get_attributes().items():
if attr is pyname:
return (pyclass, name)
def get_lineno_for_node(assign_node):
if hasattr(assign_node, "lineno") and assign_node.lineno is not None:
return assign_node.lineno
return 1
def get_mro(pyclass):
# FIXME: to use real mro() result
class_list = [pyclass]
for cls in class_list:
for super_cls in cls.get_superclasses():
if isinstance(super_cls, PyClass) and super_cls not in class_list:
class_list.append(super_cls)
return class_list
def resolve_type(type_name, pyobject):
# type: (str, Union[PyDefinedObject, PyObject]) -> Optional[PyDefinedObject, PyObject]
"""
Find proper type object from its name.
"""
deprecated_aliases = {"collections": "collections.abc"}
ret_type = None
logging.debug("Looking for %s", type_name)
if "." not in type_name:
try:
ret_type = (
pyobject.get_module().get_scope().get_name(type_name).get_object()
)
except AttributeNotFoundError:
logging.exception("Cannot resolve type %s", type_name)
else:
mod_name, attr_name = type_name.rsplit(".", 1)
try:
mod_finder = ScopeNameFinder(pyobject.get_module())
mod = mod_finder._find_module(mod_name).get_object()
ret_type = mod.get_attribute(attr_name).get_object()
except AttributeNotFoundError:
if mod_name in deprecated_aliases:
try:
logging.debug(
"Looking for %s in %s", attr_name, deprecated_aliases[mod_name]
)
mod = mod_finder._find_module(
deprecated_aliases[mod_name]
).get_object()
ret_type = mod.get_attribute(attr_name).get_object()
except AttributeNotFoundError:
logging.exception(
"Cannot resolve type %s in %s", attr_name, dir(mod)
)
logging.debug("ret_type = %s", ret_type)
return ret_type
class ParametrizeType(object):
_supported_mapping = {
"builtins.list": "rope.base.builtins.get_list",
"builtins.tuple": "rope.base.builtins.get_tuple",
"builtins.set": "rope.base.builtins.get_set",
"builtins.dict": "rope.base.builtins.get_dict",
"_collections_abc.Iterable": "rope.base.builtins.get_iterator",
"_collections_abc.Iterator": "rope.base.builtins.get_iterator",
"collections.abc.Iterable": "rope.base.builtins.get_iterator", # Python3.3
"collections.abc.Iterator": "rope.base.builtins.get_iterator", # Python3.3
}
if pycompat.PY2:
_supported_mapping = dict(
(
(
k.replace("builtins.", "__builtin__.").replace(
"_collections_abc.", "_abcoll."
),
v,
)
for k, v in _supported_mapping.items()
)
)
def __call__(self, pyobject, *args, **kwargs):
"""
:type pyobject: rope.base.pyobjects.PyObject
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
"""
type_factory = self._get_type_factory(pyobject)
if type_factory:
parametrized_type = type_factory(*args, **kwargs)
if parametrized_type:
return parametrized_type
return pyobject
def _get_type_factory(self, pyobject):
type_str = "{0}.{1}".format(
pyobject.get_module().get_name(),
pyobject.get_name(),
)
if type_str in self._supported_mapping:
return base_utils.resolve(self._supported_mapping[type_str])
parametrize_type = ParametrizeType()
|
python-rope/rope
|
rope/base/oi/type_hinting/utils.py
|
Python
|
lgpl-3.0
| 5,609
|
"""
Visualize the Impact of Hygroscopic Growth
==========================================
_thumb: .4, .4
"""
import seaborn as sns
import numpy as np
import opcsim
sns.set(style='ticks', font_scale=1.25)
# build a distribution for a single mode of ammonium sulfate
d = opcsim.AerosolDistribution("Ammonium Sulfate")
# add a single mode
d.add_mode(1e3, 0.8e-2, 1.5, refr=(1.521+0j), rho=1.77, kappa=0.53)
# iterate over a few RH's and plot
ax = None
cpal = sns.color_palette("GnBu_d", 5)
for i, rh in enumerate(np.linspace(5, 95, 5)):
ax = opcsim.plots.pdfplot(d, rh=rh, plot_kws=dict(color=cpal[i]),
ax=ax, weight='volume', label="RH={:.0f}%".format(rh))
# Set the title and axes labels
ax.set_title("Ammonium Sulfate", fontsize=18)
# Add a legend
ax.legend(loc='best')
# Set the ylim
ax.set_ylim(0, None)
# Remove the top and right spines
sns.despine()
|
dhhagan/opcsim
|
examples/hygroscopic_growth_pdf.py
|
Python
|
mit
| 896
|
import logging
from typing import Callable
import weakref
from functools import partial
_LOG = logging.getLogger(__name__)
class BoundForwardReference(object):
@property
def resolver(self):
return self._resolver
@resolver.setter
def resolver(self, value: Callable):
try:
self._resolver = weakref.WeakMethod(value)
except TypeError:
self._resolver = weakref.ref(value)
def __init__(self, instance, resource_key_fget):
super(BoundForwardReference, self).__init__()
self._instance_ref = weakref.ref(instance)
self._resource_key_fget = weakref.WeakMethod(resource_key_fget)
self._resolver = None
def __get__(self, instance, owner=None):
if not instance:
return self
if self._resource_key_fget is None:
# Should never get here, but just in case
msg = 'ResourceReference not attached to a getter method.'
raise AttributeError(msg)
resolver = self._resolver
if isinstance(resolver, weakref.ref):
resolver = resolver()
if not resolver:
msg = 'Failed to resolve Resource: Resolver reference dead'
raise RuntimeError(msg)
resource_key = self._resource_key_fget(instance)
try:
result = resolver(resource_key)
except Exception as e:
msg = 'Failed to resolve Resource reference: "{0}" - {1}: {2}'
msg = msg.format(resource_key, type(e).__name__, e)
_LOG.debug(msg)
else:
msg = 'Resolved Resource: "{0}"'
msg = msg.format(repr(result))
_LOG.debug(msg)
return result
class ForwardReference(object):
def __init__(self, resource_key_fget):
super(ForwardReference, self).__init__()
self._resource_key_fget = resource_key_fget
self._bound_fwd_refs = weakref.WeakKeyDictionary()
def __get__(self, instance, owner=None):
if not instance:
return self
try:
result = self._bound_fwd_refs[instance]
except KeyError:
result = BoundForwardReference(instance, self._resource_key_fget)
self._bound_fwd_refs[instance] = result
return result
def __set__(self, instance, value):
if self._bound_fwd_refs.get(instance) is value:
return
raise ValueError()
class ResourceReference(object):
"""
Provides a mechanism for allowing one `Resource` object to provide access
to another `Resource` object.
The implementation of this mechanism relies on a `Model` to provide a
reference to its resources table upon registration of a `Resource`.
Currently, multiple `Model` instances can exist at the same time. Being a
descriptor, `ResourceReference` instances exist in a broader scope (class
instances) than `Model` instances. Thus, `ResourceReference` instances must
account for the possibility that multiple resource tables will need to be
handled concurrently. To handle this possibility, `ResourceReference`
maintains a map of `Resource` Ids and resource handles. Whenever a `Model`
registers a `Resource`, a new entry is created in this mapping. This
directly affects performance when registering and resolving `Resource`s.
Should a bottleneck occur, a decision will need to be made as to whether
or not multiple `Model` instances should be permitted.
"""
def __init__(self, resource_key_fget):
self._resource_key_fget = resource_key_fget
self._map__resource_id__resolver = weakref.WeakKeyDictionary()
def __get__(self, instance, _):
if instance is None:
return self
if self._resource_key_fget is None:
# Should never get here, but just in case
msg = 'ResourceReference not attached to a getter method.'
raise AttributeError(msg)
result = None
try:
resolver = self._map__resource_id__resolver[instance.id]
except TypeError:
# Occurs when instance.id is None. While this happens during
# testing, it should not happen in production.
return result
except KeyError:
msg = (
'Failed to resolve Resource:'
'Resource instance "{0}" not registered with a Model.'
)
msg = msg.format(repr(instance))
_LOG.warn(msg)
return result
else:
if isinstance(resolver, weakref.ref):
resolver = resolver()
if not resolver:
msg = 'Failed to resolve Resource: Resolver reference dead'
raise RuntimeError(msg)
resource_key = self._resource_key_fget(instance)
try:
result = resolver(resource_key)
except Exception as e:
msg = 'Failed to resolve Resource reference: "{0}" - {1}: {2}'
msg = msg.format(resource_key, type(e).__name__, e)
_LOG.debug(msg)
else:
msg = 'Resolved Resource: "{0}"'
msg = msg.format(repr(result))
_LOG.debug(msg)
return result
def add_resolver(self, resource_instance, resolver):
"""
Registers a callable capable of producing one or more `Resources`
using a given key.
The intention behind registration is to provide for the possibility
that multiple `Model` instances may exist within the same interpreter.
Whenever a `Resource` is registered with a `Model`, the `Model` will
register a handler with this object.
Note:
This could be problematic, as it allows for the possibility of a
very large number of reference objects to be created.
See class docstring.
"""
callback_died_handler = partial(
weakref.WeakMethod(self._callback_died),
weakref.ref(resource_instance.id))
try:
resolver = weakref.WeakMethod(resolver, callback_died_handler)
except TypeError:
resolver = weakref.ref(resolver, callback_died_handler)
map_rid_r = self._map__resource_id__resolver
map_rid_r[resource_instance.id] = resolver
def remove_resolver(self, resource_instance):
"""
Removes a previously registered resolver callable.
"""
map_rid_r = self._map__resource_id__resolver
try:
del map_rid_r[resource_instance.id]
except KeyError:
pass
def _callback_died(self, resource_id_ref, _):
try:
del self._map__resource_id__resolver[resource_id_ref()]
except KeyError:
pass
|
artPlusPlus/elemental-backend
|
elemental_backend/resources/_resource_reference.py
|
Python
|
mpl-2.0
| 6,809
|
import functools
from django import http
from django.shortcuts import get_object_or_404
import commonware.log
from olympia.access import acl
from olympia.addons.models import Addon
log = commonware.log.getLogger('mkt.purchase')
def owner_or_unlisted_reviewer(request, addon):
return (acl.check_unlisted_addons_reviewer(request) or
# We don't want "admins" here, because it includes anyone with the
# "Addons:Edit" perm, we only want those with
# "Addons:ReviewUnlisted" perm (which is checked above).
acl.check_addon_ownership(request, addon, admin=False, dev=True,
viewer=True, support=True))
def addon_view(f, qs=Addon.objects.all):
@functools.wraps(f)
def wrapper(request, addon_id=None, app_slug=None, *args, **kw):
"""Provides an addon given either an addon id or an addon slug."""
assert addon_id, 'Must provide addon id or slug'
if addon_id and addon_id.isdigit():
addon = get_object_or_404(qs(), id=addon_id)
# Don't get in an infinite loop if addon.slug.isdigit().
if addon.slug and addon.slug != addon_id:
url = request.path.replace(addon_id, addon.slug, 1)
if request.GET:
url += '?' + request.GET.urlencode()
return http.HttpResponsePermanentRedirect(url)
else:
addon = get_object_or_404(qs(), slug=addon_id)
# If the addon is unlisted it needs either an owner/viewer/dev/support,
# or an unlisted addon reviewer.
if not (addon.is_listed or owner_or_unlisted_reviewer(request, addon)):
raise http.Http404
return f(request, addon, *args, **kw)
return wrapper
def addon_view_factory(qs):
# Don't evaluate qs or the locale will get stuck on whatever the server
# starts with. The addon_view() decorator will call qs with no arguments
# before doing anything, so lambdas are ok.
# GOOD: Addon.objects.valid
# GOOD: lambda: Addon.objects.valid().filter(type=1)
# BAD: Addon.objects.valid()
return functools.partial(addon_view, qs=qs)
|
andymckay/addons-server
|
src/olympia/addons/decorators.py
|
Python
|
bsd-3-clause
| 2,174
|
#!/usr/bin/env python
#
# Copyright 2011,2012,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt5 import QtWidgets, Qt
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt5 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtWidgets.QWidget):
def __init__(self, display, control):
QtWidgets.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtWidgets.QBoxLayout(QtWidgets.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtWidgets.QToolTip.setFont(Qt.QFont('OldEnglish', 10))
self.layout = QtWidgets.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtWidgets.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.freq1Edit.editingFinished.connect(self.freq1EditText)
self.amp1Edit = QtWidgets.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.amp1Edit.editingFinished.connect(self.amp1EditText)
# Control the second signal
self.freq2Edit = QtWidgets.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.freq2Edit.editingFinished.connect(self.freq2EditText)
self.amp2Edit = QtWidgets.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.amp2Edit.editingFinished.connect(self.amp2EditText)
self.quit = QtWidgets.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.quit.clicked.connect(QtWidgets.qApp.quit)
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(("{0}").format(self.signal1.frequency()))
self.amp1Edit.setText(("{0}").format(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(("{0}").format(self.signal2.frequency()))
self.amp2Edit.setText(("{0}").format(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 200
npts = 2048
self.qapp = QtWidgets.QApplication(sys.argv)
ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
sstext = ss.read()
ss.close()
self.qapp.setStyleSheet(sstext)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.01)
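        # Throttle caps the flow at 100*npts samples/sec so this purely
        # software flowgraph doesn't spin the CPU (no hardware sink paces it).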
thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
self.snk1 = qtgui.time_sink_c(npts, Rs,
"Complex Time Example", 1, None)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, channel, thr, (self.snk1, 0))
#self.connect(src1, (self.snk1, 1))
#self.connect(src2, (self.snk1, 2))
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
        # Get the reference pointer to the time sink's QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt5.QtWidgets.QWidget
pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)
# Example of using signal/slot to set the title of a curve
# FIXME: update for Qt5
#pyWin.setLineLabel.connect(pyWin.setLineLabel)
#pyWin.emit(QtCore.SIGNAL("setLineLabel(int, QString)"), 0, "Re{sum}")
self.snk1.set_line_label(0, "Re{Sum}")
self.snk1.set_line_label(1, "Im{Sum}")
#self.snk1.set_line_label(2, "Re{src1}")
#self.snk1.set_line_label(3, "Im{src1}")
#self.snk1.set_line_label(4, "Re{src2}")
#self.snk1.set_line_label(5, "Im{src2}")
# Can also set the color of a curve
#self.snk1.set_color(5, "blue")
self.snk1.set_update_time(0.5)
#pyWin.show()
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
    tb = my_top_block()
tb.start()
tb.qapp.exec_()
tb.stop()
|
mbr0wn/gnuradio
|
gr-qtgui/examples/pyqt_time_c.py
|
Python
|
gpl-3.0
| 6,141
|
# -*- coding: utf-8 -*-
""" Unit tests for the MultiWarpClassifier class.
"""
import unittest
import numpy as np
from warpclassifier import WarpClassifier
from ioutils import load_data, load_data_pixiv
from features import Combine, BGRHist, HoG
from cross_validation import k_fold_split
class TestWarpClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
names = [
"初音ミク",
"鏡音リン",
"本田菊",
"チルノ",
"鏡音レン",
"アーサー・カークランド",
"レミリア",
"暁美ほむら",
"アリス",
"霧雨魔理沙",
"ルーミア",
"黒子テツヤ",
"美樹さやか",
"巡音ルカ",
"ギルベルト・バイルシュミット",
"フランドール・スカーレット",
"坂田銀時",
"古明地こいし",
"東風谷早苗",
"アルフレッド・F・ジョーンズ"
]
print "loading data..."
images, labels = load_data_pixiv('data/pixiv-images-1000', names)
print "finished loading data."
fold_samples, fold_labels = k_fold_split(images, labels, 3)
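        # Folds 1..k-1 are concatenated into the training set; fold 0 is
        # held out as the test set.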
cls.traindata = reduce(lambda l1,l2: l1 + l2, fold_samples[1:])
cls.trainlabels = reduce(lambda l1,l2: l1 + l2, fold_labels[1:])
cls.testdata = fold_samples[0]
cls.testlabels = fold_labels[0]
print repr(len(cls.traindata)) + " training samples"
print repr(len(cls.testdata)) + " test samples"
print repr(len(images)) + " total"
def test_classifier(self):
""" Tests one vs all classification.
"""
# Run training.
nbbins = (4,4,4)
feature = Combine(
HoG(9, 0),
BGRHist(nbbins, 0)
)
mindimdiv = 10
C = 0.1
classifier = WarpClassifier(
feature,
mindimdiv,
C,
learning_rate=0.001,
nb_iter=50,
inc_rate=1.2,
dec_rate=0.5,
verbose=True,
use_pca=0.9
)
classifier.train_named(self.traindata, self.trainlabels)
predicted = classifier.predict_named(self.testdata)
print classifier.top_accuracy_named(self.testdata, self.testlabels)
if __name__ == "__main__":
unittest.main()
|
alexisVallet/dpm-identification
|
test_warpclassifier.py
|
Python
|
gpl-2.0
| 2,485
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class CodeRepl(base.BaseInteraction):
"""Interaction that allows programs to be input."""
name = 'Code Editor'
description = 'Allows learners to enter code and get it evaluated.'
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_trainable = True
_dependency_ids = ['skulpt', 'codemirror']
answer_type = 'CodeEvaluation'
instructions = 'Type code in the editor'
needs_summary = True
# Language options 'lua', 'scheme', 'coffeescript', 'javascript', and
# 'ruby' have been removed for possible later re-release.
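    # (Re-enabling one would mean adding its name back to the 'choices'
    # list below, plus restoring whatever dependencies it needs.)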
_customization_arg_specs = [{
'name': 'language',
'description': 'Programming language',
'schema': {
'type': 'unicode',
'choices': [
'python',
]
},
'default_value': 'python'
}, {
'name': 'placeholder',
'description': 'Initial code displayed',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': '# Type your code here.'
}, {
'name': 'preCode',
'description': 'Code to prepend to the learner\'s submission',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': ''
}, {
'name': 'postCode',
'description': 'Code to append after the learner\'s submission',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': ''
}]
|
won0089/oppia
|
extensions/interactions/CodeRepl/CodeRepl.py
|
Python
|
apache-2.0
| 2,338
|
import numpy as np
import control
import sympy
from pypiw import algorithms, systems
def main():
""" Example demonstrating how to do a simple identification
"""
# Create the dataset to work on
t = np.arange(0, 20, 0.02)
x = np.ones(len(t))
tf = control.tf([2.0, 1.0], [-3.0, 1.0])
_, y, _ = control.forced_response(tf, t, x)
# Create the transfer function to identify using sympy
s, T1, T2 = sympy.symbols('s T1 T2')
sys = (1+s*T1)/(1+s*T2)
tf = systems.Tf(sys)
ga = algorithms.Ga(x, y, t, tf, -5.0, 5.0)
ga.identify(verbose=True)
print(ga.hof[0])
if __name__ == "__main__":
main()
|
Hofsmo/PyPiW
|
examples/first_order.py
|
Python
|
gpl-3.0
| 651
|
#-*- coding:Utf-8 -*-
import numpy as np
import os
import sys
import shutil
import pkgutil
import pdb
import seaborn as sns
class PyLayers(object):
""" Generic PyLayers Meta Class
"""
# sns.set_style("white")
def help(self,letter='az',typ='mt'):
""" generic help
Parameters
----------
        letter : string
            range of initial letters to display, e.g. 'az'
        typ : string
            'mb' | 'mt'
            mb : members
            mt : methods
"""
members = [ x for x in self.__dict__.keys() if x not in dict.__dict__ ]
lmeth = [ x for x in np.sort(dir(self)) if x not in dict.__dict__]
if typ=='mb':
print(np.sort(self.__dict__.keys()))
if typ=='mt':
for s in lmeth:
if s not in members:
if s[0]!='_':
if len(letter)>1:
if (s[0]>=letter[0])&(s[0]<letter[1]):
try:
doc = eval('self.'+s+'.__doc__').split('\n')
print(s+': '+ doc[0])
except:
pass
else:
if (s[0]==letter[0]):
try:
doc = eval('self.'+s+'.__doc__').split('\n')
print(s+': '+ doc[0])
except:
pass
def _writedotpylayers(typ,path):
""" write .pylayers file
Parameters
----------
typ: string
source : update the path to the pylayers' source directory
project : update the path to the pylayers' project directory
path : string
path to typ
"""
home = os.path.expanduser('~')
# with open(os.path.join(home,'.pylayers'),'r') as f:
# lines = f.readlines()
with open(os.path.join(home,'.pylayers'),'a') as f:
f.write(typ+'\n')
f.write(path+'\n')
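    # The resulting ~/.pylayers file is a flat sequence of (typ, path) line
    # pairs, e.g. (illustrative):
    #   source
    #   /home/user/pylayers
    #   project
    #   /home/user/pylayers_project
    # The module-level code below reads the paths back via lines[1] and lines[3].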
# replaceline=False
# for l in lines:
# if replaceline :
# f.write(path+"\n")
# replaceline=False
# elif typ in l:
# f.write(l)
# replaceline=True
# else:
# f.write(l)
home = os.path.expanduser('~')
currentdir = os.getcwd()
#if .pylayers exists
if os.path.isfile(os.path.join(home,'.pylayers')):
with open(os.path.join(home,'.pylayers'),'r') as f:
lines = f.readlines()
#''.join... to remove the '\n' character
pylayersdir = ''.join(lines[1].splitlines())
basename = ''.join(lines[3].splitlines())
# BACKWARD COMPATIBILITY MODE (from now on, .pylayers is created at each install)
else:
if os.getenv('PYLAYERS') != None:
pylayersdir = os.getenv('PYLAYERS')
_writedotpylayers('source',pylayersdir)
        print('PYLAYERS environment variable detected: ~/.pylayers updated')
else :
raise EnvironmentError('pylayers source path not found. Try to re-run setup.py')
if os.getenv('BASENAME') != None:
basename = os.getenv('BASENAME')
_writedotpylayers('project',basename)
        print('BASENAME environment variable detected: ~/.pylayers updated')
    else :
        raise EnvironmentError('pylayers project path (BASENAME) not found. Try to re-run setup.py')
# =======
# # if os.path.isfile(os.path.join(home,'.pylayers')):
# # with open(os.path.join(home,'.pylayers'),'r') as f:
# # lines = f.readlines()
# # #[:-1] to remove the '\n' character
# # pylayersdir = lines[1][:-1]
# # basename = lines[3]
# # else :
# try:
# pylayersdir = os.environ['PYLAYERS']
# except:
# pylayersdir = currentdir.split('pylayers')[0] + 'pylayers'
# if pylayersdir[-1] == '/' or pylayersdir[-1] == '\\':
# pylayersdir = pylayersdir[:-1]
# if len(pylayersdir) == 1:
# raise EnvironmentError('Please verify that pylayers sources are into the "pylayers/" directory')
# try:
# basename = os.environ['BASENAME']
# except:
# raise EnvironmentError('Please position an environement variable $BASENAME where your pylayers project will be hosted')
# >>>>>>> master
try:
mesdir = os.environ['MESDIR']
except:
mesdir = os.path.join(basename ,'meas')
try:
datadir = os.environ['DATADIR']
except:
datadir = os.path.join(basename, 'meas')
try:
os.path.isdir(os.path.join(basename ,'figures'))
except:
os.mkdir(os.path.join(basename,'figures'))
# Dictionary which associates each PYLAYERS environment variable with a subdirectory
# of the project
#
pstruc = {}
pstruc['DIRSIMUL'] ='ini'
pstruc['DIRWRL'] =os.path.join('struc','wrl')
pstruc['DIRLAY'] =os.path.join('struc','lay')
pstruc['DIROSM'] =os.path.join('struc','osm')
pstruc['DIRFUR'] = os.path.join('struc','furnitures')
pstruc['DIRIMAGE'] = os.path.join('struc','images')
pstruc['DIRPICKLE'] = os.path.join('struc','gpickle')
pstruc['DIRRES'] = os.path.join('struc','res')
pstruc['DIRSTR'] = os.path.join('struc','str')
pstruc['DIRSLAB'] = 'ini'
pstruc['DIRSLAB2'] = 'ini'
pstruc['DIRMAT'] = 'ini'
pstruc['DIRMAT2'] = 'ini'
pstruc['DIRANT'] = 'ant'
pstruc['DIRTRA'] = 'output'
pstruc['DIRLCH'] = 'output'
pstruc['DIRTUD'] = 'output'
pstruc['DIRTx'] = os.path.join('output','Tx001')
pstruc['DIRGEOM'] = 'geom'
pstruc['DIRTRA'] = 'output'
pstruc['DIRCIR'] = 'output'
pstruc['DIRMES'] = 'meas'
pstruc['DIRNETSAVE'] = 'netsave'
# pstruc['DIRSIG'] = os.path.join('output','sig')
pstruc['DIRR2D'] = os.path.join('output','r2d')
pstruc['DIRR3D'] = os.path.join('output','r3d')
pstruc['DIRCT'] = os.path.join('output','Ct')
pstruc['DIRH'] = os.path.join('output','H')
pstruc['DIRLNK'] = 'output'
pstruc['DIRBODY'] = 'body'
pstruc['DIRGIS'] = 'gis'
pstruc['DIRC3D'] = os.path.join('body','c3d')
pstruc['DIROOSM'] = os.path.join('gis','osm')
pstruc['DIRWEAR'] = os.path.join('body','wear')
# if the basename directory does not exist, it is created
try:
os.chdir(basename)
except:
print("Create directory " + basename)
os.mkdir(basename)
#
# write file project.conf
#
fd = open(os.path.join(basename,'project.conf'),'w')
fd.close()
#for nm in pstruc.keys():
for nm,nv in pstruc.items():
dirname = os.path.join(basename , pstruc[nm])
if not 'win' in sys.platform:
spl = nv.split('/') # never again a variable called sp
else:
spl = nv.split('\\') # never again a variable called sp
if len(spl)>1:
if not os.path.isdir(os.path.join(basename ,spl[0])):
os.mkdir(os.path.join(basename ,spl[0]))
os.mkdir(os.path.join(basename,nv))
print("create ",os.path.join(basename ,nv))
else:
if not os.path.isdir(os.path.join(basename ,nv)):
os.mkdir(os.path.join(basename ,nv))
print("create ",os.path.join(basename ,nv))
else :
if not os.path.isdir(dirname):
try:
os.mkdir(dirname)
except:
                # the dictionary is not necessarily ordered!
                # the parent directory may not have been created yet
dirtmp= os.path.dirname(dirname)
os.mkdir(dirtmp)
os.mkdir(dirname)
print("create ",dirname)
# try:
# os.chdir(dirname)
# os.chdir('..')
# except:
# pdb.set_trace()
# sp = nv.split('/')
# if len(sp)>1:
# try:
# os.chdir(basename + '/'+sp[0])
# os.chdir('..')
# except:
# os.mkdir(basename + '/'+sp[0])
# os.chdir(basename + '/'+sp[0])
# os.mkdir(basename + '/'+sp[1])
# os.chdir('..')
# else:
# print "create "+ dirname
# os.mkdir(dirname)
# os.chdir('..')
if nm == 'DIRANT':
antdir = dirname
if nm == 'DIRFUR':
furdir = dirname
if nm == 'DIRGEOM':
geomdir = dirname
if nm == 'DIRLCH':
lchdir = dirname
if nm == 'DIRTUD':
tuddir = dirname
if nm == 'DIRSLAB':
slabdir = dirname
if nm == 'DIRMA':
matdir = dirname
if nm == 'DIRTRA':
tradir = dirname
if nm == 'DIROOSM':
osmdir = dirname
fd = open(os.path.join(basename,'project.conf'),'a')
fd.write(nm+' '+dirname +'\n')
fd.close()
#
# copy files from /data/ini in project directory
#
# If a new file type is added:
# 1 - add the directory path to pstruc['DIRFILETYPE'] = os.path.join('path','to','filetype')
# 2 - add the directory path to dirlist (just below)
if basename != os.path.join(pylayersdir,'data'):
if not 'win' in sys.platform:
dirlist=['ini','struc','struc/furnitures'
,'struc/osm','struc/wrl','struc/res','struc/str'
,'struc/images','struc/lay'
,'ant','output/Tx001','output'
,'geom','output/r2d'
,'output/r3d','body','body/c3d','body/wear']
else :
dirlist=['ini',os.path.join('struc','furnitures')
,os.path.join('struc','osm')
,os.path.join('struc','wrl')
,os.path.join('struc','res')
,os.path.join('struc','str')
,os.path.join('struc','images')
,os.path.join('struc','lay')
,'ant',os.path.join('output','Tx001'),'output'
,'geom'
,os.path.join('output','r2d')
,os.path.join('output','r3d'),'body'
,os.path.join('body','c3d')
,os.path.join('body','wear')]
for dl in dirlist:
filelist = os.listdir(os.path.join(pylayersdir,'data', dl))
for fi in filelist:
if not os.path.isdir(os.path.join(basename,dl,fi)):
if os.path.isfile(os.path.join(basename,dl,fi)): # file already exists
pass
else:
print(dl,fi)
try:
shutil.copy(
os.path.join(pylayersdir,'data',dl,fi),
os.path.join(basename,dl,fi))
except:
pdb.set_trace()
##
os.chdir(currentdir)
## set seaborn style
sns.set_style("white")
|
buguen/pylayers
|
pylayers/util/project.py
|
Python
|
lgpl-3.0
| 10,152
|
# err.py
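# Deliberately fails at runtime: int('0') == 0, so the print below raises
# ZeroDivisionError (presumably to demonstrate error tracebacks).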
s = '0'
n = int(s)
print(10 / n)
|
PeytonXu/learn-python
|
learn/www.liaoxuefeng.com/err.py
|
Python
|
mit
| 41
|
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Read the follow up data
## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
try:
if clinical1[-1][0]==i[0]:
if i[8]=='Alive':
clinical1[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical1[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical1.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical1.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if i[0] not in [j[0] for j in clinical]:
try:
if clinical2[-1][0]==i[0]:
if i[6]=='Alive':
clinical2[-1]=[i[0],int(i[7]),'Alive']
elif i[6]=='Dead':
clinical2[-1]=[i[0],int(i[8]),'Dead']
else:
pass
else:
if i[6]=='Alive':
clinical2.append([i[0],int(i[7]),'Alive'])
elif i[6]=='Dead':
clinical2.append([i[0],int(i[8]),'Dead'])
else:
pass
except:
pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[-5]],sex_dict[i[6]],int(i[42])]
if i[21]=='Alive':
clinical4.append([i[0],int(i[22]),'Alive'])
elif i[21]=='Dead':
clinical4.append([i[0],int(i[23]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
##normalized files were used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
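            # Rebuild the patient barcode 'TCGA-XX-XXXX' by rejoining the
            # first three '-'-separated fields of the sample barcode.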
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
|
OmnesRes/pan_cancer
|
paper/cox_regression/BLCA/patient_info.py
|
Python
|
mit
| 7,247
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-07-02 02:30
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usercenter', '0003_auto_20161008_2126'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
tkliuxing/bookspider
|
booksite/booksite/usercenter/migrations/0004_auto_20170702_1030.py
|
Python
|
apache-2.0
| 1,149
|
import pygame as pg
from src.game import Sprite, GameObject, Animation
from src.physics import Body, Vector
class Enemy(GameObject):
def __init__(self, pos):
# enemy = [pg.image.load('enemy1.png'), pg.image.load('enemy2.png'), pg.image.load('enemy3.png'), pg.image.load('enemy2.png')]
# imgs = [pg.image.load('static_face_1.png'), pg.image.load("static_back_1.png"), pg.image.load("static_left.png"), pg.image.load("static_right.png"), pg.image.load('hit_face_1.png'), pg.image.load("hit_back_1.png"), pg.image.load("hit_right.png"), pg.image.load("hit_left.png")]
self.body = Body(10, p=pos)
self.draw_offset = Vector(-20, -20)
self.hp = 100
self.current_sprite = Animation(paths=['assets/enemy1.png', 'assets/enemy2.png', 'assets/enemy3.png', 'assets/enemy2.png'], loop=True, frame_interval=15, size=(40,40))
self.font = pg.font.SysFont("monospace", 15)
def update(self, ticks):
from src import main
player = main.GAME.objects[0]
        # Step one unit per update along the normalized direction to the player.
        direction = player.body.p - self.body.p
        distance = abs(direction)
        if distance:  # avoid dividing by zero when overlapping the player
            self.body.p += direction / distance
if self.hp <= 0:
main.GAME.to_be_removed.append(self)
def draw(self, display):
label = self.font.render(str(self.hp), 1, (255,0,0))
display.blit(label, (self.body.p.x + self.draw_offset.x, self.body.p.y + self.draw_offset.y - 15))
self.current_sprite.draw(display, self.body.p + self.draw_offset)
def remove(self):
pass
|
LittleSmaug/summercamp2k17
|
src/objects/enemy.py
|
Python
|
gpl-3.0
| 1,405
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
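# NOTE: the *_TYPES tuples below (INDIV_TYPES, BOAT_TYPES, ...) are not defined
# or imported in this file as given; they are presumably provided elsewhere in
# the package.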
EQPT_TYPES = INDIV_TYPES + BOAT_TYPES + FURNITURE_TYPES + TRAILER_TYPES + VEHICLE_TYPES
class Equipment(models.Model):
_name = 'eqpt.equipment'
_description = "Equipment"
@api.model
def _get_currency(self):
return False
name = fields.Char(string="Name")
stage_id = fields.Many2one('eqpt.stage', default=False)
legend_blocked = fields.Char(related='stage_id.legend_blocked', string="Kanban red state legend")
legend_normal = fields.Char(related='stage_id.legend_normal', string="Kanban grey state legend")
legend_done = fields.Char(related='stage_id.legend_done', string="Kanban green state legend")
kanban_state = fields.Selection(selection=[],string="Kanban State")
currency_id = fields.Many2one('res.currency', string="Currency", default=_get_currency)
price = fields.Float(string="Price")
buy_date = fields.Date(string="Buy date")
room_id = fields.Many2one('eqpt.room', string="Storage Room")
building_id = fields.Many2one('eqpt.building', related='room_id.building_id', string="Building")
    eqpt_type = fields.Selection(selection=EQPT_TYPES, string="Equipment type")
# TODO image !!
|
RemiFr82/ck_addons
|
ck_equipment/models/eqpt_equipment.py
|
Python
|
gpl-3.0
| 1,228
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset sampler."""
__all__ = ['Sampler', 'SequentialSampler', 'RandomSampler', 'BatchSampler']
import numpy as np
class Sampler(object):
"""Base class for samplers.
All samplers should subclass `Sampler` and define `__iter__` and `__len__`
methods.
"""
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class SequentialSampler(Sampler):
"""Samples elements from [0, length) sequentially.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, length):
self._length = length
def __iter__(self):
return iter(range(self._length))
def __len__(self):
return self._length
class RandomSampler(Sampler):
"""Samples elements from [0, length) randomly without replacement.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, length):
self._length = length
def __iter__(self):
indices = np.arange(self._length)
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
class BatchSampler(Sampler):
"""Wraps over another `Sampler` and return mini-batches of samples.
Parameters
----------
sampler : Sampler
The source Sampler.
batch_size : int
Size of mini-batch.
last_batch : {'keep', 'discard', 'rollover'}
Specifies how the last batch is handled if batch_size does not evenly
divide sequence length.
    If 'keep', the last batch will be returned directly, but will contain
    fewer elements than `batch_size` requires.
If 'discard', the last batch will be discarded.
If 'rollover', the remaining elements will be rolled over to the next
iteration.
Examples
--------
>>> sampler = gluon.data.SequentialSampler(10)
>>> batch_sampler = gluon.data.BatchSampler(sampler, 3, 'keep')
>>> list(batch_sampler)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
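    >>> # With 'rollover', the incomplete final batch is deferred to the
    >>> # next iteration instead of being yielded:
    >>> rollover_sampler = gluon.data.BatchSampler(sampler, 3, 'rollover')
    >>> list(rollover_sampler)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8]]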
"""
def __init__(self, sampler, batch_size, last_batch='keep'):
self._sampler = sampler
self._batch_size = batch_size
self._last_batch = last_batch
self._prev = []
def __iter__(self):
batch, self._prev = self._prev, []
for i in self._sampler:
batch.append(i)
if len(batch) == self._batch_size:
yield batch
batch = []
if batch:
if self._last_batch == 'keep':
yield batch
elif self._last_batch == 'discard':
return
elif self._last_batch == 'rollover':
self._prev = batch
else:
raise ValueError(
"last_batch must be one of 'keep', 'discard', or 'rollover', " \
"but got %s"%self._last_batch)
def __len__(self):
if self._last_batch == 'keep':
return (len(self._sampler) + self._batch_size - 1) // self._batch_size
if self._last_batch == 'discard':
return len(self._sampler) // self._batch_size
if self._last_batch == 'rollover':
return (len(self._prev) + len(self._sampler)) // self._batch_size
raise ValueError(
"last_batch must be one of 'keep', 'discard', or 'rollover', " \
"but got %s"%self._last_batch)
|
indhub/mxnet
|
python/mxnet/gluon/data/sampler.py
|
Python
|
apache-2.0
| 4,279
|
from django import template
from playlist.models import ScheduledPlaylist
register = template.Library()
@register.tag
def get_current_playlist_entry(parser, token):
try:
tag_name, for_arg, obj, as_arg, as_varname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('get_current_playlist_entry tag requires 2 arguments (obj, as_varname), %s given' % (len(token.split_contents()) - 1))
return GetCurrentPlaylistEntryNode(obj, as_varname)
class GetCurrentPlaylistEntryNode(template.Node):
def __init__(self, obj, as_varname):
self.obj = template.Variable(obj)
self.as_varname = as_varname
def render(self, context):
obj = self.obj.resolve(context)
context[self.as_varname] = ScheduledPlaylist.get_current_playlist_entry_for(obj)
return ''
@register.tag
def get_next_playlist_entry(parser, token):
try:
tag_name, for_arg, obj, as_arg, as_varname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('get_next_playlist_entry tag requires 2 arguments (obj, as_varname), %s given' % (len(token.split_contents()) - 1))
return GetNextPlaylistEntryNode(obj, as_varname)
class GetNextPlaylistEntryNode(template.Node):
def __init__(self, obj, as_varname):
self.obj = template.Variable(obj)
self.as_varname = as_varname
def render(self, context):
obj = self.obj.resolve(context)
context[self.as_varname] = ScheduledPlaylist.get_next_playlist_entry_for(obj)
return ''
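# Hypothetical template usage, inferred from the token unpacking above
# (tag_name, 'for', obj, 'as', varname); not part of the original file:
#
#   {% load playlist_template_tags %}
#   {% get_current_playlist_entry for schedule as current_entry %}
#   {% get_next_playlist_entry for schedule as next_entry %}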
|
praekelt/panya-playlist
|
playlist/templatetags/playlist_template_tags.py
|
Python
|
bsd-3-clause
| 1,566
|
import json
import config
import sys
def check_configuration():
if config.access_token == '' or config.access_token_secret == '' or\
config.consumer_key == '' or config.consumer_secret == '':
print('Check config.py file and write the Twitter keys there.')
sys.exit(1)
|
cpina/twitter2rss
|
utils.py
|
Python
|
agpl-3.0
| 298
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9447")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
|
IlfirinIlfirin/shavercoin
|
contrib/wallettools/walletchangepass.py
|
Python
|
mit
| 220
|
"""
SoftLayer.API
~~~~~~~~~~~~~
SoftLayer API bindings
:license: MIT, see LICENSE for more details.
"""
# pylint: disable=invalid-name
import time
import warnings
import json
import logging
import requests
from SoftLayer import auth as slauth
from SoftLayer import config
from SoftLayer import consts
from SoftLayer import exceptions
from SoftLayer import transports
LOGGER = logging.getLogger(__name__)
API_PUBLIC_ENDPOINT = consts.API_PUBLIC_ENDPOINT
API_PRIVATE_ENDPOINT = consts.API_PRIVATE_ENDPOINT
CONFIG_FILE = consts.CONFIG_FILE
__all__ = [
'create_client_from_env',
'Client',
'BaseClient',
'API_PUBLIC_ENDPOINT',
'API_PRIVATE_ENDPOINT',
'IAMClient',
]
VALID_CALL_ARGS = set((
'id',
'mask',
'filter',
'headers',
'compress',
'raw_headers',
'limit',
'offset',
'verify',
))
def create_client_from_env(username=None,
api_key=None,
endpoint_url=None,
timeout=None,
auth=None,
config_file=None,
proxy=None,
user_agent=None,
transport=None,
verify=True):
"""Creates a SoftLayer API client using your environment.
    Settings are loaded via keyword arguments, environmental variables and
config file.
:param username: an optional API username if you wish to bypass the
package's built-in username
:param api_key: an optional API key if you wish to bypass the package's
built in API key
:param endpoint_url: the API endpoint base URL you wish to connect to.
Set this to API_PRIVATE_ENDPOINT to connect via SoftLayer's private
network.
:param proxy: proxy to be used to make API calls
:param integer timeout: timeout for API requests
:param auth: an object which responds to get_headers() to be inserted into
the xml-rpc headers. Example: `BasicAuthentication`
:param config_file: A path to a configuration file used to load settings
:param user_agent: an optional User Agent to report when making API
calls if you wish to bypass the packages built in User Agent string
:param transport: An object that's callable with this signature:
transport(SoftLayer.transports.Request)
:param bool verify: decide to verify the server's SSL/TLS cert. DO NOT SET
TO FALSE WITHOUT UNDERSTANDING THE IMPLICATIONS.
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> resp = client.call('Account', 'getObject')
>>> resp['companyName']
'Your Company'
"""
if config_file is None:
config_file = CONFIG_FILE
settings = config.get_client_settings(username=username,
api_key=api_key,
endpoint_url=endpoint_url,
timeout=timeout,
proxy=proxy,
verify=verify,
config_file=config_file)
if transport is None:
url = settings.get('endpoint_url')
if url is not None and '/rest' in url:
# If this looks like a rest endpoint, use the rest transport
transport = transports.RestTransport(
endpoint_url=settings.get('endpoint_url'),
proxy=settings.get('proxy'),
timeout=settings.get('timeout'),
user_agent=user_agent,
verify=verify,
)
else:
# Default the transport to use XMLRPC
transport = transports.XmlRpcTransport(
endpoint_url=settings.get('endpoint_url'),
proxy=settings.get('proxy'),
timeout=settings.get('timeout'),
user_agent=user_agent,
verify=verify,
)
# If we have enough information to make an auth driver, let's do it
if auth is None and settings.get('username') and settings.get('api_key'):
# NOTE(kmcdonald): some transports mask other transports, so this is
# a way to find the 'real' one
real_transport = getattr(transport, 'transport', transport)
if isinstance(real_transport, transports.XmlRpcTransport):
auth = slauth.BasicAuthentication(
settings.get('username'),
settings.get('api_key'),
)
elif isinstance(real_transport, transports.RestTransport):
auth = slauth.BasicHTTPAuthentication(
settings.get('username'),
settings.get('api_key'),
)
return BaseClient(auth=auth, transport=transport, config_file=config_file)
def Client(**kwargs):
"""Get a SoftLayer API Client using environmental settings.
Deprecated in favor of create_client_from_env()
"""
warnings.warn("use SoftLayer.create_client_from_env() instead",
DeprecationWarning)
return create_client_from_env(**kwargs)
class BaseClient(object):
"""Base SoftLayer API client.
:param auth: auth driver that looks like SoftLayer.auth.AuthenticationBase
:param transport: An object that's callable with this signature:
transport(SoftLayer.transports.Request)
"""
_prefix = "SoftLayer_"
def __init__(self, auth=None, transport=None, config_file=None):
if config_file is None:
config_file = CONFIG_FILE
self.auth = auth
self.config_file = config_file
self.settings = config.get_config(self.config_file)
if transport is None:
url = self.settings['softlayer'].get('endpoint_url')
if url is not None and '/rest' in url:
# If this looks like a rest endpoint, use the rest transport
transport = transports.RestTransport(
endpoint_url=url,
proxy=self.settings['softlayer'].get('proxy'),
                    # prevents an exception in case timeout is a float number.
timeout=int(self.settings['softlayer'].getfloat('timeout')),
user_agent=consts.USER_AGENT,
verify=self.settings['softlayer'].getboolean('verify'),
)
else:
# Default the transport to use XMLRPC
transport = transports.XmlRpcTransport(
endpoint_url=url,
proxy=self.settings['softlayer'].get('proxy'),
timeout=int(self.settings['softlayer'].getfloat('timeout')),
user_agent=consts.USER_AGENT,
verify=self.settings['softlayer'].getboolean('verify'),
)
self.transport = transport
def authenticate_with_password(self, username, password,
security_question_id=None,
security_question_answer=None):
"""Performs Username/Password Authentication
:param string username: your SoftLayer username
:param string password: your SoftLayer password
:param int security_question_id: The security question id to answer
:param string security_question_answer: The answer to the security question
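        Usage (illustrative sketch; assumes valid credentials):
            >>> client = SoftLayer.create_client_from_env()
            >>> user_id, token_hash = client.authenticate_with_password('user', 'pass')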
"""
self.auth = None
res = self.call('User_Customer', 'getPortalLoginToken',
username,
password,
security_question_id,
security_question_answer)
self.auth = slauth.TokenAuthentication(res['userId'], res['hash'])
return res['userId'], res['hash']
def __getitem__(self, name):
"""Get a SoftLayer Service.
:param name: The name of the service. E.G. Account
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client['Account']
<Service: Account>
"""
return Service(self, name)
def call(self, service, method, *args, **kwargs):
"""Make a SoftLayer API call.
:param method: the method to call on the service
:param \\*args: (optional) arguments for the remote call
:param id: (optional) id for the resource
:param mask: (optional) object mask
:param dict filter: (optional) filter dict
:param dict headers: (optional) optional XML-RPC headers
:param boolean compress: (optional) Enable/Disable HTTP compression
:param dict raw_headers: (optional) HTTP transport headers
:param int limit: (optional) return at most this many results
:param int offset: (optional) offset results by this many
:param boolean iter: (optional) if True, returns a generator with the results
:param bool verify: verify SSL cert
:param cert: client certificate path
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client.call('Account', 'getVirtualGuests', mask="id", limit=10)
[...]
"""
if kwargs.pop('iter', False):
# Most of the codebase assumes a non-generator will be returned, so casting to list
# keeps those sections working
return list(self.iter_call(service, method, *args, **kwargs))
invalid_kwargs = set(kwargs.keys()) - VALID_CALL_ARGS
if invalid_kwargs:
raise TypeError(
'Invalid keyword arguments: %s' % ','.join(invalid_kwargs))
prefixes = (self._prefix, 'BluePages_Search', 'IntegratedOfferingTeam_Region')
if self._prefix and not service.startswith(prefixes):
service = self._prefix + service
http_headers = {'Accept': '*/*'}
if kwargs.get('compress', True):
http_headers['Accept-Encoding'] = 'gzip, deflate, compress'
else:
http_headers['Accept-Encoding'] = None
if kwargs.get('raw_headers'):
http_headers.update(kwargs.get('raw_headers'))
request = transports.Request()
request.service = service
request.method = method
request.args = args
request.transport_headers = http_headers
request.identifier = kwargs.get('id')
request.mask = kwargs.get('mask')
request.filter = kwargs.get('filter')
request.limit = kwargs.get('limit')
request.offset = kwargs.get('offset')
if kwargs.get('verify') is not None:
request.verify = kwargs.get('verify')
if self.auth:
extra_headers = self.auth.get_headers()
if extra_headers:
warnings.warn("auth.get_headers() is deprecated and will be "
"removed in the next major version",
DeprecationWarning)
request.headers.update(extra_headers)
request = self.auth.get_request(request)
request.headers.update(kwargs.get('headers', {}))
return self.transport(request)
__call__ = call
def iter_call(self, service, method, *args, **kwargs):
"""A generator that deals with paginating through results.
:param service: the name of the SoftLayer API service
:param method: the method to call on the service
:param integer limit: result size for each API call (defaults to 100)
:param \\*args: same optional arguments that ``Service.call`` takes
:param \\*\\*kwargs: same optional keyword arguments that ``Service.call`` takes
"""
limit = kwargs.pop('limit', 100)
offset = kwargs.pop('offset', 0)
if limit <= 0:
raise AttributeError("Limit size should be greater than zero.")
# Set to make unit tests, which call this function directly, play nice.
kwargs['iter'] = False
result_count = 0
keep_looping = True
while keep_looping:
# Get the next results
results = self.call(service, method, offset=offset, limit=limit, *args, **kwargs)
# Apparently this method doesn't return a list.
# Why are you even iterating over this?
if not isinstance(results, transports.SoftLayerListResult):
if isinstance(results, list):
# Close enough, this makes testing a lot easier
results = transports.SoftLayerListResult(results, len(results))
else:
yield results
return
for item in results:
yield item
result_count += 1
# Got less results than requested, we are at the end
if len(results) < limit:
keep_looping = False
# Got all the needed items
if result_count >= results.total_count:
keep_looping = False
offset += limit
def __repr__(self):
return "Client(transport=%r, auth=%r)" % (self.transport, self.auth)
__str__ = __repr__
def __len__(self):
return 0
class IAMClient(BaseClient):
"""IBM ID Client for using IAM authentication
:param auth: auth driver that looks like SoftLayer.auth.AuthenticationBase
:param transport: An object that's callable with this signature: transport(SoftLayer.transports.Request)
"""
def authenticate_with_password(self, username, password, security_question_id=None, security_question_answer=None):
"""Performs IBM IAM Username/Password Authentication
:param string username: your IBMid username
:param string password: your IBMid password
"""
iam_client = requests.Session()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': consts.USER_AGENT,
'Accept': 'application/json'
}
data = {
'grant_type': 'password',
'password': password,
'response_type': 'cloud_iam',
'username': username
}
try:
response = iam_client.request(
'POST',
'https://iam.cloud.ibm.com/identity/token',
data=data,
headers=headers,
auth=requests.auth.HTTPBasicAuth('bx', 'bx')
)
if response.status_code != 200:
LOGGER.error("Unable to login: %s", response.text)
response.raise_for_status()
tokens = json.loads(response.text)
except requests.HTTPError as ex:
error = json.loads(response.text)
raise exceptions.IAMError(response.status_code,
error.get('errorMessage'),
'https://iam.cloud.ibm.com/identity/token') from ex
self.settings['softlayer']['access_token'] = tokens['access_token']
self.settings['softlayer']['refresh_token'] = tokens['refresh_token']
config.write_config(self.settings, self.config_file)
self.auth = slauth.BearerAuthentication('', tokens['access_token'], tokens['refresh_token'])
return tokens
def authenticate_with_passcode(self, passcode):
"""Performs IBM IAM SSO Authentication
:param string passcode: your IBMid password
"""
iam_client = requests.Session()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': consts.USER_AGENT,
'Accept': 'application/json'
}
data = {
'grant_type': 'urn:ibm:params:oauth:grant-type:passcode',
'passcode': passcode,
'response_type': 'cloud_iam'
}
try:
response = iam_client.request(
'POST',
'https://iam.cloud.ibm.com/identity/token',
data=data,
headers=headers,
auth=requests.auth.HTTPBasicAuth('bx', 'bx')
)
if response.status_code != 200:
LOGGER.error("Unable to login: %s", response.text)
response.raise_for_status()
tokens = json.loads(response.text)
except requests.HTTPError as ex:
error = json.loads(response.text)
raise exceptions.IAMError(response.status_code,
error.get('errorMessage'),
'https://iam.cloud.ibm.com/identity/token') from ex
self.settings['softlayer']['access_token'] = tokens['access_token']
self.settings['softlayer']['refresh_token'] = tokens['refresh_token']
a_expire = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokens['expiration']))
r_expire = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokens['refresh_token_expiration']))
LOGGER.warning("Tokens retrieved, expires at %s, Refresh expires at %s", a_expire, r_expire)
config.write_config(self.settings, self.config_file)
self.auth = slauth.BearerAuthentication('', tokens['access_token'], tokens['refresh_token'])
return tokens
def authenticate_with_iam_token(self, a_token, r_token=None):
"""Authenticates to the SL API with an IAM Token
:param string a_token: Access token
:param string r_token: Refresh Token, to be used if Access token is expired.
"""
self.auth = slauth.BearerAuthentication('', a_token, r_token)
def refresh_iam_token(self, r_token, account_id=None, ims_account=None):
"""Refreshes the IAM Token, will default to values in the config file"""
iam_client = requests.Session()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': consts.USER_AGENT,
'Accept': 'application/json'
}
data = {
'grant_type': 'refresh_token',
'refresh_token': r_token,
'response_type': 'cloud_iam'
}
sl_config = self.settings['softlayer']
if account_id is None and sl_config.get('account_id', False):
account_id = sl_config.get('account_id')
if ims_account is None and sl_config.get('ims_account', False):
ims_account = sl_config.get('ims_account')
data['account'] = account_id
data['ims_account'] = ims_account
try:
response = iam_client.request(
'POST',
'https://iam.cloud.ibm.com/identity/token',
data=data,
headers=headers,
auth=requests.auth.HTTPBasicAuth('bx', 'bx')
)
if response.status_code != 200:
LOGGER.warning("Unable to refresh IAM Token. %s", response.text)
response.raise_for_status()
tokens = json.loads(response.text)
except requests.HTTPError as ex:
error = json.loads(response.text)
raise exceptions.IAMError(response.status_code,
error.get('errorMessage'),
'https://iam.cloud.ibm.com/identity/token') from ex
a_expire = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokens['expiration']))
r_expire = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tokens['refresh_token_expiration']))
LOGGER.warning("Tokens retrieved, expires at %s, Refresh expires at %s", a_expire, r_expire)
self.settings['softlayer']['access_token'] = tokens['access_token']
self.settings['softlayer']['refresh_token'] = tokens['refresh_token']
config.write_config(self.settings, self.config_file)
self.auth = slauth.BearerAuthentication('', tokens['access_token'])
return tokens
    def call(self, service, method, *args, **kwargs):
        """Handles refreshing IAM tokens in case of a HTTP 401 error"""
        try:
            return super().call(service, method, *args, **kwargs)
        except exceptions.SoftLayerAPIError as ex:
            if ex.faultCode == 401:
                LOGGER.warning("Token has expired, trying to refresh. %s", ex.faultString)
                # Refresh the access token with the stored refresh token,
                # then retry the call once with the new credentials.
                self.refresh_iam_token(self.settings['softlayer'].get('refresh_token'))
                return super().call(service, method, *args, **kwargs)
            else:
                raise ex
def __repr__(self):
return "IAMClient(transport=%r, auth=%r)" % (self.transport, self.auth)
class Service(object):
"""A SoftLayer Service.
:param client: A SoftLayer.API.Client instance
    :param str name: The service name
"""
def __init__(self, client, name):
self.client = client
self.name = name
def call(self, name, *args, **kwargs):
"""Make a SoftLayer API call
:param service: the name of the SoftLayer API service
:param method: the method to call on the service
:param \\*args: same optional arguments that ``BaseClient.call`` takes
:param \\*\\*kwargs: same optional keyword arguments that
``BaseClient.call`` takes
:param service: the name of the SoftLayer API service
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client['Account'].getVirtualGuests(mask="id", limit=10)
[...]
"""
return self.client.call(self.name, name, *args, **kwargs)
__call__ = call
def iter_call(self, name, *args, **kwargs):
"""A generator that deals with paginating through results.
        :param name: the method to call on the service
        :param integer limit: result size for each API call (passed via
            kwargs; defaults to 100)
:param \\*args: same optional arguments that ``Service.call`` takes
:param \\*\\*kwargs: same optional keyword arguments that
``Service.call`` takes
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> gen = client.call('Account', 'getVirtualGuests', iter=True)
>>> for virtual_guest in gen:
... virtual_guest['id']
...
1234
4321
"""
return self.client.iter_call(self.name, name, *args, **kwargs)
def __getattr__(self, name):
if name in ["__name__", "__bases__"]:
raise AttributeError("'Obj' object has no attribute '%s'" % name)
def call_handler(*args, **kwargs):
" Handler that actually makes the API call "
return self(name, *args, **kwargs)
return call_handler
def __repr__(self):
return "<Service: %s>" % (self.name,)
__str__ = __repr__
|
softlayer/softlayer-python
|
SoftLayer/API.py
|
Python
|
mit
| 22,926
|
# Copyright (C) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import select
import socket
import threading
import time
import uuid
import six
from six import moves
import testtools
from oslo import messaging
from oslo_messaging.tests import utils as test_utils
if six.PY2:
# NOTE(flaper87): pyngus currently doesn't support py34. It's
    # in the works, though.
from oslo_messaging._drivers.protocols.amqp import driver as amqp_driver
import pyngus
LOG = logging.getLogger(__name__)
class _ListenerThread(threading.Thread):
"""Run a blocking listener in a thread."""
def __init__(self, listener, msg_count):
super(_ListenerThread, self).__init__()
self.listener = listener
self.msg_count = msg_count
self.messages = moves.queue.Queue()
self.daemon = True
self.start()
def run(self):
LOG.debug("Listener started")
while self.msg_count > 0:
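            # poll() blocks until the next message arrives.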
in_msg = self.listener.poll()
self.messages.put(in_msg)
self.msg_count -= 1
if in_msg.message.get('method') == 'echo':
in_msg.reply(reply={'correlation-id':
in_msg.message.get('id')})
LOG.debug("Listener stopped")
def get_messages(self):
"""Returns a list of all received messages."""
msgs = []
try:
while True:
m = self.messages.get(False)
msgs.append(m)
except moves.queue.Empty:
pass
return msgs
@testtools.skipUnless(six.PY2, "No Py3K support yet")
class TestProtonDriverLoad(test_utils.BaseTestCase):
def setUp(self):
super(TestProtonDriverLoad, self).setUp()
self.messaging_conf.transport_driver = 'amqp'
def test_driver_load(self):
transport = messaging.get_transport(self.conf)
self.assertIsInstance(transport._driver,
amqp_driver.ProtonDriver)
class _AmqpBrokerTestCase(test_utils.BaseTestCase):
@testtools.skipUnless(six.PY2, "No Py3K support yet")
def setUp(self):
super(_AmqpBrokerTestCase, self).setUp()
self._broker = FakeBroker()
self._broker_addr = "amqp://%s:%d" % (self._broker.host,
self._broker.port)
self._broker_url = messaging.TransportURL.parse(self.conf,
self._broker_addr)
self._broker.start()
def tearDown(self):
super(_AmqpBrokerTestCase, self).tearDown()
self._broker.stop()
class TestAmqpSend(_AmqpBrokerTestCase):
"""Test sending and receiving messages."""
def test_driver_unconnected_cleanup(self):
"""Verify the driver can cleanly shutdown even if never connected."""
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
driver.cleanup()
def test_listener_cleanup(self):
"""Verify unused listener can cleanly shutdown."""
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target = messaging.Target(topic="test-topic")
listener = driver.listen(target)
self.assertIsInstance(listener, amqp_driver.ProtonListener)
driver.cleanup()
def test_send_no_reply(self):
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target = messaging.Target(topic="test-topic")
listener = _ListenerThread(driver.listen(target), 1)
rc = driver.send(target, {"context": True},
{"msg": "value"}, wait_for_reply=False)
self.assertIsNone(rc)
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
self.assertEqual(listener.messages.get().message, {"msg": "value"})
driver.cleanup()
def test_send_exchange_with_reply(self):
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target1 = messaging.Target(topic="test-topic", exchange="e1")
listener1 = _ListenerThread(driver.listen(target1), 1)
target2 = messaging.Target(topic="test-topic", exchange="e2")
listener2 = _ListenerThread(driver.listen(target2), 1)
rc = driver.send(target1, {"context": "whatever"},
{"method": "echo", "id": "e1"},
wait_for_reply=True,
timeout=30)
self.assertIsNotNone(rc)
self.assertEqual(rc.get('correlation-id'), 'e1')
rc = driver.send(target2, {"context": "whatever"},
{"method": "echo", "id": "e2"},
wait_for_reply=True,
timeout=30)
self.assertIsNotNone(rc)
self.assertEqual(rc.get('correlation-id'), 'e2')
listener1.join(timeout=30)
self.assertFalse(listener1.isAlive())
listener2.join(timeout=30)
self.assertFalse(listener2.isAlive())
driver.cleanup()
def test_messaging_patterns(self):
"""Verify the direct, shared, and fanout message patterns work."""
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target1 = messaging.Target(topic="test-topic", server="server1")
listener1 = _ListenerThread(driver.listen(target1), 4)
target2 = messaging.Target(topic="test-topic", server="server2")
listener2 = _ListenerThread(driver.listen(target2), 3)
shared_target = messaging.Target(topic="test-topic")
fanout_target = messaging.Target(topic="test-topic",
fanout=True)
# this should go to only one server:
driver.send(shared_target, {"context": "whatever"},
{"method": "echo", "id": "either-1"},
wait_for_reply=True)
self.assertEqual(self._broker.topic_count, 1)
self.assertEqual(self._broker.direct_count, 1) # reply
# this should go to the other server:
driver.send(shared_target, {"context": "whatever"},
{"method": "echo", "id": "either-2"},
wait_for_reply=True)
self.assertEqual(self._broker.topic_count, 2)
self.assertEqual(self._broker.direct_count, 2) # reply
# these should only go to listener1:
driver.send(target1, {"context": "whatever"},
{"method": "echo", "id": "server1-1"},
wait_for_reply=True)
driver.send(target1, {"context": "whatever"},
{"method": "echo", "id": "server1-2"},
wait_for_reply=True)
self.assertEqual(self._broker.direct_count, 6) # 2X(send+reply)
# this should only go to listener2:
driver.send(target2, {"context": "whatever"},
{"method": "echo", "id": "server2"},
wait_for_reply=True)
self.assertEqual(self._broker.direct_count, 8)
# both listeners should get a copy:
driver.send(fanout_target, {"context": "whatever"},
{"method": "echo", "id": "fanout"})
listener1.join(timeout=30)
self.assertFalse(listener1.isAlive())
listener2.join(timeout=30)
self.assertFalse(listener2.isAlive())
self.assertEqual(self._broker.fanout_count, 1)
listener1_ids = [x.message.get('id') for x in listener1.get_messages()]
listener2_ids = [x.message.get('id') for x in listener2.get_messages()]
self.assertTrue('fanout' in listener1_ids and
'fanout' in listener2_ids)
self.assertTrue('server1-1' in listener1_ids and
'server1-1' not in listener2_ids)
self.assertTrue('server1-2' in listener1_ids and
'server1-2' not in listener2_ids)
self.assertTrue('server2' in listener2_ids and
'server2' not in listener1_ids)
if 'either-1' in listener1_ids:
self.assertTrue('either-2' in listener2_ids and
'either-2' not in listener1_ids and
'either-1' not in listener2_ids)
else:
self.assertTrue('either-2' in listener1_ids and
'either-2' not in listener2_ids and
'either-1' in listener2_ids)
driver.cleanup()
def test_send_timeout(self):
"""Verify send timeout."""
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target = messaging.Target(topic="test-topic")
listener = _ListenerThread(driver.listen(target), 1)
# the listener will drop this message:
try:
driver.send(target,
{"context": "whatever"},
{"method": "drop"},
wait_for_reply=True,
timeout=1.0)
except Exception as ex:
self.assertIsInstance(ex, messaging.MessagingTimeout, ex)
else:
self.assertTrue(False, "No Exception raised!")
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
driver.cleanup()
class TestAmqpNotification(_AmqpBrokerTestCase):
"""Test sending and receiving notifications."""
def test_notification(self):
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
notifications = [(messaging.Target(topic="topic-1"), 'info'),
(messaging.Target(topic="topic-1"), 'error'),
(messaging.Target(topic="topic-2"), 'debug')]
nl = driver.listen_for_notifications(notifications, None)
        # send one for each supported version:
msg_count = len(notifications) * 2
listener = _ListenerThread(nl, msg_count)
targets = ['topic-1.info',
'topic-1.bad', # will raise MessagingDeliveryFailure
'bad-topic.debug', # will raise MessagingDeliveryFailure
'topic-1.error',
'topic-2.debug']
excepted_targets = []
exception_count = 0
for version in (1.0, 2.0):
for t in targets:
try:
driver.send_notification(messaging.Target(topic=t),
"context", {'target': t},
version)
except messaging.MessageDeliveryFailure:
exception_count += 1
excepted_targets.append(t)
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
topics = [x.message.get('target') for x in listener.get_messages()]
self.assertEqual(len(topics), msg_count)
self.assertEqual(topics.count('topic-1.info'), 2)
self.assertEqual(topics.count('topic-1.error'), 2)
self.assertEqual(topics.count('topic-2.debug'), 2)
self.assertEqual(self._broker.dropped_count, 4)
self.assertEqual(exception_count, 4)
self.assertEqual(excepted_targets.count('topic-1.bad'), 2)
self.assertEqual(excepted_targets.count('bad-topic.debug'), 2)
driver.cleanup()
@testtools.skipUnless(six.PY2, "No Py3K support yet")
class TestAuthentication(test_utils.BaseTestCase):
def setUp(self):
super(TestAuthentication, self).setUp()
# for simplicity, encode the credentials as they would appear 'on the
# wire' in a SASL frame - username and password prefixed by zero.
user_credentials = ["\0joe\0secret"]
self._broker = FakeBroker(sasl_mechanisms="PLAIN",
user_credentials=user_credentials)
self._broker.start()
def tearDown(self):
super(TestAuthentication, self).tearDown()
self._broker.stop()
def test_authentication_ok(self):
"""Verify that username and password given in TransportHost are
accepted by the broker.
"""
addr = "amqp://joe:secret@%s:%d" % (self._broker.host,
self._broker.port)
url = messaging.TransportURL.parse(self.conf, addr)
driver = amqp_driver.ProtonDriver(self.conf, url)
target = messaging.Target(topic="test-topic")
listener = _ListenerThread(driver.listen(target), 1)
rc = driver.send(target, {"context": True},
{"method": "echo"}, wait_for_reply=True)
self.assertIsNotNone(rc)
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
driver.cleanup()
def test_authentication_failure(self):
"""Verify that a bad password given in TransportHost is
rejected by the broker.
"""
addr = "amqp://joe:badpass@%s:%d" % (self._broker.host,
self._broker.port)
url = messaging.TransportURL.parse(self.conf, addr)
driver = amqp_driver.ProtonDriver(self.conf, url)
target = messaging.Target(topic="test-topic")
_ListenerThread(driver.listen(target), 1)
self.assertRaises(messaging.MessagingTimeout,
driver.send,
target, {"context": True},
{"method": "echo"},
wait_for_reply=True,
timeout=2.0)
driver.cleanup()
@testtools.skipUnless(six.PY2, "No Py3K support yet")
class TestFailover(test_utils.BaseTestCase):
def setUp(self):
super(TestFailover, self).setUp()
self._brokers = [FakeBroker(), FakeBroker()]
hosts = []
for broker in self._brokers:
hosts.append(messaging.TransportHost(hostname=broker.host,
port=broker.port))
self._broker_url = messaging.TransportURL(self.conf,
transport="amqp",
hosts=hosts)
def tearDown(self):
super(TestFailover, self).tearDown()
for broker in self._brokers:
if broker.isAlive():
broker.stop()
def test_broker_failover(self):
"""Simulate failover of one broker to another."""
self._brokers[0].start()
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
target = messaging.Target(topic="my-topic")
listener = _ListenerThread(driver.listen(target), 2)
rc = driver.send(target, {"context": "whatever"},
{"method": "echo", "id": "echo-1"},
wait_for_reply=True,
timeout=30)
self.assertIsNotNone(rc)
self.assertEqual(rc.get('correlation-id'), 'echo-1')
# 1 request msg, 1 response:
self.assertEqual(self._brokers[0].topic_count, 1)
self.assertEqual(self._brokers[0].direct_count, 1)
# fail broker 0 and start broker 1:
self._brokers[0].stop()
self._brokers[1].start()
deadline = time.time() + 30
responded = False
sequence = 2
while deadline > time.time() and not responded:
if not listener.isAlive():
                # listener may have exited after replying to an old
                # correlation id: start a new listener
listener = _ListenerThread(driver.listen(target), 1)
try:
rc = driver.send(target, {"context": "whatever"},
{"method": "echo",
"id": "echo-%d" % sequence},
wait_for_reply=True,
timeout=2)
self.assertIsNotNone(rc)
self.assertEqual(rc.get('correlation-id'),
'echo-%d' % sequence)
responded = True
except messaging.MessagingTimeout:
sequence += 1
self.assertTrue(responded)
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
# note: stopping the broker first tests cleaning up driver without a
# connection active
self._brokers[1].stop()
driver.cleanup()
class FakeBroker(threading.Thread):
"""A test AMQP message 'broker'."""
if six.PY2:
class Connection(pyngus.ConnectionEventHandler):
"""A single AMQP connection."""
def __init__(self, server, socket_, name,
sasl_mechanisms, user_credentials):
"""Create a Connection using socket_."""
self.socket = socket_
self.name = name
self.server = server
self.connection = server.container.create_connection(name,
self)
self.connection.user_context = self
self.sasl_mechanisms = sasl_mechanisms
self.user_credentials = user_credentials
if sasl_mechanisms:
self.connection.pn_sasl.mechanisms(sasl_mechanisms)
self.connection.pn_sasl.server()
self.connection.open()
self.sender_links = set()
self.closed = False
def destroy(self):
"""Destroy the test connection."""
while self.sender_links:
link = self.sender_links.pop()
link.destroy()
self.connection.destroy()
self.connection = None
self.socket.close()
def fileno(self):
"""Allows use of this in a select() call."""
return self.socket.fileno()
def process_input(self):
"""Called when socket is read-ready."""
try:
pyngus.read_socket_input(self.connection, self.socket)
except socket.error:
pass
self.connection.process(time.time())
def send_output(self):
"""Called when socket is write-ready."""
try:
pyngus.write_socket_output(self.connection,
self.socket)
except socket.error:
pass
self.connection.process(time.time())
# Pyngus ConnectionEventHandler callbacks:
def connection_remote_closed(self, connection, reason):
"""Peer has closed the connection."""
self.connection.close()
def connection_closed(self, connection):
"""Connection close completed."""
self.closed = True # main loop will destroy
def connection_failed(self, connection, error):
"""Connection failure detected."""
self.connection_closed(connection)
def sender_requested(self, connection, link_handle,
name, requested_source, properties):
"""Create a new message source."""
addr = requested_source or "source-" + uuid.uuid4().hex
link = FakeBroker.SenderLink(self.server, self,
link_handle, addr)
self.sender_links.add(link)
def receiver_requested(self, connection, link_handle,
name, requested_target, properties):
"""Create a new message consumer."""
addr = requested_target or "target-" + uuid.uuid4().hex
FakeBroker.ReceiverLink(self.server, self,
link_handle, addr)
def sasl_step(self, connection, pn_sasl):
if self.sasl_mechanisms == 'PLAIN':
credentials = pn_sasl.recv()
if not credentials:
return # wait until some arrives
if credentials not in self.user_credentials:
# failed
return pn_sasl.done(pn_sasl.AUTH)
pn_sasl.done(pn_sasl.OK)
class SenderLink(pyngus.SenderEventHandler):
"""An AMQP sending link."""
def __init__(self, server, conn, handle, src_addr=None):
self.server = server
cnn = conn.connection
self.link = cnn.accept_sender(handle,
source_override=src_addr,
event_handler=self)
self.link.open()
self.routed = False
def destroy(self):
"""Destroy the link."""
self._cleanup()
if self.link:
self.link.destroy()
self.link = None
def send_message(self, message):
"""Send a message over this link."""
self.link.send(message)
def _cleanup(self):
if self.routed:
self.server.remove_route(self.link.source_address,
self)
self.routed = False
# Pyngus SenderEventHandler callbacks:
def sender_active(self, sender_link):
self.server.add_route(self.link.source_address, self)
self.routed = True
def sender_remote_closed(self, sender_link, error):
self._cleanup()
self.link.close()
def sender_closed(self, sender_link):
self.destroy()
class ReceiverLink(pyngus.ReceiverEventHandler):
"""An AMQP Receiving link."""
def __init__(self, server, conn, handle, addr=None):
self.server = server
cnn = conn.connection
self.link = cnn.accept_receiver(handle,
target_override=addr,
event_handler=self)
self.link.open()
self.link.add_capacity(10)
# ReceiverEventHandler callbacks:
def receiver_remote_closed(self, receiver_link, error):
self.link.close()
def receiver_closed(self, receiver_link):
self.link.destroy()
self.link = None
def message_received(self, receiver_link, message, handle):
"""Forward this message out the proper sending link."""
if self.server.forward_message(message):
self.link.message_accepted(handle)
else:
self.link.message_rejected(handle)
if self.link.capacity < 1:
self.link.add_capacity(10)
def __init__(self, server_prefix="exclusive",
broadcast_prefix="broadcast",
group_prefix="unicast",
address_separator=".",
sock_addr="", sock_port=0,
sasl_mechanisms="ANONYMOUS",
user_credentials=None):
"""Create a fake broker listening on sock_addr:sock_port."""
if not pyngus:
raise AssertionError("pyngus module not present")
threading.Thread.__init__(self)
self._server_prefix = server_prefix + address_separator
self._broadcast_prefix = broadcast_prefix + address_separator
self._group_prefix = group_prefix + address_separator
self._address_separator = address_separator
self._sasl_mechanisms = sasl_mechanisms
self._user_credentials = user_credentials
self._wakeup_pipe = os.pipe()
self._my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._my_socket.bind((sock_addr, sock_port))
self.host, self.port = self._my_socket.getsockname()
self.container = pyngus.Container("test_server_%s:%d"
% (self.host, self.port))
self._connections = {}
self._sources = {}
# count of messages forwarded, by messaging pattern
self.direct_count = 0
self.topic_count = 0
self.fanout_count = 0
self.dropped_count = 0
def start(self):
"""Start the server."""
LOG.debug("Starting Test Broker on %s:%d", self.host, self.port)
self._shutdown = False
self.daemon = True
self._my_socket.listen(10)
super(FakeBroker, self).start()
def stop(self):
"""Shutdown the server."""
LOG.debug("Stopping test Broker %s:%d", self.host, self.port)
self._shutdown = True
os.write(self._wakeup_pipe[1], "!")
self.join()
LOG.debug("Test Broker %s:%d stopped", self.host, self.port)
def run(self):
"""Process I/O and timer events until the broker is stopped."""
LOG.debug("Test Broker on %s:%d started", self.host, self.port)
while not self._shutdown:
readers, writers, timers = self.container.need_processing()
            # map pyngus Connections back to our Connection objects:
readfd = [c.user_context for c in readers]
readfd.extend([self._my_socket, self._wakeup_pipe[0]])
writefd = [c.user_context for c in writers]
timeout = None
if timers:
# [0] == next expiring timer
deadline = timers[0].next_tick
now = time.time()
timeout = 0 if deadline <= now else deadline - now
readable, writable, ignore = select.select(readfd,
writefd,
[],
timeout)
worked = set()
for r in readable:
if r is self._my_socket:
# new inbound connection request received,
# create a new Connection for it:
client_socket, client_address = self._my_socket.accept()
name = str(client_address)
conn = FakeBroker.Connection(self, client_socket, name,
self._sasl_mechanisms,
self._user_credentials)
self._connections[conn.name] = conn
elif r is self._wakeup_pipe[0]:
os.read(self._wakeup_pipe[0], 512)
else:
r.process_input()
worked.add(r)
for t in timers:
now = time.time()
if t.next_tick > now:
break
t.process(now)
conn = t.user_context
worked.add(conn)
for w in writable:
w.send_output()
worked.add(w)
# clean up any closed connections:
while worked:
conn = worked.pop()
if conn.closed:
del self._connections[conn.name]
conn.destroy()
# Shutting down
self._my_socket.close()
for conn in self._connections.itervalues():
conn.destroy()
return 0
def add_route(self, address, link):
# route from address -> link[, link ...]
if address not in self._sources:
self._sources[address] = [link]
elif link not in self._sources[address]:
self._sources[address].append(link)
def remove_route(self, address, link):
if address in self._sources:
if link in self._sources[address]:
self._sources[address].remove(link)
if not self._sources[address]:
del self._sources[address]
def forward_message(self, message):
# returns True if message was routed
dest = message.address
if dest not in self._sources:
self.dropped_count += 1
return False
LOG.debug("Forwarding [%s]", dest)
# route "behavior" determined by prefix:
if dest.startswith(self._broadcast_prefix):
self.fanout_count += 1
for link in self._sources[dest]:
LOG.debug("Broadcast to %s", dest)
link.send_message(message)
elif dest.startswith(self._group_prefix):
# round-robin:
self.topic_count += 1
link = self._sources[dest].pop(0)
link.send_message(message)
LOG.debug("Send to %s", dest)
self._sources[dest].append(link)
else:
# unicast:
self.direct_count += 1
LOG.debug("Unicast to %s", dest)
self._sources[dest][0].send_message(message)
return True
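
# Routing behavior implemented by forward_message() above, summarized for
# reference (the prefixes shown are the defaults passed to __init__):
#   "broadcast.<addr>" -> fanout: every link bound to the address gets a copy
#   "unicast.<addr>"   -> topic: links bound to the address are serviced
#                         round-robin
#   anything else      -> direct: only the first bound link gets the message
# Unroutable addresses increment dropped_count and the message is rejected.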
|
hkumarmk/oslo.messaging
|
tests/test_amqp_driver.py
|
Python
|
apache-2.0
| 29,574
|
#
# ImageViewCanvasQt.py -- A FITS image widget with canvas drawing in Qt
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import ImageView, Mixins
from ginga.qtw import ImageViewQt
from ginga.qtw.ImageViewCanvasTypesQt import *
class ImageViewCanvasError(ImageViewQt.ImageViewQtError):
pass
class ImageViewCanvas(ImageViewQt.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, settings=None, render=None,
rgbmap=None, bindmap=None, bindings=None):
ImageViewQt.ImageViewZoom.__init__(self, logger=logger,
settings=settings,
render=render,
rgbmap=rgbmap,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self, drawCatalog)
self.setSurface(self)
self.ui_setActive(True)
# for displaying modal keyboard state
self.mode_obj = None
bm = self.get_bindmap()
bm.add_callback('mode-set', self.mode_change_cb)
def canvascoords(self, data_x, data_y, center=True):
# data->canvas space coordinate conversion
x, y = self.get_canvas_xy(data_x, data_y, center=center)
return (x, y)
def redraw_data(self, whence=0):
super(ImageViewCanvas, self).redraw_data(whence=whence)
if not self.pixmap:
return
self.draw()
def mode_change_cb(self, bindmap, mode, modetype):
# delete the old indicator
obj = self.mode_obj
self.mode_obj = None
if obj:
try:
self.deleteObject(obj)
            except Exception:
pass
# if not one of the standard modifiers, display the new one
        if mode not in (None, 'ctrl', 'shift'):
Text = self.getDrawClass('text')
Rect = self.getDrawClass('rectangle')
Compound = self.getDrawClass('compoundobject')
xsp, ysp = 4, 6
wd, ht = self.get_window_size()
x1, y1 = wd-12*len(mode), ht-12
o1 = Text(x1, y1, mode,
fontsize=12, color='yellow')
o1.use_cc(True)
# hack necessary to be able to compute text extents _before_
# adding the object to the canvas
o1.fitsimage = self
wd, ht = o1.get_dimensions()
# yellow text on a black filled rectangle
o2 = Compound(Rect(x1-xsp, y1+ysp, x1+wd+xsp, y1-ht,
color='black',
fill=True, fillcolor='black'),
o1)
# use canvas, not data coordinates
o2.use_cc(True)
self.mode_obj = o2
self.add(o2)
return True
#END
|
bsipocz/ginga
|
ginga/qtw/ImageViewCanvasQt.py
|
Python
|
bsd-3-clause
| 3,179
|
from __future__ import absolute_import, division, print_function, unicode_literals
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from caffe2.python import core, workspace
from hypothesis import given
class TestEnsureClipped(hu.HypothesisTestCase):
@given(
X=hu.arrays(dims=[5, 10], elements=st.floats(min_value=-1.0, max_value=1.0)),
in_place=st.booleans(),
sparse=st.booleans(),
indices=hu.arrays(dims=[5], elements=st.booleans()),
**hu.gcs_cpu_only
)
def test_ensure_clipped(self, X, in_place, sparse, indices, gc, dc):
if (not in_place) and sparse:
return
param = X.astype(np.float32)
m, n = param.shape
indices = np.array(np.nonzero(indices)[0], dtype=np.int64)
grad = np.random.rand(len(indices), n)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("grad", grad)
workspace.FeedBlob("param", param)
input = ["param", "indices", "grad"] if sparse else ["param"]
output = "param" if in_place else "output"
op = core.CreateOperator("EnsureClipped", input, output, min=0.0)
workspace.RunOperatorOnce(op)
def ref():
return (
np.array(
[np.clip(X[i], 0, None) if i in indices else X[i] for i in range(m)]
)
if sparse
else np.clip(X, 0, None)
)
npt.assert_allclose(workspace.blobs[output], ref(), rtol=1e-3)
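
# Semantics exercised by the reference implementation above, restated as a
# sketch (the sparse/dense split follows this test, not necessarily the
# operator's own docs): with min=0.0 the dense case is equivalent to
# np.clip(param, 0, None), while the sparse case clips only the rows of
# `param` listed in `indices` and passes all other rows through unchanged.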
|
ryfeus/lambda-packs
|
pytorch/source/caffe2/python/operator_test/ensure_clipped_test.py
|
Python
|
mit
| 1,587
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.const.eve import EffectId
from eos.eve_obj.effect import EffectFactory
from .base import WarfareBuffEffect
class ModuleBonusWarfareLinkMining(WarfareBuffEffect):
friendly_only = True
EffectFactory.register_class_by_id(
ModuleBonusWarfareLinkMining,
EffectId.module_bonus_warfare_link_mining)
|
pyfa-org/eos
|
eos/eve_obj/effect/warfare_buff/command_mining.py
|
Python
|
lgpl-3.0
| 1,220
|
#!/usr/bin/env python
import agate
from csvkit.cli import CSVKitUtility, parse_column_identifiers
class CSVSort(CSVKitUtility):
description = 'Sort CSV files. Like the Unix "sort" command, but for tabular data.'
def add_arguments(self):
self.argparser.add_argument(
'-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument(
'-c', '--columns', dest='columns',
help='A comma-separated list of column indices, names or ranges to sort by, e.g. "1,id,3-5". '
'Defaults to all columns.')
self.argparser.add_argument(
'-r', '--reverse', dest='reverse', action='store_true',
help='Sort in descending order.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference when parsing the input.')
def main(self):
if self.args.names_only:
self.print_column_names()
return
if self.additional_input_expected():
self.argparser.error('You must provide an input file or piped data.')
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
table = agate.Table.from_csv(
self.input_file,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
column_types=self.get_column_types(),
**self.reader_kwargs
)
column_ids = parse_column_identifiers(
self.args.columns,
table.column_names,
self.get_column_offset()
)
table = table.order_by(column_ids, reverse=self.args.reverse)
table.to_csv(self.output_file, **self.writer_kwargs)
def launch_new_instance():
utility = CSVSort()
utility.run()
if __name__ == '__main__':
launch_new_instance()
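
# Hypothetical command-line usage (flags as declared in add_arguments above;
# file names are illustrative):
#     csvsort -c 2 -r data.csv        sort by the second column, descending
#     csvsort -c id,3-5 data.csv      sort by "id", then by columns 3 through 5
#     csvsort -n data.csv             print column names and indices, then exit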
|
wireservice/csvkit
|
csvkit/utilities/csvsort.py
|
Python
|
mit
| 2,282
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from lxml import etree
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import simplejson as json
from django.contrib.contenttypes.models import ContentType
from agon_ratings.models import OverallRating
from django.contrib.auth import get_user_model
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.utils import default_map_config
from geonode.base.populate_test_data import create_models
from geonode.maps.tests_populate_maplayers import create_maplayers
class MapsTest(TestCase):
"""Tests geonode.maps app/module
"""
fixtures = ['initial_data.json', 'bobby']
def setUp(self):
self.user = 'admin'
self.passwd = 'admin'
create_models(type='map')
create_models(type='layer')
create_maplayers()
default_abstract = "This is a demonstration of GeoNode, an application \
for assembling and publishing web based maps. After adding layers to the map, \
use the Save Map button above to contribute your map to the GeoNode \
community."
default_title = "GeoNode Default Map"
# This is a valid map viewer config, based on the sample data provided
# by andreas in issue 566. -dwins
viewer_config = """
{
"defaultSourceType": "gx_wmssource",
"about": {
"title": "Title",
"abstract": "Abstract"
},
"sources": {
"capra": {
"url":"http://localhost:8080/geoserver/wms"
}
},
"map": {
"projection":"EPSG:900913",
"units":"m",
"maxResolution":156543.0339,
"maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
"center":[-9428760.8688778,1436891.8972581],
"layers":[{
"source":"capra",
"buffer":0,
"wms":"capra",
"name":"base:nic_admin"
}],
"keywords":["saving", "keywords"],
"zoom":7
}
}
"""
viewer_config_alternative = """
{
"defaultSourceType": "gx_wmssource",
"about": {
"title": "Title2",
"abstract": "Abstract2"
},
"sources": {
"capra": {
"url":"http://localhost:8080/geoserver/wms"
}
},
"map": {
"projection":"EPSG:900913",
"units":"m",
"maxResolution":156543.0339,
"maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
"center":[-9428760.8688778,1436891.8972581],
"layers":[{
"source":"capra",
"buffer":0,
"wms":"capra",
"name":"base:nic_admin"
}],
"zoom":7
}
}
"""
perm_spec = {
"users": {
"admin": [
"change_resourcebase",
"change_resourcebase_permissions",
"view_resourcebase"]},
"groups": {}}
def test_map_json(self):
# Test that saving a map when not logged in gives 401
response = self.client.put(
reverse(
'map_json',
args=(
'1',
)),
data=self.viewer_config,
content_type="text/json")
self.assertEqual(response.status_code, 401)
self.client.login(username=self.user, password=self.passwd)
response = self.client.put(
reverse(
'map_json',
args=(
'1',
)),
data=self.viewer_config_alternative,
content_type="text/json")
self.assertEqual(response.status_code, 200)
map_obj = Map.objects.get(id=1)
self.assertEquals(map_obj.title, "Title2")
self.assertEquals(map_obj.abstract, "Abstract2")
self.assertEquals(map_obj.layer_set.all().count(), 1)
def test_map_save(self):
"""POST /maps/new/data -> Test saving a new map"""
new_map = reverse("new_map_json")
# Test that saving a map when not logged in gives 401
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEqual(response.status_code, 401)
# Test successful new map creation
self.client.login(username=self.user, password=self.passwd)
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
self.client.logout()
# We have now 9 maps and 8 layers so the next pk will be 18
self.assertEquals(map_id, 18)
map_obj = Map.objects.get(id=map_id)
self.assertEquals(map_obj.title, "Title")
self.assertEquals(map_obj.abstract, "Abstract")
self.assertEquals(map_obj.layer_set.all().count(), 1)
self.assertEquals(map_obj.keyword_list(), [u"keywords", u"saving"])
self.assertNotEquals(map_obj.bbox_x0, None)
# Test an invalid map creation request
self.client.login(username=self.user, password=self.passwd)
response = self.client.post(
new_map,
data="not a valid viewer config",
content_type="text/json")
self.assertEquals(response.status_code, 400)
self.client.logout()
def test_map_fetch(self):
"""/maps/[id]/data -> Test fetching a map in JSON"""
map_obj = Map.objects.get(id=1)
map_obj.set_default_permissions()
response = self.client.get(reverse('map_json', args=(map_obj.id,)))
self.assertEquals(response.status_code, 200)
cfg = json.loads(response.content)
self.assertEquals(
cfg["about"]["abstract"],
'GeoNode default map abstract')
self.assertEquals(cfg["about"]["title"], 'GeoNode Default Map')
self.assertEquals(len(cfg["map"]["layers"]), 5)
def test_map_to_json(self):
""" Make some assertions about the data structure produced for serialization
to a JSON map configuration"""
map_obj = Map.objects.get(id=1)
cfg = map_obj.viewer_json(None)
self.assertEquals(
cfg['about']['abstract'],
'GeoNode default map abstract')
self.assertEquals(cfg['about']['title'], 'GeoNode Default Map')
def is_wms_layer(x):
if 'source' in x:
return cfg['sources'][x['source']]['ptype'] == 'gxp_wmscsource'
return False
layernames = [x['name']
for x in cfg['map']['layers'] if is_wms_layer(x)]
self.assertEquals(layernames, ['geonode:CA', ])
def test_map_to_wmc(self):
""" /maps/1/wmc -> Test map WMC export
Make some assertions about the data structure produced
for serialization to a Web Map Context Document
"""
map_obj = Map.objects.get(id=1)
map_obj.set_default_permissions()
response = self.client.get(reverse('map_wmc', args=(map_obj.id,)))
self.assertEquals(response.status_code, 200)
# check specific XPaths
wmc = etree.fromstring(response.content)
namespace = '{http://www.opengis.net/context}'
title = '{ns}General/{ns}Title'.format(ns=namespace)
abstract = '{ns}General/{ns}Abstract'.format(ns=namespace)
self.assertEquals(wmc.attrib.get('id'), '1')
self.assertEquals(wmc.find(title).text, 'GeoNode Default Map')
self.assertEquals(
wmc.find(abstract).text,
'GeoNode default map abstract')
def test_newmap_to_json(self):
""" Make some assertions about the data structure produced for serialization
to a new JSON map configuration"""
response = self.client.get(reverse('new_map_json'))
cfg = json.loads(response.content)
self.assertEquals(cfg['defaultSourceType'], "gxp_wmscsource")
def test_map_details(self):
"""/maps/1 -> Test accessing the map browse view function"""
map_obj = Map.objects.get(id=1)
map_obj.set_default_permissions()
response = self.client.get(reverse('map_detail', args=(map_obj.id,)))
self.assertEquals(response.status_code, 200)
def test_new_map_without_layers(self):
# TODO: Should this test have asserts in it?
self.client.get(reverse('new_map'))
def test_new_map_with_layer(self):
layer = Layer.objects.all()[0]
self.client.get(reverse('new_map') + '?layer=' + layer.typename)
def test_new_map_with_empty_bbox_layer(self):
layer = Layer.objects.all()[0]
self.client.get(reverse('new_map') + '?layer=' + layer.typename)
def test_ajax_map_permissions(self):
"""Verify that the ajax_layer_permissions view is behaving as expected
"""
# Setup some layer names to work with
mapid = Map.objects.all()[0].pk
invalid_mapid = "42"
def url(id):
return reverse('resource_permissions', args=[id])
        # Test that an invalid map id is handled properly
response = self.client.post(
url(invalid_mapid),
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 404)
# Test that GET returns permissions
response = self.client.get(url(mapid))
assert('permissions' in response.content)
# Test that a user is required to have permissions
# First test un-authenticated
response = self.client.post(
url(mapid),
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 401)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
url(mapid),
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 401)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
url(mapid),
data=json.dumps(self.perm_spec),
content_type="application/json")
# Test that the method returns 200
self.assertEquals(response.status_code, 200)
# Test that the permissions specification is applied
def test_map_metadata(self):
"""Test that map metadata can be properly rendered
"""
# first create a map
# Test successful new map creation
self.client.login(username=self.user, password=self.passwd)
new_map = reverse('new_map_json')
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
self.client.logout()
url = reverse('map_metadata', args=(map_id,))
        # test that an unauthenticated user cannot modify map metadata
response = self.client.post(url)
self.assertEquals(response.status_code, 302)
# test a user without metadata modify permission
self.client.login(username='norman', password='norman')
response = self.client.post(url)
self.assertEquals(response.status_code, 302)
self.client.logout()
# Now test with a valid user using GET method
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# Now test with a valid user using POST method
self.client.login(username=self.user, password=self.passwd)
response = self.client.post(url)
self.assertEquals(response.status_code, 200)
# TODO: only invalid mapform is tested
def test_map_remove(self):
"""Test that map can be properly removed
"""
# first create a map
# Test successful new map creation
self.client.login(username=self.user, password=self.passwd)
new_map = reverse('new_map_json')
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
self.client.logout()
url = reverse('map_remove', args=(map_id,))
        # test that an unauthenticated user cannot remove the map
response = self.client.post(url)
self.assertEquals(response.status_code, 302)
# test a user without map removal permission
self.client.login(username='norman', password='norman')
response = self.client.post(url)
self.assertEquals(response.status_code, 302)
self.client.logout()
# Now test with a valid user using GET method
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# Now test with a valid user using POST method,
# which removes map and associated layers, and redirects webpage
response = self.client.post(url)
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver/maps/')
        # After removal, the map no longer exists
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
        # Prepare the map object for a later check that it is completely removed
        # map_obj = Map.objects.get(id=1)
        # TODO: also verify that associated layers no longer exist
        # self.assertEquals(map_obj.layer_set.all().count(), 0)
def test_map_embed(self):
"""Test that map can be properly embedded
"""
# first create a map
# Test successful new map creation
self.client.login(username=self.user, password=self.passwd)
new_map = reverse('new_map_json')
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
self.client.logout()
url = reverse('map_embed', args=(map_id,))
url_no_id = reverse('map_embed')
# Now test with a map id
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# The embedded map is exempt from X-FRAME-OPTIONS restrictions.
if hasattr(response, 'xframe_options_exempt'):
self.assertTrue(response.xframe_options_exempt)
# Config equals to that of the map whose id is given
map_obj = Map.objects.get(id=map_id)
config_map = map_obj.viewer_json(None)
response_config_dict = json.loads(response.context['config'])
self.assertEquals(
config_map['about']['abstract'],
response_config_dict['about']['abstract'])
self.assertEquals(
config_map['about']['title'],
response_config_dict['about']['title'])
# Now test without a map id
response = self.client.get(url_no_id)
self.assertEquals(response.status_code, 200)
# Config equals to that of the default map
config_default = default_map_config()[0]
response_config_dict = json.loads(response.context['config'])
self.assertEquals(
config_default['about']['abstract'],
response_config_dict['about']['abstract'])
self.assertEquals(
config_default['about']['title'],
response_config_dict['about']['title'])
def test_map_view(self):
"""Test that map view can be properly rendered
"""
# first create a map
# Test successful new map creation
self.client.login(username=self.user, password=self.passwd)
new_map = reverse('new_map_json')
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
self.client.logout()
url = reverse('map_view', args=(map_id,))
        # test an unauthenticated user viewing the map
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# TODO: unauthenticated user can still access the map view
# test a user without map view permission
self.client.login(username='norman', password='norman')
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.client.logout()
# TODO: the user can still access the map view without permission
# Now test with a valid user using GET method
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# Config equals to that of the map whose id is given
map_obj = Map.objects.get(id=map_id)
config_map = map_obj.viewer_json(None)
response_config_dict = json.loads(response.context['config'])
self.assertEquals(
config_map['about']['abstract'],
response_config_dict['about']['abstract'])
self.assertEquals(
config_map['about']['title'],
response_config_dict['about']['title'])
def test_new_map_config(self):
"""Test that new map config can be properly assigned
"""
self.client.login(username='admin', password='admin')
# Test successful new map creation
m = Map()
admin_user = get_user_model().objects.get(username='admin')
layer_name = Layer.objects.all()[0].typename
m.create_from_layer_list(admin_user, [layer_name], "title", "abstract")
map_id = m.id
url = reverse('new_map_json')
# Test GET method with COPY
response = self.client.get(url, {'copy': map_id})
self.assertEquals(response.status_code, 200)
map_obj = Map.objects.get(id=map_id)
config_map = map_obj.viewer_json(None)
response_config_dict = json.loads(response.content)
self.assertEquals(
config_map['map']['layers'],
response_config_dict['map']['layers'])
# Test GET method no COPY and no layer in params
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
config_default = default_map_config()[0]
response_config_dict = json.loads(response.content)
self.assertEquals(
config_default['about']['abstract'],
response_config_dict['about']['abstract'])
self.assertEquals(
config_default['about']['title'],
response_config_dict['about']['title'])
# Test GET method no COPY but with layer in params
response = self.client.get(url, {'layer': layer_name})
self.assertEquals(response.status_code, 200)
response_dict = json.loads(response.content)
self.assertEquals(response_dict['fromLayer'], True)
# Test POST method without authentication
self.client.logout()
response = self.client.post(url, {'layer': layer_name})
self.assertEquals(response.status_code, 401)
# Test POST method with authentication and a layer in params
self.client.login(username='admin', password='admin')
response = self.client.post(url, {'layer': layer_name})
# Should not accept the request
self.assertEquals(response.status_code, 400)
# Test POST method with map data in json format
response = self.client.post(
url,
data=self.viewer_config,
content_type="text/json")
self.assertEquals(response.status_code, 200)
map_id = int(json.loads(response.content)['id'])
# Test methods other than GET or POST and no layer in params
response = self.client.put(url)
self.assertEquals(response.status_code, 405)
def test_rating_map_remove(self):
"""Test map rating is removed on map remove
"""
self.client.login(username=self.user, password=self.passwd)
new_map = reverse('new_map_json')
# Create the map
response = self.client.post(
new_map,
data=self.viewer_config,
content_type="text/json")
map_id = int(json.loads(response.content)['id'])
# Create the rating with the correct content type
ctype = ContentType.objects.get(model='map')
OverallRating.objects.create(
category=1,
object_id=map_id,
content_type=ctype,
rating=3)
# Remove the map
response = self.client.post(reverse('map_remove', args=(map_id,)))
self.assertEquals(response.status_code, 302)
# Check there are no ratings matching the removed map
rating = OverallRating.objects.filter(category=1, object_id=map_id)
self.assertEquals(rating.count(), 0)
|
RCMRD/geonode
|
geonode/maps/tests.py
|
Python
|
gpl-3.0
| 22,485
|
from enigma import eServiceCenter, eServiceReference, pNavigation, getBestPlayableServiceReference, iPlayableService, setPreferredTuner, eStreamServer
from Components.ParentalControl import parentalControl
from Components.SystemInfo import SystemInfo
from Components.config import config, configfile
from Tools.BoundFunction import boundFunction
from Tools.StbHardware import getFPWasTimerWakeup
from Tools import Notifications
from time import time
import RecordTimer
import Screens.Standby
import NavigationInstance
import ServiceReference
from Screens.InfoBar import InfoBar
from Components.Sources.StreamService import StreamServiceList
# TODO: remove pNavigation, eNavigation and rewrite this stuff in Python.
class Navigation:
def __init__(self):
if NavigationInstance.instance is not None:
raise NavigationInstance.instance
NavigationInstance.instance = self
self.ServiceHandler = eServiceCenter.getInstance()
import Navigation as Nav
Nav.navcore = self
self.pnav = pNavigation()
self.pnav.m_event.get().append(self.dispatchEvent)
self.pnav.m_record_event.get().append(self.dispatchRecordEvent)
self.event = [ ]
self.record_event = [ ]
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
self.currentlyPlayingService = None
self.RecordTimer = RecordTimer.RecordTimer()
self.__wasTimerWakeup = getFPWasTimerWakeup()
self.__isRestartUI = config.misc.RestartUI.value
startup_to_standby = config.usage.startup_to_standby.value
wakeup_time_type = config.misc.prev_wakeup_time_type.value
if self.__wasTimerWakeup:
RecordTimer.RecordTimerEntry.setWasInDeepStandby()
if config.misc.RestartUI.value:
config.misc.RestartUI.value = False
config.misc.RestartUI.save()
configfile.save()
elif startup_to_standby == "yes" or self.__wasTimerWakeup and config.misc.prev_wakeup_time.value and ((wakeup_time_type == 0 or wakeup_time_type == 1) or ( wakeup_time_type == 3 and startup_to_standby == "except")):
if not Screens.Standby.inTryQuitMainloop:
Notifications.AddNotification(Screens.Standby.Standby)
if config.misc.prev_wakeup_time.value:
config.misc.prev_wakeup_time.value = 0
config.misc.prev_wakeup_time.save()
configfile.save()
def wasTimerWakeup(self):
return self.__wasTimerWakeup
def isRestartUI(self):
return self.__isRestartUI
def dispatchEvent(self, i):
for x in self.event:
x(i)
if i == iPlayableService.evEnd:
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
self.currentlyPlayingService = None
def dispatchRecordEvent(self, rec_service, event):
# print "record_event", rec_service, event
for x in self.record_event:
x(rec_service, event)
def playService(self, ref, checkParentalControl=True, forceRestart=False, adjust=True):
oldref = self.currentlyPlayingServiceOrGroup
if ref and oldref and ref == oldref and not forceRestart:
print "ignore request to play already running service(1)"
return 1
print "playing", ref and ref.toString()
if ref is None:
self.stopService()
return 0
from Components.ServiceEventTracker import InfoBarCount
InfoBarInstance = InfoBarCount == 1 and InfoBar.instance
if not checkParentalControl or parentalControl.isServicePlayable(ref, boundFunction(self.playService, checkParentalControl=False, forceRestart=forceRestart, adjust=adjust)):
if ref.flags & eServiceReference.isGroup:
oldref = self.currentlyPlayingServiceReference or eServiceReference()
playref = getBestPlayableServiceReference(ref, oldref)
print "playref", playref
if playref and oldref and playref == oldref and not forceRestart:
print "ignore request to play already running service(2)"
return 1
if not playref:
alternativeref = getBestPlayableServiceReference(ref, eServiceReference(), True)
self.stopService()
if alternativeref and self.pnav:
self.currentlyPlayingServiceReference = alternativeref
self.currentlyPlayingServiceOrGroup = ref
if self.pnav.playService(alternativeref):
print "Failed to start", alternativeref
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
return 0
elif checkParentalControl and not parentalControl.isServicePlayable(playref, boundFunction(self.playService, checkParentalControl = False)):
if self.currentlyPlayingServiceOrGroup and InfoBarInstance and InfoBarInstance.servicelist.servicelist.setCurrent(self.currentlyPlayingServiceOrGroup, adjust):
self.currentlyPlayingServiceOrGroup = InfoBarInstance.servicelist.servicelist.getCurrent()
return 1
else:
playref = ref
if self.pnav:
self.pnav.stopService()
self.currentlyPlayingServiceReference = playref
self.currentlyPlayingServiceOrGroup = ref
if InfoBarInstance and InfoBarInstance.servicelist.servicelist.setCurrent(ref, adjust):
self.currentlyPlayingServiceOrGroup = InfoBarInstance.servicelist.servicelist.getCurrent()
setPriorityFrontend = False
if SystemInfo["DVB-T_priority_tuner_available"] or SystemInfo["DVB-C_priority_tuner_available"] or SystemInfo["DVB-S_priority_tuner_available"] or SystemInfo["ATSC_priority_tuner_available"]:
str_service = playref.toString()
if '%3a//' not in str_service and not str_service.rsplit(":", 1)[1].startswith("/"):
type_service = playref.getUnsignedData(4) >> 16
if type_service == 0xEEEE:
if SystemInfo["DVB-T_priority_tuner_available"] and config.usage.frontend_priority_dvbt.value != "-2":
if config.usage.frontend_priority_dvbt.value != config.usage.frontend_priority.value:
setPreferredTuner(int(config.usage.frontend_priority_dvbt.value))
setPriorityFrontend = True
if SystemInfo["ATSC_priority_tuner_available"] and config.usage.frontend_priority_atsc.value != "-2":
if config.usage.frontend_priority_atsc.value != config.usage.frontend_priority.value:
setPreferredTuner(int(config.usage.frontend_priority_atsc.value))
setPriorityFrontend = True
elif type_service == 0xFFFF:
if SystemInfo["DVB-C_priority_tuner_available"] and config.usage.frontend_priority_dvbc.value != "-2":
if config.usage.frontend_priority_dvbc.value != config.usage.frontend_priority.value:
setPreferredTuner(int(config.usage.frontend_priority_dvbc.value))
setPriorityFrontend = True
if SystemInfo["ATSC_priority_tuner_available"] and config.usage.frontend_priority_atsc.value != "-2":
if config.usage.frontend_priority_atsc.value != config.usage.frontend_priority.value:
setPreferredTuner(int(config.usage.frontend_priority_atsc.value))
setPriorityFrontend = True
else:
if SystemInfo["DVB-S_priority_tuner_available"] and config.usage.frontend_priority_dvbs.value != "-2":
if config.usage.frontend_priority_dvbs.value != config.usage.frontend_priority.value:
setPreferredTuner(int(config.usage.frontend_priority_dvbs.value))
setPriorityFrontend = True
if self.pnav.playService(playref):
print "Failed to start", playref
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
if setPriorityFrontend:
setPreferredTuner(int(config.usage.frontend_priority.value))
return 0
elif oldref and InfoBarInstance and InfoBarInstance.servicelist.servicelist.setCurrent(oldref, adjust):
self.currentlyPlayingServiceOrGroup = InfoBarInstance.servicelist.servicelist.getCurrent()
return 1
def getCurrentlyPlayingServiceReference(self):
return self.currentlyPlayingServiceReference
def getCurrentlyPlayingServiceOrGroup(self):
return self.currentlyPlayingServiceOrGroup
def recordService(self, ref, simulate=False):
service = None
if not simulate: print "recording service: %s" % (str(ref))
if isinstance(ref, ServiceReference.ServiceReference):
ref = ref.ref
if ref:
if ref.flags & eServiceReference.isGroup:
ref = getBestPlayableServiceReference(ref, eServiceReference(), simulate)
service = ref and self.pnav and self.pnav.recordService(ref, simulate)
if service is None:
print "record returned non-zero"
return service
def stopRecordService(self, service):
ret = self.pnav and self.pnav.stopRecordService(service)
return ret
def getRecordings(self, simulate=False):
recs = self.pnav and self.pnav.getRecordings(simulate)
if not simulate and StreamServiceList:
for rec in recs[:]:
if rec.__deref__() in StreamServiceList:
recs.remove(rec)
return recs
def getCurrentService(self):
if not self.currentlyPlayingService:
self.currentlyPlayingService = self.pnav and self.pnav.getCurrentService()
return self.currentlyPlayingService
def stopService(self):
if self.pnav:
self.pnav.stopService()
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
def pause(self, p):
return self.pnav and self.pnav.pause(p)
def shutdown(self):
self.RecordTimer.shutdown()
self.ServiceHandler = None
self.pnav = None
def stopUserServices(self):
self.stopService()
def getClientsStreaming(self):
return eStreamServer.getInstance() and eStreamServer.getInstance().getConnectedClients()
|
ACJTeam/enigma2
|
Navigation.py
|
Python
|
gpl-2.0
| 9,282
|
import os.path
import pitch_histogram
import mfccs
import event_histogram
FEATURES_FILE = './features.csv'

def featurize(releases):
    releases = [str(x) for x in releases]
    # open in append mode; write the CSV header only when creating the file
    write_header = not os.path.isfile(FEATURES_FILE)
    features = open(FEATURES_FILE, 'a')
    if write_header:
        features.write('release,tatum_distribution,pitch_distribution,mel_frequency_cepstrum_coefficients\n')
for release in releases:
try:
result = [release]
result += [float(x) for x in list(event_histogram.get_event_histogram(release))]
result += [float(x) for x in list(pitch_histogram.get_pitch_histogram(release))]
mfs = mfccs.get_mfccs(release)
mfs = list(mfs[0]) + list(mfs[1])
result += [float(x) for x in mfs]
line = ','.join([str(x) for x in result]) + '\n'
features.write(line)
        except Exception:
features.write(release + " failed featurization\n")
features.close()
to_run_on = pitch_histogram.keys.keys()
to_run_on = [float(x) for x in to_run_on]
to_run_on.sort()
to_run_on = [str(x) for x in to_run_on]
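
# Note: this module builds `to_run_on` at import time but never calls
# featurize() itself; a hypothetical driver would run, e.g.:
#     featurize(to_run_on)
# appending one CSV row per successfully featurized release to FEATURES_FILE.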
|
lathertonj/RemixNoveltyRanker
|
Code/featurize.py
|
Python
|
gpl-2.0
| 1,137
|
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""utilities methods and classes for checkers
Base id of standard checkers (used in msg and report ids):
01: base
02: classes
03: format
04: import
05: misc
06: variables
07: exceptions
08: similar
09: design_analysis
10: newstyle
11: typecheck
12: logging
13: string_format
14-50: not yet used: reserved for future internal checkers.
51-99: perhaps used: reserved for external checkers
The raw_metrics checker has no number associated since it doesn't emit any
messages nor reports. XXX not true, it emits a 07 report!
"""
import tokenize
from os import listdir
from os.path import dirname, join, isdir, splitext
from logilab.astng.utils import ASTWalker
from logilab.common.configuration import OptionsProviderMixIn
from pylint.reporters import diff_string, EmptyReport
def table_lines_from_stats(stats, old_stats, columns):
"""get values listed in <columns> from <stats> and <old_stats>,
    and return a formatted list of values, designed to be given to a
ureport.Table object
"""
lines = []
for m_type in columns:
new = stats[m_type]
format = str
if isinstance(new, float):
format = lambda num: '%.3f' % num
old = old_stats.get(m_type)
if old is not None:
diff_str = diff_string(old, new)
old = format(old)
else:
old, diff_str = 'NC', 'NC'
lines += (m_type.replace('_', ' '), format(new), old, diff_str)
return lines
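
# Worked example (hypothetical values): with stats={'module': 4},
# old_stats={'module': 3} and columns=('module',), the call returns
# ['module', '4', '3', diff_string(3, 4)] -- one flat run of four cells per
# metric, ready to be handed to a ureport.Table.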
class BaseChecker(OptionsProviderMixIn, ASTWalker):
"""base class for checkers"""
# checker name (you may reuse an existing one)
name = None
    # options level (0 will be displayed in --help, 1 in --long-help)
level = 1
    # ordered list of options to control the checker behaviour
options = ()
# messages issued by this checker
msgs = {}
# reports issued by this checker
reports = ()
def __init__(self, linter=None):
"""checker instances should have the linter as argument
linter is an object implementing ILinter
"""
ASTWalker.__init__(self, self)
self.name = self.name.lower()
OptionsProviderMixIn.__init__(self)
self.linter = linter
# messages that are active for the current check
self.active_msgs = set()
def add_message(self, msg_id, line=None, node=None, args=None):
"""add a message of a given type"""
self.linter.add_message(msg_id, line, node, args)
def package_dir(self):
"""return the base directory for the analysed package"""
return dirname(self.linter.base_file)
# dummy methods implementing the IChecker interface
    def open(self):
        """called before visiting project (i.e. set of modules)"""
    def close(self):
        """called after visiting project (i.e. set of modules)"""
class BaseRawChecker(BaseChecker):
"""base class for raw checkers"""
def process_module(self, node):
"""process a module
the module's content is accessible via the stream object
stream must implement the readline method
"""
stream = node.file_stream
stream.seek(0)
self.process_tokens(tokenize.generate_tokens(stream.readline))
def process_tokens(self, tokens):
"""should be overridden by subclasses"""
raise NotImplementedError()
PY_EXTS = ('.py', '.pyc', '.pyo', '.pyw', '.so', '.dll')
def initialize(linter):
"""initialize linter with checkers in this package """
package_load(linter, __path__[0])
def package_load(linter, directory):
"""load all module and package in the given directory, looking for a
'register' function in each one, used to register pylint checkers
"""
globs = globals()
imported = {}
for filename in listdir(directory):
basename, extension = splitext(filename)
if basename in imported or basename == '__pycache__':
continue
if extension in PY_EXTS and basename != '__init__' or (
not extension and basename != 'CVS' and
isdir(join(directory, basename))):
try:
module = __import__(basename, globs, globs, None)
except ValueError:
# empty module name (usually emacs auto-save files)
continue
except ImportError, exc:
import sys
print >> sys.stderr, "Problem importing module %s: %s" % (filename, exc)
else:
if hasattr(module, 'register'):
module.register(linter)
imported[basename] = 1
__all__ = ('CheckerHandler', 'BaseChecker', 'initialize', 'package_load')
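
# Minimal checker sketch (hypothetical, for illustration): any module dropped
# into this package that exposes a register(linter) function is picked up by
# package_load() above. Message ids W51xx fall in the range reserved for
# external checkers (see the module docstring).
#
#     class DemoChecker(BaseChecker):
#         name = 'demo'
#         msgs = {'W5101': ('demo message', 'demo message help')}
#
#         def visit_module(self, node):
#             self.add_message('W5101', node=node)
#
#     def register(linter):
#         linter.register_checker(DemoChecker(linter))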
|
isohybrid/dotfile
|
vim/bundle/git:--github.com-klen-python-mode/pylibs/pylint/checkers/__init__.py
|
Python
|
bsd-2-clause
| 5,491
|
from operator import add
from flask import render_template
from markdown import markdown
from warmsea.models import SrmRound
def index():
rounds = SrmRound.query.order_by('date')
problems = []
for problems_in_a_round in [list(r.problems.order_by('name')) for r in rounds]:
problems += problems_in_a_round
problems = [
{
'id': pm.id,
'name': pm.name,
'rounds': [{
'id': r.id,
'name': r.name,
'topcoder_url': r.topcoder_url
} for r in pm.rounds],
'related_posts_html': ', '.join(
['<a href="/blog/post/%d/" title="%s">#%d<a>' % (post.id, post.title, post.id)
for post in pm.related_posts]),
'stars_html': '<i class="fa fa-star"></i>'.join([''] * (pm.stars + 1)),
'categories': [
{
'id': c.id,
'name': c.name
} for c in pm.categories
],
'note': markdown(pm.note.strip(), ['extra']),
'topcoder_url': pm.topcoder_url
} for pm in problems
]
context = {
'problems': problems
}
return render_template('srm/index.html', **context)
|
warmsea/warmsea.net
|
warmsea/srm/views.py
|
Python
|
mit
| 1,263
|
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from core.models import TimeStampedModel
from accounts.models import Account
class Board(models.Model):
def __str__(self):
return 'Board Name: ' + self.name
def get_absolute_url(self):
return reverse('board:post_list', args=[self.slug])
slug = models.CharField(default='', unique=True, max_length=100)
name = models.CharField(default='', max_length=100)
posts_chunk_size = models.IntegerField(default=10)
post_pages_nav_chunk_size = models.IntegerField(default=10)
comments_chunk_size = models.IntegerField(default=5)
comment_pages_nav_chunk_size = models.IntegerField(default=10)
class PostQuerySet(models.QuerySet):
def search(self, search_flag, query):
if search_flag == 'TITLE':
return self.filter(title__contains=query)
elif search_flag == 'CONTENT':
return self.filter(content__contains=query)
elif search_flag == 'BOTH':
return self.filter(Q(title__contains=query) | Q(content__contains=query))
else:
return self.all()
def remain(self):
return self.filter(is_deleted=False)
def board(self, board):
return self.filter(board=board)
class PostManager(models.Manager):
def get_queryset(self):
return PostQuerySet(self.model, using=self._db)
def search(self, search_flag, query):
return self.get_queryset().search(search_flag, query)
def remain(self):
return self.get_queryset().remain()
def board(self, board):
return self.get_queryset().board(board)
class Post(TimeStampedModel):
def __str__(self):
return 'Post Title: ' + self.title
SEARCH_FLAG = [
('TITLE', '제목'),
('CONTENT', '내용'),
('BOTH', '제목+내용')
]
objects = PostManager()
title = models.CharField(blank=False, max_length=100)
content = models.TextField(default='')
board = models.ForeignKey(Board, null=True)
is_deleted = models.BooleanField(default=False)
page_view_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
account = models.ForeignKey(Account, null=True)
ip = models.GenericIPAddressField(null=True, default='')
def get_absolute_url(self):
return reverse('board:view_post', args=[self.id])
class EditedPostHistory(TimeStampedModel):
post = models.ForeignKey(Post, null=False, default=None)
title = models.CharField(default='', max_length=100)
content = models.TextField(default='')
ip = models.GenericIPAddressField(null=True, default='')
class Attachment(models.Model):
post = models.ForeignKey(Post, null=True)
editedPostHistory = models.ForeignKey(EditedPostHistory, null=True, default=None)
attachment = models.FileField(blank=True, null=True)
class Comment(TimeStampedModel):
content = models.TextField(default='')
post = models.ForeignKey(Post, null=True)
is_deleted = models.BooleanField(default=False)
account = models.ForeignKey(Account, null=True)
ip = models.GenericIPAddressField(null=True, default='')
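
# Usage sketch for the custom manager/queryset above (the board slug and the
# query string are hypothetical):
#
#     board = Board.objects.get(slug='free')
#     posts = Post.objects.board(board).remain().search('BOTH', 'django')
#
# i.e. non-deleted posts on one board whose title or content contains the
# query; any search flag other than TITLE/CONTENT/BOTH falls through to .all().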
|
hyesun03/k-board
|
kboard/board/models.py
|
Python
|
mit
| 3,211
|
#
# This file is part of Bluepass. Bluepass is Copyright (c) 2012-2013
# Geert Jansen.
#
# Bluepass is free software available under the GNU General Public License,
# version 3. See the file LICENSE distributed with this file for the exact
# licensing terms.
from __future__ import absolute_import, print_function
import six
import base64
import binascii
Error = binascii.Error
# Types are as follows:
#
# Encode: bytes -> str
# Decode: str -> bytes
#
# Note this is different from the stdlib where base64 always works on bytes,
# and returns bytes. The types below make more sense to us because we always
# store the result of a base64 encoding in a dict that will be converted to
# JSON, which is unicode based.
def encode(b):
"""Encode a string into base-64 encoding."""
if not isinstance(b, six.binary_type):
raise TypeError('expecting bytes')
return base64.b64encode(b).decode('ascii')
def decode(s):
"""Decode a base-64 encoded string."""
if not isinstance(s, six.string_types):
raise TypeError('expecting string')
try:
return base64.b64decode(s)
except binascii.Error as e:
raise ValueError(str(e))
def check(s):
"""Check that `s' is a properly encoded base64 string."""
if not isinstance(s, six.string_types):
raise TypeError('expecting string')
try:
base64.b64decode(s)
except binascii.Error:
return False
return True
def try_decode(s):
"""Decode a base64 string and return None if there was an error."""
try:
return decode(s)
    except ValueError:  # decode() above re-raises binascii.Error as ValueError
pass
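# A minimal round-trip sketch of the str/bytes convention from the module
# comment above (illustrative helper, not part of the original module):
def _example_roundtrip():
    s = encode(b'hello')           # bytes in, unicode str out
    assert s == 'aGVsbG8='
    assert decode(s) == b'hello'   # unicode str in, bytes out
    assert check(s)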
|
geertj/bluepass
|
bluepass/base64.py
|
Python
|
gpl-3.0
| 1,605
|
#copyright ReportLab Europe Limited. 2000-2012
#see license.txt for license details
import os, sys
import unittest
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
from reportlab.pdfgen import canvas
from reportlab.lib import pdfencrypt
def makedoc(fileName, userPass="User", ownerPass="Owner"):
"""
Creates a simple encrypted pdf.
"""
encrypt = pdfencrypt.StandardEncryption(userPass, ownerPass)
    encrypt.setAllPermissions(0)  # revoke every permission ...
    encrypt.canPrint = 1          # ... then re-allow printing only
c = canvas.Canvas(fileName)
c._doc.encrypt = encrypt
c.drawString(100, 500, "hello world")
c.save()
def parsedoc(fileName):
"""
Using PDFParseContext object from Pagecatcher module to check for encryption.
"""
try:
from rlextra.pageCatcher.pageCatcher import PDFParseContext
except ImportError:
return
    with open(fileName, 'rb') as f:
        pdfContent = f.read()
p = PDFParseContext(pdfContent, prefix="PageForms")
p.parse()
assert p.encrypt
class ManualTestCase(unittest.TestCase):
"Runs manual encrypted file builders."
def test(self):
filepath = outputfile('test_pdfencryption.pdf')
makedoc(filepath)
parsedoc(filepath)
def makeSuite():
return makeSuiteForClasses(ManualTestCase)
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
|
nickpack/reportlab
|
tests/test_pdfencryption.py
|
Python
|
bsd-3-clause
| 1,443
|
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse import SparseDataFrame, SparseSeries
from pandas.sparse.array import SparseArray
from pandas._sparse import IntIndex
from pandas.core.categorical import Categorical
from pandas.core.common import notnull, _ensure_platform_int, _maybe_promote
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.common as com
import pandas.algos as algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = com.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
values = [ Categorical.from_array(values[:,i],
categories=self.is_categorical.categories,
ordered=True)
for i in range(values.shape[-1]) ]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width: (i + 1) * width]
mask_chunk = new_mask[:, i * width: (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels,
labels=result_labels,
names=self.new_index_names,
verify_integrity=False)
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids,
obs_ids, shape, clabels, xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = Series(data.values, index=dummy_index)
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [val if i > val else val - 1 for val in clocs]
return result
dummy = DataFrame(data.values, index=dummy_index,
columns=data.columns)
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index,
self[columns]]))
return indexed.unstack(columns)
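# Illustrative sketch (hypothetical frame, not part of the original module).
# `pivot` is written with a `self` parameter because it is attached to
# DataFrame elsewhere; here it is called as a plain function.
def _example_pivot():
    df = DataFrame({'row': ['r1', 'r1', 'r2'],
                    'col': ['a', 'b', 'a'],
                    'val': [1, 2, 3]})
    # 2x2 frame indexed by 'row' with columns 'a'/'b'; ('r2', 'b') is NaN
    return pivot(df, index='row', columns='col', values='val')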
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
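# Illustrative sketch of pivot_simple on three aligned arrays (hypothetical
# helper, not part of the original module):
def _example_pivot_simple():
    index = np.array(['one', 'one', 'two'])
    columns = np.array(['a', 'b', 'a'])
    values = np.array([1., 2., 3.])
    # 2x2 frame; the unobserved ('two', 'b') cell is filled with NaN
    return pivot_simple(index, columns, values)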
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
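# Illustrative sketch of the Series branch above (hypothetical helper, not
# part of the original module): a two-level index goes through _Unstacker.
def _example_unstack_series():
    index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
                                    ('two', 'a'), ('two', 'b')])
    s = Series([1., 2., 3., 4.], index=index)
    return unstack(s, -1)  # 2x2 frame: rows one/two, columns a/b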
def _unstack_frame(obj, level):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
cat = Categorical(index, ordered=True)
return cat.categories, cat.codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = \
zip(*map(factorize, (frame.index, frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels,
labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
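# Illustrative sketch of stack on a plain (non-MultiIndex) frame
# (hypothetical helper, not part of the original module):
def _example_stack():
    df = DataFrame([[1, 2], [3, 4]],
                   index=['r1', 'r2'], columns=['c1', 'c2'])
    # Series indexed by (row, column) pairs:
    # ('r1', 'c1') -> 1, ('r1', 'c2') -> 2, ('r2', 'c1') -> 3, ...
    return stack(df)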
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level numbers, "
"not a mixture of the two.")
return result
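# Worked example of the renumbering above (illustrative): asked to stack
# levels [0, 2] of a three-level column index, the loop stacks level 0
# first; the old level 2 has then shifted down by one, so the remaining
# entry is rewritten from 2 to 1 before the next pass.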
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something
we can safely pass to swaplevel:
We generally want to convert the level number into
a level name, except when columns do not have names,
in which case we must leave as a level number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[
lev.take(lab) for lev, lab in
zip(this.columns.levels[:-1], this.columns.labels[:-1])
]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None,
var_name=None, value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i for i in
range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
mdata[target] = com._concat_compat([data[col].values for col in names])
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
        String to prepend to DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
    >>> df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                 'C': [1, 2, 3]})
    >>> get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
See also ``Series.str.get_dummies``.
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(include=['object',
'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did "
"not match the length of the columns "
"being encoded ({2}).")
if com.is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False, sparse=False):
# Series avoids inconsistent NaN handling
cat = Categorical.from_array(Series(data), ordered=True)
levels = cat.categories
# if all NaN
if not dummy_na and len(levels) == 0:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index)
codes = cat.codes.copy()
if dummy_na:
codes[codes == -1] = len(cat.categories)
levels = np.append(cat.categories, np.nan)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v)
for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [ [] for _ in range(len(dummy_cols)) ]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs)), sparse_index=IntIndex(N, ixs),
fill_value=0)
sparse_series[col] = SparseSeries(data=sarr, index=index)
return SparseDataFrame(sparse_series, index=index, columns=dummy_cols)
else:
dummy_mat = np.eye(number_of_cols).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {
'major': 0,
'minor': 1
}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
cat = Categorical.from_array(mapped_items.take(labels), ordered=True)
labels = cat.codes
items = cat.categories
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/core/reshape.py
|
Python
|
artistic-2.0
| 39,134
|
#!/usr/bin/env python
'Unit test for import_relative'
import inspect, os, sys, unittest
top_builddir = os.path.join(os.path.dirname(__file__), '..')
if top_builddir[-1] != os.path.sep:
top_builddir += os.path.sep
sys.path.insert(0, top_builddir)
from import_relative import *
def true(): return True
class TestImportRelative(unittest.TestCase):
def test_path2abspath(self):
self.assertEqual(path2abspath('.', 2),
path2abspath('..test', 2))
self.assertEqual(path2abspath('test-basic.py', 2),
path2abspath('.test-basic.py', 2))
parent_dir = os.path.abspath(os.path.join(get_srcdir(), os.pardir))
basename = os.path.basename(parent_dir)
import_rel = '...%s' % basename
self.assertEqual(parent_dir,
path2abspath(import_rel, 2))
return
def test_basic(self):
"""Basic sanity testing."""
test_basic = import_relative('test-basic')
self.assertTrue(inspect.ismodule(test_basic),
'import_relative should return a module type')
filename = os.path.join(get_srcdir(), 'test-basic.py')
self.assertEqual(os.path.sep, filename[0],
'get_srcdir should return an absolute path name')
check_fn = test_basic.__file__
if (check_fn.endswith(".pyc") or check_fn.endswith(".pyo")):
check_fn = check_fn[:-1]
pass
self.assertEqual(filename, check_fn,
'import_relative should set __file__ correctly')
self.assertEqual('test-basic', test_basic.__name__,
'import_relative should set __name__ correctly')
self.assertTrue(test_basic.true(),
'should be able to use fns inside returned module')
self.assertTrue('test-basic' in sys.modules)
ir = import_relative('import_relative', os.pardir)
os2_path = ir.import_relative('os2.path')
        self.assertEqual('test.os2.path', os2_path.me)
ir = import_relative('import_relative', '..')
os2_path = ir.import_relative('os2.path')
        self.assertEqual('test.os2.path', os2_path.me)
tb = import_relative('test-basic', '..test')
self.assertTrue(tb.true)
return
pass
if __name__ == '__main__':
unittest.main()
|
rocky/pyimport-relative
|
test/test-basic.py
|
Python
|
gpl-3.0
| 2,380
|