prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# -*- coding: utf-8 -*-

# ******************************************************************************
#
# Copyright (C) 2008-2010 Olivier Tilloy <olivier@tilloy.net>
#
# This file is part of the pyexiv2 distribution.
#
# pyexiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# pyexiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyexiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Author: Olivier Tilloy <olivier@tilloy.net>
#
# ******************************************************************************

import unittest

from pyexiv2.utils import Rational


class TestRational(unittest.TestCase):
    """Unit tests for the pyexiv2 ``Rational`` helper."""

    def test_constructor(self):
        # A valid rational keeps its numerator and denominator as given.
        value = Rational(2, 1)
        self.assertEqual(value.numerator, 2)
        self.assertEqual(value.denominator, 1)
        # A zero denominator is rejected at construction time.
        self.assertRaises(ZeroDivisionError, Rational, 1, 0)

    def test_read_only(self):
        # Both components must be immutable after construction.
        value = Rational(3, 4)
        for attribute in ('numerator', 'denominator'):
            try:
                setattr(value, attribute, 5)
            except AttributeError:
                pass
            else:
                self.fail('%s is not read-only.' % attribute.capitalize())

    def test_match_string(self):
        # Well-formed strings yield a (numerator, denominator) tuple.
        expectations = (
            ('4/3', (4, 3)),
            ('-4/3', (-4, 3)),
            ('0/3', (0, 3)),
            ('0/0', (0, 0)),
        )
        for text, expected in expectations:
            self.assertEqual(Rational.match_string(text), expected)
        # Explicit plus sign, spaces, a negative denominator or garbage
        # are all rejected.
        for text in ('+3/5', '3 / 5', '3/-5', 'invalid'):
            self.assertRaises(ValueError, Rational.match_string, text)

    def test_from_string(self):
        self.assertEqual(Rational.from_string('4/3'), Rational(4, 3))
        self.assertEqual(Rational.from_string('-4/3'), Rational(-4, 3))
        for text in ('+3/5', '3 / 5', '3/-5', 'invalid'):
            self.assertRaises(ValueError, Rational.from_string, text)
        # A zero denominator parses syntactically but cannot become a
        # Rational instance.
        for text in ('1/0', '0/0'):
            self.assertRaises(ZeroDivisionError, Rational.from_string, text)

    def test_to_string(self):
        self.assertEqual(str(Rational(3, 5)), '3/5')
        self.assertEqual(str(Rational(-3, 5)), '-3/5')

    def test_repr(self):
        for numerator, denominator in ((3, 5), (-3, 5), (0, 3)):
            expected = 'Rational(%d, %d)' % (numerator, denominator)
            self.assertEqual(repr(Rational(numerator, denominator)), expected)

    def test_to_float(self):
        self.assertEqual(Rational(3, 6).to_float(), 0.5)
        self.assertEqual(Rational(11, 11).to_float(), 1.0)
        self.assertEqual(Rational(-2, 8).to_float(), -0.25)
        self.assertEqual(Rational(0, 3).to_float(), 0.0)

    def test_equality(self):
        # Equality is by value, including non-reduced forms (8/4 == 2/1).
        self.assertEqual(Rational(2, 1), Rational(2, 1))
        self.assertEqual(Rational(2, 1), Rational(8, 4))
        self.assertNotEqual(Rational(2, 1), Rational(3, 2))
import unittest

import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
    STEPS_TRAINED_COUNTER
from ray.rllib.utils.test_utils import framework_iterator


class TestDistributedExecution(unittest.TestCase):
    """General tests for the distributed execution API."""

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=4)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_exec_plan_stats(self):
        # Fixed: the first parameter was named `ray_start_regular` (a pytest
        # fixture name), but unittest always passes the test instance as the
        # first positional argument, so it was really `self` in disguise.
        for fw in framework_iterator(frameworks=("torch", "tf")):
            trainer = A2CTrainer(
                env="CartPole-v0",
                config={
                    "min_iter_time_s": 0,
                    "framework": fw,
                })
            result = trainer.train()
            # One train() call must report sampling/training counters and
            # the standard timers in its result dict.
            assert isinstance(result, dict)
            assert "info" in result
            assert "learner" in result["info"]
            assert STEPS_SAMPLED_COUNTER in result["info"]
            assert STEPS_TRAINED_COUNTER in result["info"]
            assert "timers" in result
            assert "learn_time_ms" in result["timers"]
            assert "learn_throughput" in result["timers"]
            assert "sample_time_ms" in result["timers"]
            assert "sample_throughput" in result["timers"]
            assert "update_time_ms" in result["timers"]

    def test_exec_plan_save_restore(self):
        # Fixed: same `ray_start_regular` -> `self` rename as above.
        for fw in framework_iterator(frameworks=("torch", "tf")):
            trainer = A2CTrainer(
                env="CartPole-v0",
                config={
                    "min_iter_time_s": 0,
                    "framework": fw,
                })
            res1 = trainer.train()
            checkpoint = trainer.save()
            for _ in range(2):
                res2 = trainer.train()
            assert res2["timesteps_total"] > res1["timesteps_total"], \
                (res1, res2)
            trainer.restore(checkpoint)
            # Should restore the timesteps counter to the same as res2.
            res3 = trainer.train()
            assert res3["timesteps_total"] < res2["timesteps_total"], \
                (res2, res3)


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
ient import SAMPIntegratedClient from ..errors import SAMPProxyError # By default, tests should not use the internet. from .. import conf from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY def setup_module(module): conf.use_internet = False class TestStandardProfile: @property def hub_init_kwargs(self): return {} @property def client_init_kwargs(self): return {} @property def client_connect_kwargs(self): return {} def setup_method(self, method): self.tmpdir = tempfile.mkdtemp() self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1, **self.hub_init_kwargs) self.hub.start() self.client1 = SAMPIntegratedClient(**self.client_init_kwargs) self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) self.client2 = SAMPIntegratedClient(**self.client_init_kwargs) self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) def teardown_method(self, method): if self.client1.is_connected: self.client1.disconnect() if self.client2.is_connected: self.client2.disconnect() self.hub.stop() def test_main(self): self.client1_id = self.client1.get_public_id() self.client2_id = self.client2.get_public_id() self.metadata1 = {"samp.name": "Client 1", "samp.description.text": "Client 1 Description", "client.version": "1.1"} self.metadata2 = {"samp.name": "Client 2", "samp.description.text": "Client 2 Description", "client.version": "1.2"} # Check that the clients are connected assert self.client1.is_connected assert self.client2.is_connected # Check that ping works self.client1.ping() self.client2.ping() # Check that get_registered_clients works as expected. 
assert self.client1_id not in self.client1.get_registered_clients() assert self.client2_id in self.client1.get_registered_clients() assert self.client1_id in self.client2.get_registered_clients() assert self.client2_id not in self.client2.get_registered_clients() # Check that get_metadata works as expected assert self.client1.get_metadata(self.client1_id) == {} assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client1_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client1.declare_metadata(self.metadata1) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client2.declare_metadata(self.metadata2) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == self.metadata2 assert self.client2.get_metadata(self.client2_id) == self.metadata2 # Check that, without subscriptions, sending a notification from one # client to another raises an error. 
message = {} message['samp.mtype'] = "table.load.votable" message['samp.params'] = {} with pytest.raises(SAMPProxyError): self.client1.notify(self.client2_id, message) # Check that there are no currently active subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {} assert self.client2.get_subscribed_clients('table.load.votable') == {} # We now test notifications and calls rec1 = Receiver(self.client1) rec2 = Receiver(self.client2) self.client2.bind_receive_notification('table.load.votable', rec2.receive_notification) self.client2.bind_receive_call('table.load.votable', rec2.receive_call) self.client1.bind_receive_response('test-tag', rec1.receive_response) # Check resulting subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}} assert self.client2.get_subscribed_clients('table.load.votable') == {} assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id) assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id) # Once we have finished with the calls and notifications, we will # check the data got across correctly. 
# Test notify params = random_params(self.tmpdir) self.client1.notify(self.client2.get_public_id(), {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify(self.client2.get_public_id(), "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test notify_all params = random_params(self.tmpdir) self.client1.notify_all({'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify_all("table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call params = random_params(self.tmpdir) self.client1.call(self.client2.get_public_id(), 'test-tag', {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.t
mpdir) self.client1.ecall(self.client2.get_public_id(), 'test-tag', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_all params = random_params(self.tmpdir)
self.client1.call_all('tag1', {'samp.mtype': 'table.load.votable', 'samp.params': params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.ecall_all('tag2', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_and_wait params = random_params(self.tmpdir) result = self.client1.call_and_wait(self.client2.get_public_id(), {'samp.mtype': 'table.load.votable', 'samp.params': params}, timeout=5) assert result == TEST_REPLY assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) result = self.client1.ecall_and_wait(self.client2.get_public_id(), "table.load.votab
#!/usr/bin/python
#
# author:
#
# date:
# description:
#
'''Trains a memory network on the bAbI dataset.

References:

- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov,
  Alexander M. Rush,
  "Towards AI-Complete Question a1ing: A Set of Prerequisite Toy Tasks",
  http://arxiv.org/abs/1502.05698

- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
  "End-To-End Memory Networks", http://arxiv.org/abs/1503.08895

Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function

# NOTE(review): these imports (Merge, keras.layers.embeddings,
# visualize_util below) are Keras 1.x API that was removed in Keras 2 —
# this script targets an old Keras; confirm the pinned version.
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import Activation, Dense, Merge, Permute, Dropout
from keras.layers import LSTM, SimpleRNN, Input
from keras.layers.core import Flatten
from keras.utils.data_utils import get_file
from functools import reduce
import tarfile

from data import get_stories, vectorize_stories

# Archive with the bAbI tasks; expected to sit next to this script.
path = 'babi-tasks-v1-2.tar.gz'
#origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)

# Task name -> path template inside the archive ({} is 'train' or 'test').
challenges = {
    # QA1 with 10,000 samples
    'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
    # QA2 with 10,000 samples
    'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]

print('Extracting stories for the challenge:', challenge_type)
# Each story is a (story_tokens, question_tokens, answer_token) triple.
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))

# Vocabulary: union of every token appearing in stories, questions
# and answers of both splits.
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [a1]) for story, q, a1 in train_stories + test_stories)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))

print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, a1):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')

# 1-based word index; 0 stays reserved for padding/masking.
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, a1s_train = vectorize_stories(train_stories,
                                                          word_idx,
                                                          story_maxlen,
                                                          query_maxlen)
inputs_test, queries_test, a1s_test = vectorize_stories(test_stories,
                                                        word_idx,
                                                        story_maxlen,
                                                        query_maxlen)

print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('a1s: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('a1s_train shape:', a1s_train.shape)
print('a1s_test shape:', a1s_test.shape)
print('-')
print('Compiling...')
print(inputs_train.shape)
print(queries_train.shape)

X = Input(shape=(story_maxlen,), dtype="int32")
Q = Input(shape=(query_maxlen,), dtype="int32")
embedding_dim = story_maxlen

# embed the input sequence into a sequence of vectors
# NOTE(review): `Embedding(...)(X)` returns a tensor, but `Sequential.add`
# expects a layer — this mixes the functional and Sequential APIs and fails
# on stock Keras; confirm which fork/version this was written against.
m1 = Sequential()
m1.add(Embedding(input_dim=vocab_size,
                 output_dim=embedding_dim,
                 input_length=story_maxlen)(X))
# output: (samples, story_maxlen, embedding_dim)

# embed the question into a sequence of vectors
u1 = Sequential()
u1.add(Embedding(input_dim=vocab_size,
                 output_dim=embedding_dim,
                 input_length=query_maxlen)(Q))
# output: (samples, query_maxlen, embedding_dim)

# compute a 'w1' between input sequence elements (which are vectors)
# and the question vector sequence
w1 = Sequential()
w1.add(Merge([m1, u1], mode='dot', dot_axes=[2, 2]))
#w1.add(Activation('softmax'))
# output: (samples, story_maxlen, query_maxlen)

# embed the input into a single vector with size = story_maxlen:
c1 = Sequential()
c1.add(Embedding(input_dim=vocab_size,
                 output_dim=query_maxlen,
                 input_length=story_maxlen)(X))
# output: (samples, story_maxlen, query_maxlen)

# sum the w1 vector with the input vector:
o1 = Sequential()
o1.add(Merge([w1, c1], mode='sum'))
# output: (samples, story_maxlen, query_maxlen)
o1.add(Permute((2, 1)))
# output: (samples, query_maxlen, story_maxlen)

# Disabled second memory hop (kept for reference):
#u2 = Sequential()
#u2.add(Merge([o1, u1], mode='sum'))

#m2 = Sequential()
#m2.add(Embedding(input_dim=vocab_size,
#output_dim=embedding_dim,
#input_length=story_maxlen))

#w2 = Sequential()
#w2.add(Merge([m2, u2], mode='dot', dot_axes=[2, 2]))

#c2 = Sequential()
#c2.add(Embedding(input_dim=vocab_size,
#output_dim=query_maxlen,
#input_length=story_maxlen))

#o2 = Sequential()
#o2.add(Merge([w2, c2], mode='sum'))
#o2.add(Permute((2, 1)))

# concatenate the w1 vector with the question vector,
# and do logistic regression on top
a1 = Sequential()
a1.add(Merge([o1, u1], mode='sum'))
a1.add(Flatten())  # why not in original format?
# one regularization layer -- more would probably be needed.
a1.add(Dense(vocab_size))
# we output a probability distribution over the vocabulary
a1.add(Activation('softmax'))

a1.compile(optimizer='adam', loss='categorical_crossentropy',
           metrics=['accuracy'])

# Note: you could use a Graph model to avoid repeat the input twice
# NOTE(review): `nb_epoch` is the Keras 1.x spelling (`epochs` in Keras 2).
a1.fit([inputs_train, queries_train], a1s_train,
       batch_size=512,
       nb_epoch=10,
       validation_data=([inputs_test, queries_test], a1s_test))

from keras.utils.visualize_util import plot

# Dead code: the `and False` guard disables the plotting/saving below.
# NOTE(review): the original indentation of the save statements was
# ambiguous; they are read here as part of this disabled branch — confirm
# against the upstream script.
if __name__ == "__main__" and False:
    plot(a1, to_file='model.png')
    json_model = a1.to_json()
    with open("model.json", "w") as fh:
        fh.write(json_model)
    a1.save_weights("rnn_weights.h5")
# encoding: utf-8

from yast import import_module
import_module('UI')
from yast import *


class Heading2Client:
    """Demo client: shows a dialog with a heading, a label and an OK
    button, waits for one user input event, then closes the dialog."""

    def main(self):
        dialog_content = VBox(
            Heading("This Is a Heading."),
            Label("This is a Label."),
            PushButton("&OK"),
        )
        UI.OpenDialog(dialog_content)
        UI.UserInput()
        UI.CloseDialog()


Heading2Client().main()
# -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import re
import time

from ... import hoster


@hoster.host
class this:
    # Plugin descriptor for the junocloud.me one-click hoster.
    model = hoster.HttpPremiumHoster
    name = 'junocloud.me'
    patterns = [
        hoster.Matcher('https?', '*.junocloud.me', '!/<id>'),
    ]
    max_filesize_free = hoster.GB(2)
    max_filesize_premium = hoster.GB(2)
    url_template = 'http://junocloud.me/{id}'
    login_url = 'http://junocloud.me/login.html'
    account_url = 'http://junocloud.me/account.html'


def boot_account(account):
    """Prepare the HTTP session and, when credentials exist, log in.

    Returns the login response on success, None for anonymous use, and
    marks the account as failed when the post-login redirect is wrong.
    """
    account.set_user_agent()
    account.cookies["lang"] = "english"
    if account.username is None:
        # Anonymous/free usage: nothing to log in.
        return
    data = {
        'op': 'login',
        'redirect': this.account_url,
        'login': account.username,
        'password': account.password,
        'loginFormSubmit': 'Login',
    }
    resp = account.post(this.login_url, data=data)
    if resp.url != this.account_url:
        # A successful login redirects to the account page.
        account.login_failed()
        return
    return resp


def on_initialize_account(account):
    """Log the account in and detect whether it is premium."""
    resp = boot_account(account)
    if resp:
        status = resp.soup.find('div', text=lambda a: 'Status:' in a if a else False).find_next('div').find('strong').text.strip()
        if status != 'Premium':
            account.premium = False
            return
        raise NotImplementedError('premium is not implemented')


def check_errors(ctx, resp):
    """Translate well-known error pages into hoster status updates."""
    if 'The origin web server timed out responding to this request.' in resp.text:
        ctx.maintenance(180)
    h1 = resp.soup.find('h1')
    if h1:
        if 'File Not Found' in h1.text or '404 Not Found' in h1.text:
            ctx.set_offline()


def on_check_http(file, resp):
    """Extract file name and size from the landing page."""
    check_errors(file, resp)
    name = resp.soup.find('input', attrs={'name': 'fname'}).get('value').strip()
    size = resp.soup.find('p', 'request_filesize').text.strip().split(' ', 1)[1].strip()
    file.set_infos(name=name, size=size)


def on_download_premium(chunk):
    raise NotImplementedError('premium is untested')


def on_download_free(chunk):
    """Walk the free-download flow: two XFS forms, wait timers, captcha."""
    resp = chunk.account.get(chunk.url, use_cache=True)
    check_errors(chunk, resp)
    resp = hoster.xfilesharing_download(resp, 1)[0]()
    check_errors(chunk, resp)
    m = re.search('You have to wait (.*?) till next download', resp.text)
    if m:
        wait = hoster.parse_seconds2(m.group(1)) + time.time()
        # NOTE(review): `wait` is an absolute timestamp here, so `> 300` is
        # always true; the intent was probably to compare the parsed number
        # of seconds against 300 — confirm before changing.
        if wait > 300:
            chunk.ip_blocked(wait)
    submit, data = hoster.xfilesharing_download(resp, 2)
    wait = resp.soup.find('span', id='uglrto')
    if wait:
        # Fixed: `rplit` is not a str method (guaranteed AttributeError);
        # the countdown text is split from the right with `rsplit`.
        wait = int(wait.text.strip().rsplit(' ', 1)[1]) + time.time()
    for result, challenge in chunk.solve_captcha('recaptcha', parse=resp.text, retries=5):
        data['recaptcha_challenge_field'] = challenge
        data['recaptcha_response_field'] = result
    if wait and wait - time.time() > 0:
        chunk.wait(wait - time.time())
    resp = submit(allow_redirects=False)
    if resp.status_code == 302:
        return resp.headers['Location']
    check_errors(chunk, resp)
""" Exit Status 1 is already used in the script. Zdd returns with exit status 1 when app is not force deleted either through argument or through prompt. Exit Status 2 is used for Unknown Exceptions. """ class InvalidArgException(Exception): """ This exception indicates invalid combination of arguments passed to zdd""" def __init__(self, msg): super(InvalidArgException, self).__init__(msg) self.error = msg self.zdd_exit_status = 3 class MissingFieldException(Exception): """ This exception indicates required fields which are missing in JSON payload passed to zdd""" def __init__(self, msg, field): super(MissingFieldException, self).__init__(msg) self.error = msg self.missing_field = field self.zdd_exit_status = 4 class MarathonLbEndpointException(Exception): """ This excaption indicates issue with one of the marathonlb endpoints specified as argument to Zdd""" def __init__(self, msg, url, error): super(MarathonLbEndpointException, self).__init__(msg) self.msg = msg self.url = url self.error = error self.zdd_exit_status = 5 class MarathonEndpointException(Exception): """ This excaption indicates issue with marathon endpoint specified as argument to Zdd""" def __init__(self, msg, url, error): super(MarathonEndpointException, self).__init__(msg) self.msg = msg self.url = url self.error = error self.zdd_exit_status = 6 class AppCreateException(Exception): """ This exception indicates there was a error while creating the new App and hence it was not created.""" def __init__(self, msg, url, pay
load, error): super(AppCreateException, self).__init__(msg) self.msg = msg self.error = error self.url = url self.payload = payload self.zdd_exit_status = 7 class AppDeleteException(Exception): """ This exception i
ndicates there was a error while deleting the old App and hence it was not deleted """ def __init__(self, msg, url, appid, error): super(AppDeleteException, self).__init__(msg) self.msg = msg self.error = error self.url = url self.zdd_exit_status = 8 class AppScaleException(Exception): """ This exception indicated there was a error while either scaling up new app or while scaling down old app""" def __init__(self, msg, url, payload, error): super(AppScaleException, self).__init__(msg) self.msg = msg self.error = error self.url = url self.payload = payload self.zdd_exit_status = 9
lib import pofile from autotranslate.poutil import find_pos, pagination_range, timestamp_with_timezone from autotranslate.signals import entry_changed, post_save from autotranslate.storage import get_storage from autotranslate.access import can_translate, can_translate_language import json import re import autotranslate import unicodedata import hashlib import os import six @never_cache @user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL) def home(request): """ Displays a list of messages to be translated """ def fix_nls(in_, out_): """Fixes submitted translations by filtering carriage returns and pairing newlines at the begging and end of the translated string with the original """ if 0 == len(in_) or 0 == len(out_): return out_ if "\r" in out_ and "\r" not in in_: out_ = out_.replace("\r", '') if "\n" == in_[0] and "\n" != out_[0]: out_ = "\n" + out_ elif "\n" != in_[0] and "\n" == out_[0]: out_ = out_.lstrip() if 0 == len(out_): pass elif "\n" == in_[-1] and "\n" != out_[-1]: out_ = out_ + "\n" elif "\n" != in_[-1] and "\n" == out_[-1]: out_ = out_.rstrip() return out_ def _request_request(key, default=None): if key in request.GET: return request.GET.get(key) elif key in request.POST: return request.POST.get(key) return default storage = get_storage(request) query = '' if storage.has('autotranslate_i18n_fn'): autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn') autotranslate_i18n_app = get_app_name(autotranslate_i18n_fn) autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code') autotranslate_i18n_lang_bidi = autotranslate_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI autotranslate_i18n_write = storage.get('autotranslate_i18n_write', True) if autotranslate_i18n_write: autotranslate_i18n_pofile = pofile(autotranslate_i18n_fn, wrapwidth=autotranslate_settings.POFILE_WRAP_WIDTH) for entry in autotranslate_i18n_pofile: entry.md5hash = hashlib.md5( (six.text_type(entry.msgid) + six.text_type(entry.msgstr) + 
six.text_type(entry.msgctxt or "")).encode('utf8') ).hexdigest() else: autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile') if 'filter' in request.GET: if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'): filter_ = request.GET.get('filter') storage.set('autotranslate_i18n_filter', filter_) return HttpResponseRedirect(reverse('autotranslate-home')) autotranslate_i18n_filter = storage.get('autotranslate_i18n_filter', 'all') if '_next' in request.POST: rx = re.compile(r'^m_([0-9a-f]+)') rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)') file_change = False for key, value in request.POST.items(): md5hash = None plural_id = None if rx_plural.match(key): md5hash = str(rx_plural.match(key).groups()[0]) # polib parses .po files into unicode strings, but # doesn't bother to convert plural indexes to int, # so we need unicode here. plural_id = six.text_type(rx_plural.match(key).groups()[1]) # Above no longer true as of Polib 1.0.4 if plural_id and plural_id.isdigit(): plural_id = int(plural_id) elif rx.match(key): md5hash = str(rx.match(key).groups()[0]) if md5hash is not None: entry = autotranslate_i18n_pofile.find(md5hash, 'md5hash') # If someone did a makemessage, some entries might # have been removed, so we need to check. 
if entry: old_msgstr = entry.msgstr if plural_id is not None: plural_string = fix_nls(entry.msgid_plural, value) entry.msgstr_plural[plural_id] = plural_string else: entry.msgstr = fix_nls(entry.msgid, value) is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False)) old_fuzzy = 'fuzzy' in entry.flags if old_fuzzy and not is_fuzzy: entry.flags.remove('fuzzy') elif not old_fuzzy and is_fuzzy: entry.flags.append('fuzzy') file_change = True if old_msgstr != value or old_fuzzy != is_fuzzy: entry_changed.send(sender=entry, user=request.user, old_msgstr=old_msgstr, old_fuzzy=old_fuzzy, pofile=autotranslate_i18n_fn, language_code=autotranslate_i18n_lang_code, ) else: storage.set('autotranslate_last_save_error', True) if file_change and autotranslate_i18n_write: try: autotranslate_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % ( getattr(request.u
ser, 'first_name', '
Anonymous'), getattr(request.user, 'last_name', 'User'), getattr(request.user, 'email', 'anonymous@user.tld') )).encode('ascii', 'ignore') autotranslate_i18n_pofile.metadata['X-Translated-Using'] = u"dj-translate %s" % autotranslate.get_version(False) autotranslate_i18n_pofile.metadata['PO-Revision-Date'] = timestamp_with_timezone() except UnicodeDecodeError: pass try: autotranslate_i18n_pofile.save() po_filepath, ext = os.path.splitext(autotranslate_i18n_fn) if autotranslate_settings.AUTO_COMPILE: save_as_mo_filepath = po_filepath + '.mo' autotranslate_i18n_pofile.save_as_mofile(save_as_mo_filepath) post_save.send(sender=None, language_code=autotranslate_i18n_lang_code, request=request) # Try auto-reloading via the WSGI daemon mode reload mechanism if autotranslate_settings.WSGI_AUTO_RELOAD and \ 'mod_wsgi.process_group' in request.environ and \ request.environ.get('mod_wsgi.process_group', None) and \ 'SCRIPT_FILENAME' in request.environ and \ int(request.environ.get('mod_wsgi.script_reloading', '0')): try: os.utime(request.environ.get('SCRIPT_FILENAME'), None) except OSError: pass # Try auto-reloading via uwsgi daemon reload mechanism if autotranslate_settings.UWSGI_AUTO_RELOAD: try: import uwsgi # pretty easy right? uwsgi.reload() except: # we may not be running under uwsgi :P pass except Exception as e: messages.error(request, e) storage.set('autotranslate_i18n_write', False) storage.set('aut
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """contrib module containing volatile or experimental code.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Add projects here, they will show up under tf.contrib. from tensorflow.contrib import bayesflow from tensorflow.contrib import cloud from tensorflow.contrib import compiler from tensorflow.contrib import copy_graph from tensorflow.contrib import crf from tensorflow.contrib import cudnn_rnn from tensorflow.contrib import data from tensorflow.contrib import deprecated from tensorflow.contrib import distributions from tensorflow.contrib import factorization from tensorflow.contrib import framework from tensorflow.contrib import graph_editor from tensorflow.contrib import grid_rnn from tensorflow.contrib import image from tensorflow.contrib import input_pipeline from tensor
flow.contrib import integrate from tensorflow.contrib import keras from tensorflow.contrib import kernel_methods from tensorflow.contrib import labeled_tensor from tensorflow.contrib import layers from tensorflow.contrib imp
ort learn from tensorflow.contrib import legacy_seq2seq from tensorflow.contrib import linalg from tensorflow.contrib import linear_optimizer from tensorflow.contrib import lookup from tensorflow.contrib import losses from tensorflow.contrib import memory_stats from tensorflow.contrib import metrics from tensorflow.contrib import nccl from tensorflow.contrib import nn from tensorflow.contrib import opt from tensorflow.contrib import quantization from tensorflow.contrib import rnn from tensorflow.contrib import saved_model from tensorflow.contrib import seq2seq from tensorflow.contrib import signal from tensorflow.contrib import slim from tensorflow.contrib import solvers from tensorflow.contrib import sparsemax from tensorflow.contrib import staging from tensorflow.contrib import stat_summarizer from tensorflow.contrib import stateless from tensorflow.contrib import tensor_forest from tensorflow.contrib import tensorboard from tensorflow.contrib import testing from tensorflow.contrib import tfprof from tensorflow.contrib import training from tensorflow.contrib import util from tensorflow.contrib.ndlstm import python as ndlstm from tensorflow.contrib.specs import python as specs from tensorflow.python.util.lazy_loader import LazyLoader ffmpeg = LazyLoader("ffmpeg", globals(), "tensorflow.contrib.ffmpeg") del LazyLoader del absolute_import del division del print_function
import json


class AbstractionUtility(object):
    """Utility helpers for reading and writing log-abstraction data.

    An "abstraction" is a dict with at least the keys:
      - 'abstraction'  : the abstraction string (log template),
      - 'original_id'  : iterable of 0-based line ids into the raw log file.
    """

    @staticmethod
    def read_json(json_file):
        """Read a JSON file and return its top-level object with int keys.

        JSON object keys are always strings; callers index abstractions by
        integer id, so every key is converted to int.

        :param json_file: path to the JSON file.
        :return: dict mapping int id -> value.
        """
        with open(json_file, 'r') as f:
            data = json.load(f)
        # .items() works on both Python 2 and 3 (original used iteritems()).
        converted_data = {}
        for key, value in data.items():
            converted_data[int(key)] = value
        return converted_data

    @staticmethod
    def write_perabstraction(final_abstraction, log_file, perabstraction_file):
        """Write raw log lines grouped under their abstraction headers.

        :param final_abstraction: dict abstraction_id -> abstraction dict.
        :param log_file: path to the raw log file (one entry per line).
        :param perabstraction_file: output path.
        """
        with open(log_file, 'r') as f:
            logs = f.readlines()

        # 'with' guarantees the output file is closed even on error
        # (original used manual open()/close()).
        with open(perabstraction_file, 'w') as f_perabstraction:
            for abstraction_id, abstraction in final_abstraction.items():
                f_perabstraction.write('Abstraction #' + str(abstraction_id) +
                                       ' ' + abstraction['abstraction'] + '\n')
                for line_id in abstraction['original_id']:
                    # logs[line_id] still carries its trailing newline.
                    f_perabstraction.write(str(line_id) + ' ' + logs[line_id])
                f_perabstraction.write('\n')

    @staticmethod
    def write_perline(final_abstraction, log_file, perline_file):
        """Write every raw log line prefixed with its abstraction id.

        :param final_abstraction: dict abstraction_id -> abstraction dict.
        :param log_file: path to the raw log file.
        :param perline_file: output path; format is '<abstraction_id>; <log>'.
        """
        with open(log_file, 'r') as f:
            logs = f.readlines()

        # Invert the mapping: line id -> abstraction id.
        abstraction_label = {}
        for abstraction_id, abstraction in final_abstraction.items():
            for line_id in abstraction['original_id']:
                abstraction_label[line_id] = abstraction_id

        # NOTE: raises KeyError if some log line is not covered by any
        # abstraction — same behavior as the original implementation.
        with open(perline_file, 'w') as f_perline:
            for line_id, log in enumerate(logs):
                f_perline.write(str(abstraction_label[line_id]) + '; ' + log)

    @staticmethod
    def get_abstractionid_from_groundtruth(logid_abstractionid_file, abstractions):
        """Renumber abstractions to match ground-truth ids.

        Abstractions whose string exists in the ground truth get the
        ground-truth id; unknown abstractions get fresh ids starting at
        len(ground truth).

        :param logid_abstractionid_file: JSON file mapping id -> abstraction
            string (the ground truth).
        :param abstractions: dict abstraction_id -> abstraction dict.
        :return: dict new_id -> abstraction dict.
        """
        abstraction_groundtruth = AbstractionUtility.read_json(
            logid_abstractionid_file)
        groundtruth_length = len(abstraction_groundtruth)

        # Reverse lookup: abstraction string -> ground-truth id.  The
        # original indexed .keys()/.values(), which breaks on Python 3
        # views and is O(n) per lookup; setdefault keeps first-wins
        # semantics if the ground truth contains duplicate strings.
        groundtruth_ids = {}
        for gt_id, gt_abstraction in abstraction_groundtruth.items():
            groundtruth_ids.setdefault(gt_abstraction, gt_id)

        abstractions_edited_id = {}
        for abstraction_id, abstraction in abstractions.items():
            text = abstraction['abstraction']
            if text in groundtruth_ids:
                new_id = groundtruth_ids[text]
            else:
                # Not in the ground truth: append with the next free id.
                new_id = groundtruth_length
                groundtruth_length += 1
            abstractions_edited_id[new_id] = abstraction
        return abstractions_edited_id
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE

from urbansim.abstract_variables.abstract_travel_time_variable_for_non_interaction_dataset import abstract_travel_time_variable_for_non_interaction_dataset


class SSS_travel_time_to_DDD(abstract_travel_time_variable_for_non_interaction_dataset):
    """Travel time by mode SSS to the zone whose ID is the DDD.
    """
    # Fallback travel time used when no travel_data entry matches
    # (semantics defined by the abstract base class — see its docs).
    default_value = 999
    # Expression for the origin zone of each record.
    origin_zone_id = 'zone.zone_id'

    def __init__(self, mode, number):
        # 'mode' is substituted for the SSS placeholder in the variable
        # name (e.g. 'hwy', 'bart'); 'number' for the DDD placeholder
        # (the fixed destination zone id).
        self.travel_data_attribute = "travel_data.%s" % mode
        # Constant destination expression: '+0*zone.zone_id' keeps the
        # result shaped like the zone dataset while evaluating to DDD.
        self.destination_zone_id = "destination_zone_id=%s+0*zone.zone_id" % number
        abstract_travel_time_variable_for_non_interaction_dataset.__init__(self)


from opus_core.tests import opus_unittest
from numpy import array, arange
from opus_core.tests.utils.variable_tester import VariableTester


class Tests(opus_unittest.OpusTestCase):
    def do(self,sss, ddd, should_be):
        # Build a tiny synthetic fixture: two zones and a full 2x2
        # travel-data matrix stored as an edge list
        # (from_zone_id, to_zone_id, <mode travel time>).
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "zone":{
                    "zone_id":array([1,3])},
                "travel_data":{
                    "from_zone_id":array([3,3,1,1]),
                    "to_zone_id":array([1,3,1,3]),
                    sss:array([1.1, 2.2, 3.3, 4.4])}
            }
        )
        # Instantiate the templated variable by name and compare against
        # the expected per-zone travel times.
        instance_name = "sanfrancisco.zone.%s_travel_time_to_%s" % (sss, ddd)
        tester.test_is_close_for_family_variable(self, should_be, instance_name)

    def test_to_1(self):
        # Times from zones [1, 3] to destination zone 1 by 'hwy'.
        should_be = array([3.3, 1.1])
        self.do('hwy', 1, should_be)

    def test_to_3(self):
        # Times from zones [1, 3] to destination zone 3 by 'bart'.
        should_be = array([4.4, 2.2])
        self.do('bart', 3, should_be)


if __name__=='__main__':
    opus_unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_equality
----------------------------------

Tests for the `SetType` low() method
"""

import unittest

from finitio.types import SetType, BuiltinType, Type

# Shared "low" target type used by the fixture below.
builtin_string = BuiltinType(str)


class TestSetTypeLow(unittest.TestCase):

    # Minimal Type subclass whose low() collapses to the builtin string
    # type, so SetType.low() on it should yield SetType(builtin_string).
    class HighType(Type):
        def low(self):
            return builtin_string

    # Class-level fixture: a set-of-HighType instance under test.
    subject = SetType(HighType(""))

    def test_equals_itself(self):
        # Lowering the set type lowers its element type.
        expected = SetType(builtin_string)
        self.assertEqual(self.subject.low(), expected)


if __name__ == '__main__':
    import sys
    sys.exit(unittest.main())
from djpcms import sites

# Re-export the CMS model implementation matching the configured ORM
# backend.  Only 'django' and 'stdnet' backends are supported; any other
# value is a configuration error.
if sites.settings.CMS_ORM == 'django':
    from djpcms.core.cmsmodels._django import *
elif sites.settings.CMS_ORM == 'stdnet':
    from djpcms.core.cmsmodels._stdnet import *
else:
    # Fixed typo in the original error message ("Objecr" -> "Object").
    raise NotImplementedError(
        'Object Relational Mapper {0} not available for CMS models'.format(
            sites.settings.CMS_ORM))
import time

import arcpy
from arcpy import env
from arcpy.sa import *

# Configure the geoprocessing environment.
env.workspace = ""  # set your workspace
arcpy.env.overwriteOutput = True

# A Spatial Analyst license is required for map-algebra on rasters.
arcpy.CheckOutExtension("Spatial")

# Start a timer (value not consumed in this script).
tic = time.clock()

# Input rasters and output destination.
raster_a = Raster("random_a.tif")
raster_b = Raster("random_b.tif")
raster_c = Raster("random_c.tif")
out_file = "output.tif"

# Map algebra: three times raster A plus the cell-wise product of B and C.
result = 3 * raster_a + raster_b * raster_c
result.save(out_file)
# -*- coding: utf-8 -*-
"""
    pygments.styles.vs
    ~~~~~~~~~~~~~~~~~~

    Simple style with MS Visual Studio colors.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Operator, Generic


class VisualStudioStyle(Style):
    """Pygments style approximating the Visual Studio default palette."""

    background_color = "#ffffff"
    default_style = ""

    styles = {
        # Comments: green, with blue preprocessor directives.
        Comment:            "#008000",
        Comment.Preproc:    "#0000ff",
        # Keywords and word-operators: Visual Studio blue;
        # type names and class names: teal.
        Keyword:            "#0000ff",
        Operator.Word:      "#0000ff",
        Keyword.Type:       "#2b91af",
        Name.Class:         "#2b91af",
        # String literals: dark red.
        String:             "#a31515",
        # Generic output markup.
        Generic.Heading:    "bold",
        Generic.Subheading: "bold",
        Generic.Emph:       "italic",
        Generic.Strong:     "bold",
        Generic.Prompt:     "bold",
        # Errors rendered with a red border.
        Error:              "border:#FF0000",
    }
# -*- coding:utf-8 -*-

# Copyright 2015 NEC Corporation.                                          #
#                                                                          #
# Licensed under the Apache License, Version 2.0 (the "License");          #
# you may not use this file except in compliance with the License.         #
# You may obtain a copy of the License at                                  #
#                                                                          #
#   http://www.apache.org/licenses/LICENSE-2.0                             #
#                                                                          #
# Unless required by applicable law or agreed to in writing, software      #
# distributed under the License is distributed on an "AS IS" BASIS,        #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and      #
# limitations under the License.                                           #

from org.o3project.odenos.core.component.network.flow.basic.flow_action\
    import FlowAction

import unittest


class FlowActionTest(unittest.TestCase):
    """Unit tests for FlowAction construction and property access."""

    Type = "FlowActionOutput"

    def setUp(self):
        # A fresh FlowAction instance for every test case.
        self.target = FlowAction(self.Type)

    def tearDown(self):
        # Drop the reference so each test starts clean.
        self.target = None

    def test_constructor(self):
        # The constructor must store the supplied type under the TYPE key.
        body = self.target._body
        self.assertEqual(body[self.target.TYPE], self.Type)

    def test_type(self):
        # The 'type' property mirrors the stored TYPE entry.
        self.assertEqual(self.Type, self.target.type)


if __name__ == '__main__':
    unittest.main()
id) return res @api.multi def _get_user_domain(self, dest_company): self.ensure_one() group_purchase_user = self.env.ref('purchase.group_purchase_user') return [ ('id', '!=', 1), ('company_id', '=', dest_company.id), ('id', 'in', group_purchase_user.users.ids), ] @api.multi def _check_intercompany_product(self, dest_company): domain = self._get_user_domain(dest_company) dest_user = self.env['res.users'].search(domain, limit=1) if dest_user: for purchase_line in self.order_line: try: purchase_line.product_id.sudo(dest_user).read( ['default_code']) except: raise UserError(_( "You cannot create SO from PO because product '%s' " "is not intercompany") % purchase_line.product_id.name) @api.multi def _inter_company_create_sale_order(self, dest_company_id): """ Create a Sale Order from the current PO (self) Note : In this method, should be call in sudo with the propert destination company in the context :param company : the company of the created PO :rtype company : res.company record """ self.ensure_one() dest_company = self.env['res.company'].browse(dest_company_id) # check intercompany product self._check_intercompany_product(dest_company) # Accessing to selling partner with selling user, so data like # property_account_position can be retrieved company_partner = self.company_id.partner_id # check pricelist currency should be same with PO/SO document if self.pricelist_id.currency_id.id != ( company_partner.property_product_pricelist.currency_id.id): raise UserError(_( 'You cannot create SO from PO because ' 'sale price list currency is different from ' 'purchase price list currency.')) # create the SO and generate its lines from the PO lines sale_order_data = self._prepare_sale_order_data( self.name, company_partner, dest_company, self.dest_address_id and self.dest_address_id.id or False) sale_order = self.env['sale.order'].create(sale_order_data) for purchase_line in self.order_line: sale_line_data = self._prepare_sale_order_line_data( purchase_line, dest_company, 
sale_order) self.env['sale.order.line'].create(sale_line_data) # write supplier reference field on PO if not self.partner_ref: self.partner_ref = sale_order.name # write invoice method field on PO if self.invoice_method != 'intercompany': self.invoice_method = 'intercompany' # Validation of sale order if dest_company.sale_auto_validation: sale_order.signal_workflow('order_confirm') @api.multi def _prepare_sale_order_data(self, name, partner, dest_company, direct_delivery_address): """ Generate the Sale Order values from the PO :param name : the origin client reference :rtype name : string :param partner : the partner reprenseting the company :rtype partner : res.partner record :param company : the company of the created SO :rtype company : res.company record :param direct_delivery_address : the address of the SO :rtype direct_delivery_address : res.partner record """ self.ensure_one() partner_addr = partner.address_get(['default', 'invoice', 'delivery', 'contact']) # find location and warehouse, pick warehouse from company object warehouse = ( dest_company.warehouse_id and dest_company.warehouse_id.company_id.id == dest_company.id and dest_company.warehouse_id or False) if not warehouse: raise UserError(_( 'Configure correct warehouse for company (%s) in ' 'Menu: Settings/companies/companies' % (dest_company.name))) partner_shipping_id = ( self.picking_type_id.warehouse_id and self.picking_type_id.warehouse_id.partner_id and self.picking_type_id.warehouse_id.partner_id.id or False) return { 'name': ( self.env['ir.sequence'].next_by_code('sale.order') or '/' ), 'company_id': dest_company.id, 'client_order_ref': name, 'partner_id': partner.id, 'warehouse_id': warehouse.id, 'pricelist_id': partner.property_product_pricelist.id, 'partner_invoice_id': partner_addr['invoice'], 'date_order': self.date_order, 'fiscal_position': (partner.property_account_position and partner.property_account_position.id or False), 'user_id': False, 'auto_purchase_order_id': self.id, 
'partner_shipping_id': (direct_delivery_address or partner_shipping_id or partner_addr['delivery']), 'note': self.notes } @api.model def _prepare_sale_order_line_data( self, purchase_line, dest_company, sale_order): """ Generate the Sale Order Line values from the PO line :param line : the origin Purchase Order Line :rtype line : purchase.order.line record :param company : the company of the created SO :rtype company : res.company record :param sale_order : the Sale Order """ context = self._context.copy() context['company_id'] = dest_company.id # get sale line data from product onchange sale_line_obj = self.env['sale.order.line'].browse(False) sale_line_data = sale_line_obj.with_context( context).product_id_change_with_wh( pricelist=sale_order.pricelist_id.id, product=(purchase_line.product_id and purchase_line.product_id.id or False), qty=purchase_line.product_qty, uom=(purchase_line.product_id and purchase_line.product_id.uom_id.id or False), qty_uos=0, uos=False, name='', partner_id=sale_order.partner_id.id, lang=False, update_tax=True, date_order=sale_order.date_order, packaging=False, fiscal_position=sale_order.fiscal_position.id, flag=False, warehouse_id=sale_order.warehouse_id.id) sale_line_data['value']['product_id'] = ( purchase_line.product_id and purchase_line.product_id.id or False) sale_line_data['value']['order_id'] = sale_order.id sale_line_data['value']['delay'] = (purchase_line.product_id and purchase_line.product_id. sale_delay or 0.0) sale_line_data['value']['company_id'] = dest_company.id sale_line_data['value']['product_uom_qty'] = (purchase_line. product_qty) sale_line_data['value']['product_uom'] = ( purchase_line.product_id and purchase_line.product_id.uom_id.id or purchase_line.product_uom.id) if sale_line_data['value'].get('tax_id'):
sale_line_data['value']['tax_id'] = ([ [6, 0, sale_line_data['value']['tax_id']]])
sale_line_data['value']['auto_purchase_line_id'] = purchase_line.id return sale_line_data['value'] @api.multi def action_cancel(self):
#from: http://stackoverflow.com/questions/10361820/simple-twisted-echo-client
#and
#from: http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user

from twisted.internet.threads import deferToThread as _deferToThread
from twisted.internet import reactor


class ConsoleInput(object):
    """Asynchronous single-key console listener driven by Twisted.

    Reads one character at a time off a worker thread; 'q' triggers the
    stop callback, 'r' triggers the reconnect callback, anything else is
    ignored and listening continues.
    """

    def __init__(self, stopFunction, reconnectFunction):
        # Callbacks invoked on the terminator ('q') / restart ('r') keys.
        self.stopFunction = stopFunction
        self.reconnectFunction = reconnectFunction

    def start(self):
        # Key bindings and the platform-appropriate getch implementation.
        self.terminator = 'q'
        self.restart = 'r'
        self.getKey = _Getch()
        self.startReceiving()

    def startReceiving(self, s = ''):
        # Called first with '' and then as a Deferred callback with each
        # key read.  getKey blocks, so it runs via deferToThread.
        if s == self.terminator:
            # Terminator: fire the stop callback and stop listening
            # (no further read is scheduled).
            self.stopFunction()
        elif s == self.restart:
            self.reconnectFunction()
            _deferToThread(self.getKey).addCallback(self.startReceiving)
        else:
            # Any other key (including the initial '') just re-arms the read.
            _deferToThread(self.getKey).addCallback(self.startReceiving)


class _Getch:
    """
    Gets a single character from standard input.
    Does not echo to the screen.
    """
    def __init__(self):
        # Probe for Windows first; ImportError (no msvcrt) means we are
        # on a Unix-like system and fall back to tty/termios handling.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()


class _GetchUnix:
    def __init__(self):
        # Import here only to fail fast if tty support is unavailable.
        import tty, sys

    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        # Save terminal settings, switch to raw mode for a one-char read,
        # then always restore the original settings.
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch


class _GetchWindows:
    def __init__(self):
        # Import here so construction raises ImportError off-Windows,
        # which _Getch uses to select the Unix implementation.
        import msvcrt

    def __call__(self):
        import msvcrt
        return msvcrt.getch()
# -*- coding: utf-8 -*-
import mock

from rest_framework import serializers
from waffle.testutils import override_switch

from olympia.amo.tests import (
    BaseTestCase, addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import CollectionAddon
from olympia.bandwagon.serializers import (
    CollectionAddonSerializer, CollectionAkismetSpamValidator,
    CollectionSerializer, CollectionWithAddonsSerializer)
from olympia.lib.akismet.models import AkismetReport


class TestCollectionAkismetSpamValidator(TestCase):
    """Tests for the Akismet spam validator applied to collection data."""

    def setUp(self):
        # Validator watches the 'name' and 'description' fields only.
        self.validator = CollectionAkismetSpamValidator(
            ('name', 'description'))
        serializer = mock.Mock()
        serializer.instance = collection_factory(
            name='name', description='Big Cheese')
        request = mock.Mock()
        request.user = user_factory()
        request.META = {}
        serializer.context = {'request': request}
        self.validator.set_context(serializer)
        # Incoming (localized) data; values matching the existing instance
        # or duplicated across locales should be checked only once.
        self.data = {
            'name': {'en-US': 'Collection', 'fr': u'Collection'},
            'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'},
            'random_data': {'en-US': 'to ignore'},
            'slug': 'cheese'}

    @override_switch('akismet-spam-check', active=False)
    @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
    def test_waffle_off(self, comment_check_mock):
        self.validator(self.data)

        # No Akismet checks
        assert AkismetReport.objects.count() == 0
        comment_check_mock.assert_not_called()

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
    def test_ham(self, comment_check_mock):
        comment_check_mock.return_value = AkismetReport.HAM
        self.validator(self.data)

        # Akismet check is there
        assert AkismetReport.objects.count() == 2
        name_report = AkismetReport.objects.first()
        # name will only be there once because it's duplicated.
        assert name_report.comment_type == 'collection-name'
        assert name_report.comment == self.data['name']['en-US']
        summary_report = AkismetReport.objects.last()
        # en-US description won't be there because it's an existing description
        assert summary_report.comment_type == 'collection-description'
        assert summary_report.comment == self.data['description']['fr']

        assert comment_check_mock.call_count == 2

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
    def test_spam(self, comment_check_mock):
        comment_check_mock.return_value = AkismetReport.MAYBE_SPAM

        # A spam verdict must abort validation.
        with self.assertRaises(serializers.ValidationError):
            self.validator(self.data)

        # Akismet check is there
        assert AkismetReport.objects.count() == 2
        name_report = AkismetReport.objects.first()
        # name will only be there once because it's duplicated.
        assert name_report.comment_type == 'collection-name'
        assert name_report.comment == self.data['name']['en-US']
        summary_report = AkismetReport.objects.last()
        # en-US description won't be there because it's an existing description
        assert summary_report.comment_type == 'collection-description'
        assert summary_report.comment == self.data['description']['fr']

        # After the first comment_check was spam, additional ones are skipped.
        assert comment_check_mock.call_count == 1


class TestCollectionSerializer(BaseTestCase):
    """Field-level tests for the plain collection serializer."""

    serializer = CollectionSerializer

    def setUp(self):
        super(TestCollectionSerializer, self).setUp()
        self.user = user_factory()
        self.collection = collection_factory()
        self.collection.update(author=self.user)

    def serialize(self):
        # Subclasses override this to add serializer context.
        return self.serializer(self.collection).data

    def test_basic(self):
        data = self.serialize()
        assert data['id'] == self.collection.id
        assert data['uuid'] == self.collection.uuid
        assert data['name'] == {'en-US': self.collection.name}
        assert data['description'] == {'en-US': self.collection.description}
        assert data['url'] == self.collection.get_abs_url()
        assert data['addon_count'] == self.collection.addon_count
        # 'modified' is serialized to second precision in ISO-8601 + 'Z'.
        assert data['modified'] == (
            self.collection.modified.replace(microsecond=0).isoformat() + 'Z')
        assert data['author']['id'] == self.user.id
        assert data['slug'] == self.collection.slug
        assert data['public'] == self.collection.listed
        assert data['default_locale'] == self.collection.default_locale


class TestCollectionAddonSerializer(BaseTestCase):
    """Tests for serializing a single collection-addon membership row."""

    def setUp(self):
        self.collection = collection_factory()
        self.addon = addon_factory()
        self.collection.add_addon(self.addon)
        self.item = CollectionAddon.objects.get(addon=self.addon,
                                                collection=self.collection)
        self.item.comments = u'Dis is nice'
        self.item.save()

    def serialize(self):
        return CollectionAddonSerializer(self.item).data

    def test_basic(self):
        data = self.serialize()
        assert data['addon']['id'] == self.collection.addons.all()[0].id
        # Membership comments are exposed as localized 'notes'.
        assert data['notes'] == {'en-US': self.item.comments}


class TestCollectionWithAddonsSerializer(TestCollectionSerializer):
    """Reruns the base serializer tests plus the embedded addons list."""

    serializer = CollectionWithAddonsSerializer

    def setUp(self):
        super(TestCollectionWithAddonsSerializer, self).setUp()
        self.addon = addon_factory()
        self.collection.add_addon(self.addon)

    def serialize(self):
        # This serializer pulls its addon queryset from the view in context.
        mock_viewset = mock.MagicMock()
        collection_addons = CollectionAddon.objects.filter(
            addon=self.addon, collection=self.collection)
        mock_viewset.get_addons_queryset.return_value = collection_addons
        return self.serializer(
            self.collection, context={'view': mock_viewset}).data

    def test_basic(self):
        super(TestCollectionWithAddonsSerializer, self).test_basic()
        collection_addon = CollectionAddon.objects.get(
            addon=self.addon, collection=self.collection)
        data = self.serialize()
        assert data['addons'] == [
            CollectionAddonSerializer(collection_addon).data
        ]
        assert data['addons'][0]['addon']['id'] == self.addon.id
from django.conf.urls import include, url

# URL configuration mounting the third-party django-avatar app under
# /avatar/.
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in 4.0; newer projects use django.urls.re_path()/path() — confirm
# the project's Django version before migrating.
urlpatterns = [
    url(r'^avatar/', include('avatar.urls')),
]
import setuptools

# Packaging metadata for pySIRIUS, the Python front-end for the SIRIUS
# electronic-structure library.
setuptools.setup(
    name="sirius",
    version="0.5",
    author="",
    author_email="simon.pintarelli@cscs.ch",
    description="pySIRIUS",
    url="https://github.com/electronic_structure/SIRIUS",
    packages=['sirius'],
    # Runtime dependencies: MPI bindings, config validation and the usual
    # scientific Python stack.
    install_requires=['mpi4py', 'voluptuous', 'numpy', 'h5py', 'scipy',
                      'PyYAML'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
from csacompendium.locations.models import Precipitation
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PrecipitationListFilter
from csacompendium.locations.api.precipitation.precipitationserializers import precipitation_serializers


def precipitation_views():
    """
    Precipitation views
    :return: All precipitation views
    :rtype: Object
    """
    # Resolve the serializer classes at call time; the nested view classes
    # below close over this dict.
    precipitation_serializer = precipitation_serializers()

    class PrecipitationCreateAPIView(CreateAPIViewHook):
        """
        Creates a single record.
        """
        queryset = Precipitation.objects.all()
        serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
        # Any authenticated user may create records.
        permission_classes = [IsAuthenticated]

    class PrecipitationListAPIView(ListAPIView):
        """
        API list view. Gets all records API.
        """
        queryset = Precipitation.objects.all()
        serializer_class = precipitation_serializer['PrecipitationListSerializer']
        # Filterable and paginated listing, open to read access.
        filter_backends = (DjangoFilterBackend,)
        filter_class = PrecipitationListFilter
        pagination_class = APILimitOffsetPagination

    class PrecipitationDetailAPIView(DetailViewUpdateDelete):
        """
        Updates a record.
        """
        queryset = Precipitation.objects.all()
        serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
        # Update/delete restricted to admin users.
        permission_classes = [IsAuthenticated, IsAdminUser]
        lookup_field = 'pk'

    # Expose the view classes keyed by name for URLconf wiring.
    return {
        'PrecipitationListAPIView': PrecipitationListAPIView,
        'PrecipitationDetailAPIView': PrecipitationDetailAPIView,
        'PrecipitationCreateAPIView': PrecipitationCreateAPIView
    }
as submodule elif args.command == 'numevents': import numevents as submodule elif args.command == 'events': import get_events as submodule elif args.command == 'staypos': import staypos as submodule elif args.command == 'info': import info as submodule elif args.command == 'g4' or args.command == 'regex': import quadparsersuite as submodule elif args.command == 'seqlen': import seqlen as submodule elif args.command == 'dataconc': import dataconc as submodule elif args.command == 'qualpos': import qual_v_pos as submodule elif args.command == 'kmer': import kmer as submodule elif args.command == 'kmerplot': import kmerplot as submodule elif args.command == 'kmerdiff': import kmerdiff as submodule ## elif args.command == 'align': ## import align as submodule elif args.command == 'winner': import winner as submodule elif args.command == 'qualdist': import qualdist as submodule # run the chosen submodule. submodule.run(parser, args) class ArgumentParserWithDefaults(argparse.ArgumentParser): def __init__(self, *args, **kwargs): super(ArgumentParserWithDefaults, self).__init__(*args, **kwargs) self.add_argument("-q", "--quiet", help="Do not output warnings to stderr", action="store_true", dest="quiet") def main(): logging.basicConfig() ######################################### # create the top-level parser ######################################### parser = argparse.ArgumentParser(prog='poreminion', description=""" Poreminion - additional tools for analyzing nanopore sequencing data.""", formatter_class=argparse.RawTextHelpFormatter)#ArgumentDefaultsHelpFormatter) parser.add_argument("-v", "--version", help="Installed poreminion version", action="version", version="%(prog)s " + str(poreminion.version.__version__)) subparsers = parser.add_subparsers(title='[sub-commands]', dest='command', parser_class=ArgumentParserWithDefaults) ######################################### # create the individual tool parsers ######################################### ########## # find uncalled 
(not basecalled) files ########## parser_uncalled = subparsers.add_parser('uncalled', help='Find Fast5 files that were not base-called.') parser_uncalled.add_argument('files', metavar='FILES', nargs='+', help='The input FAST5 files.') parser_uncalled.add_argument('--outprefix', "-o", type=str, required=True, help='Uses this as basename for the following output files: (1) list of files not basecalled because template events not found, (2) list of files not basecalled because too few events found, (3) list of files not basecalled because too many events found. (4) event stats on each.') parser_uncalled.add_argument('--move', "-m", action='store_true', default=False, help='''If specified, will move each non-basecalled file type to an approp labeled dir inside same dir that has the dir reads with reads in it (e.g. downloads --> pass, downloads --> fail, downloads --> "notemplate", etc). Still writes out stats file.''') parser_uncalled.set_defaults(func=run_subtool) ########## # findTimeErrors ########## parser_timetest = subparsers.add_parser('timetest', help='Find Fast5 files that have event times that are earlier than event times before it suggesting malfunction/erroneous read.') parser_timetest.add_argument('files', metavar='FILES', nargs='+', help='The input FAST5 files.') parser_timetest.add_argument('--outprefix', "-o", type=str, default=False, help='Uses this as basename for file containing list of files with time errors.') parser_timetest.add_argument('--move', "-m", action='store_true', default=False, help='''If specified, will move files with time error dir labeled time_errors inside same dir that has the dir with reads in it (e.g. downloads --> pass, downloads --> fail, downloads --> "time_errors", etc). 
Still writes out list file above.''') parser_timetest.add_argument('--verbose', "-v", action='store_true', default=False, help='''Will print to stderr info about how far along it is in process.''') parser_timetest.set_defaults(func=run_subtool) ########## # fragstats ########## parser_fragstats = subparsers.add_parser('fragstats', help='''Run this on set of base-called fast5 files. Returns tab-delimited table with columns: 1 = readname, 2 = estimated molecule/fragment size, 3 = number input events, 4 = if complement detected, 5 = if 2D detected, 6 = num template events, 7 = num complement events, 8 = length of 2D sequence, 9 = length of template sequence, 10 = length of complement sequence, 11 = mean qscore of 2D sequence, 12 = mean qscore of template sequence, 13 = mean qscore of complement, 14 = ratio of number template events to number complement events, 15 = channel number molecule traversed 16 = heat sink temperature while molecule traversed 17 = num called template events (after events pruned during base-calling) 18 = num called complement events (after events pruned during base-calling) 19 = num skips in template (is actually number 0 moves found in ext
ensive analysi
s) 20 = num skips in complement (is actually number 0 moves found in extensive analysis) 21 = num stays in template (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here) 22 = num stays in complement (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here) 23 = strand score template 24 = strand score complement 25 = num stutters in template 26 = num stutters in complement If --extensive used: 27 = starttime, 28 = endtime, 29 = slope across all events, 30 = mean duration across all events, 31 = median duration across all events, 32 = sd of all event durations, 33 = min event duration, 34 = max event duration, 35-40 = num temp events with 0,1,2,3,4,5 moves from base-caller, 41-46 = num comp events with 0,1,2,3,4,5 moves from base caller. If -g4/--quadruplex used: Final+1 = number of G4 motifs in 2D read: '([gG]{3,}\w{1,7}){3,}[gG]{3,}' Final+2 = number of G4 motifs in template read Final+3 = number of G4 motifs in complement read Final+4 = number of G4 complement motifs in 2D reads: '([cC]{3,}\w{1,7}){3,}[cC]{3,}' Final+5 = number of G4 complement motifs in template read (i.e. inferred complement strand count given template read) Final+6 = number of G4 complement motifs in complement read (i.e. inferred template strand count given complement read) If --checktime used: Final column (after even G4 info) = 0 or 1 for no/yes there is a time error present. Estimates molecule/fragment size in the following way. If has 2D, molecule size is the length of 2D read. If template only, molecule size is the length of template read. If template and complement, but no 2D, molecule size is length of the longer read between template and complement. Molecule size allows calculation of total non-redundant data. This is the sum of unique molecule lengths rather than summing all read types from each molecule. 
From the molecule sizes, the "Molecule N50" can be computed using the nx subcommand on the fragstats file and specifying colum 2. ''') parser_fragst
LOG = logging.getLogger(__name__) class CbBackup(base.RestoreRunner): """ Implementation of Restore Strategy for Couchbase. """ __strategy_name__ = 'cbbackup' base_restore_cmd = 'sudo tar xpPf -' def __init__(self, *args, **kwargs): super(CbBackup, self).__init__(*args, **kwargs) def pre_restore(self): try: operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True) except exception.ProcessExecutionError: LOG.exception(_("Error during pre-restore phase.")) raise def post_restore(self): try: # Root enabled for the backup pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY if os.path.exists(pwd_file): with open(pwd_file, "r") as f: pw = f.read().rstrip("\n") root = service.CouchbaseRootAccess() root.set_password(pw) # Get current root password root = service.CouchbaseRootAccess() root_pwd = root.get_password() # Iterate through each bucket config buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON with open(buckets_json, "r") as f: out = f.read() if out == "[]": # No buckets or data to restore. Done. 
return d = json.loads(out) for i in range(len(d)): bucket_name = d[i]["name"] bucket_type = d[i]["bucketType"] if bucket_type == "membase": bucket_type = "couchbase" ram = int(utils.to_mb(d[i]["quota"]["ram"])) auth_type = d[i]["authType"] password = d[i]["saslPassword"] port = d[i]["proxyPort"] replica_number = d[i]["replicaNumber"] replica_index = 1 if d[i]["replicaIndex"] else 0 threads = d[i]["threadsNumber"] flush = 1 if "flush" in d[i]["controllers"] else 0 # cbrestore requires you to manually create dest buckets create_bucket_cmd = ('curl -X POST -u root:' + root_pwd + ' -d name="' + bucket_name + '"' + ' -d bucketType="' + bucket_type + '"' + ' -d ramQuotaMB="' + str(ram) + '"' + ' -d authType="' + auth_type + '"' + ' -d saslPassword="' + password + '"' + ' -d proxyPort="' + str(port) + '"' + ' -d replicaNumber="' + str(replica_number) + '"' + ' -d replicaIndex="' + str(replica_index) + '"' + ' -d threadsNumber="' + str(threads) + '"' + ' -d flushEnabled="' + str(flush) + '" ' + system.COUCHBASE_REST_API + '/pools/default/buckets') utils.execute_with_timeout(create_bucket_cmd, shell=True, timeout=300) if bucket_type == "memcached": continue # Wait for couchbase (membase) bucket creation to complete # (follows same logic as --wait for couchbase-cli) timeout_in_seconds = 120 start = time.time() bucket_exist = False while ((time.time() - start) <= timeout_in_seconds and not bucket_exist): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/') outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all' utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) with open(outfile, "r") as file: out = file.read() buckets = json.loads(out) for bucket in buckets: if bucket["name"] == bucket_name: bucket_exist = True break if not bucket_exist: time.sleep(2) if not bucke
t_exist: raise base.RestoreError("Failed to create bucket '%s' " "within %s seconds" % (bucket_name, timeout_in_seconds)) # Query status # (follows same logic as --wait for couchbase-cli) healthy = False
while ((time.time() - start) <= timeout_in_seconds): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/' + bucket_name) outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) all_node_ready = True with open(outfile, "r") as file: out = file.read() bucket = json.loads(out) for node in bucket["nodes"]: if node["status"] != "healthy": all_node_ready = False break if not all_node_ready: time.sleep(2) else: healthy = True break if not healthy: raise base.RestoreError("Bucket '%s' is created but " "not ready to use within %s " "seconds" % (bucket_name, timeout_in_seconds)) # Restore restore_cmd = ('/opt/couchbase/bin/cbrestore ' + system.COUCHBASE_DUMP_DIR + ' ' + system.COUCHBASE_REST_API + ' --bucket-source=' + bucket_name + ' --bucket-destination=' + bucket_name + ' -u root' + ' -p ' + root_pwd) try: utils.execute_with_timeout(restore_cmd, shell=True, timeout=300) except exception.ProcessExecutionError: # cbrestore fails or hangs at times: # http://www.couchbase.com/issues/browse/MB-10832 # Retrying typically works LOG.exception(_("cbrestore failed. Retr
from __future__ import division

import numpy as np

from .._shared.utils import assert_nD

from . import _hoghistogram


def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3),
        visualise=False, normalise=False):
    """Extract Histogram of Oriented Gradients (HOG) for a given image.

    Compute a Histogram of Oriented Gradients (HOG) by

        1. (optional) global image normalisation
        2. computing the gradient image in x and y
        3. computing gradient histograms
        4. normalising across blocks
        5. flattening into a feature vector

    Parameters
    ----------
    image : (M, N) ndarray
        Input image (greyscale).
    orientations : int
        Number of orientation bins.
    pixels_per_cell : 2 tuple (int, int)
        Size (in pixels) of a cell.
    cells_per_block : 2 tuple (int,int)
        Number of cells in each block.
    visualise : bool, optional
        Also return an image of the HOG.
    normalise : bool, optional
        Apply power law compression to normalise the image before processing.

    Returns
    -------
    newarr : ndarray
        HOG for the image as a 1D (flattened) array.
    hog_image : ndarray (if visualise=True)
        A visualisation of the HOG image.

    References
    ----------
    * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

    * Dalal, N and Triggs, B, Histograms of Oriented Gradients for
      Human Detection, IEEE Computer Society Conference on Computer
      Vision and Pattern Recognition 2005 San Diego, CA, USA
    """
    image = np.atleast_2d(image)

    # Stage 1: optional global image normalisation equalisation, designed to
    # reduce the influence of illumination effects.  Gamma (power law)
    # compression is used: image texture strength is typically proportional
    # to the local surface illumination, so this compression helps reduce
    # the effects of local shadowing and illumination variations.
    assert_nD(image, 2)

    if normalise:
        image = np.sqrt(image)

    # Stage 2: first order image gradients.  These capture contour,
    # silhouette and some texture information, while providing further
    # resistance to illumination variations.
    if image.dtype.kind == 'u':
        # convert uint image to float
        # to avoid problems with subtracting unsigned numbers in np.diff()
        image = image.astype('float')

    # Central differences along x; border columns are zeroed.
    gx = np.empty(image.shape, dtype=np.double)
    gx[:, 0] = 0
    gx[:, -1] = 0
    gx[:, 1:-1] = image[:, 2:] - image[:, :-2]
    # Central differences along y; border rows are zeroed.
    gy = np.empty(image.shape, dtype=np.double)
    gy[0, :] = 0
    gy[-1, :] = 0
    gy[1:-1, :] = image[2:, :] - image[:-2, :]

    # Stage 3: pool gradient orientation information locally, in the same
    # way as the SIFT [Lowe 2004] feature.  The image window is divided
    # into small spatial regions, called "cells"; for each cell a local
    # 1-D histogram of gradient orientations over all pixels in the cell
    # is accumulated, with each pixel voting its gradient magnitude into
    # one of a fixed number of predetermined orientation bins.
    sy, sx = image.shape
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(sx // cx)  # number of cells in x ('//' already floors)
    n_cellsy = int(sy // cy)  # number of cells in y

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))

    _hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,
                                 orientations, orientation_histogram)

    # now for each cell, compute the histogram
    hog_image = None

    if visualise:
        from .. import draw

        radius = min(cx, cy) // 2 - 1
        orientations_arr = np.arange(orientations)
        dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)
        dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)
        cr2 = cy + cy
        cc2 = cx + cx
        hog_image = np.zeros((sy, sx), dtype=float)
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
                    # NOTE(review): lines are anchored at (y*cy, x*cx), the
                    # cell's top-left corner, not its centre -- kept as-is
                    # to preserve the existing visualisation output.
                    centre = tuple([y * cr2 // 2, x * cc2 // 2])
                    rr, cc = draw.line(int(centre[0] - dx),
                                       int(centre[1] + dy),
                                       int(centre[0] + dx),
                                       int(centre[1] - dy))
                    hog_image[rr, cc] += orientation_histogram[y, x, o]

    # Stage 4: block normalisation.  Local groups of cells ("blocks") are
    # contrast-normalised by a measure of local histogram "energy".  Each
    # individual cell is typically shared between several blocks, so it
    # appears several times in the final vector with different
    # normalisations; this redundancy improves performance.
    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksy, n_blocksx,
                                  by, bx, orientations))

    eps = 1e-5  # regulariser; hoisted out of the loop (invariant)
    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[y:y + by, x:x + bx, :]
            # BUGFIX: L2 block normalisation (Dalal & Triggs) divides by
            # the block's Euclidean norm, sqrt(sum(block**2) + eps).  The
            # previous code divided by sqrt(block.sum()**2 + eps) -- the
            # squared L1 total, which is not a norm and mis-scales the
            # descriptor (fixed upstream in scikit-image).
            normalised_blocks[y, x, :] = block / np.sqrt(np.sum(block ** 2) +
                                                         eps)

    # Stage 5: collect the HOG descriptors from all blocks of the dense
    # overlapping grid into a combined feature vector.
    if visualise:
        return normalised_blocks.ravel(), hog_image
    else:
        return normalised_blocks.ravel()
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR
CONDITIONS OF ANY KIND,
either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""oslo.i18n integration module.

See http://docs.openstack.org/developer/oslo.i18n/usage.html

"""

import oslo_i18n

# Factory producing all translator callables for the 'murano' gettext domain.
_translators = oslo_i18n.TranslatorFactory(domain='murano')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
max_length=100, unique=True) title = models.CharField(_(u"title"),
max_length=50) logo = models.ImageField(_(u"Logo"), upload_to='media_root', # TEMP -> pbs with PIL... blank=True) ndds = models.ManyToManyField(Site, related_name="website") owners = models.ManyToManyField(User, through='WebSiteOw
ner') domain = models.ForeignKey(Site, related_name="website_set", unique=True, on_delete=models.PROTECT, help_text=_(u"Represents the main domain of the " "website.")) analytics_key = models.CharField(_("Analytics key"), max_length=20, blank=True, null=True, #regex=r'UA-[0-9]{7}-[0-9]{1}', help_text=u'e.g. "UA-2456069-3"') main_menu_levels = models.PositiveIntegerField(_("Main menu levels"), default=1) meta_keywords = models.CharField(_(u"META Keywords"), max_length="255", blank=True) meta_description = models.TextField(_(u"META Description"), blank=True) theme = models.CharField(_(u'Theme slug'), max_length=100) default_template = models.CharField(_(u'Default template'), max_length=100, blank=True) default_layout = models.CharField(_(u'Default layout'), max_length=100) # Warning, please use directory() to access the Files Library object files_library = models.ForeignKey(FileManager, related_name="website", blank=True, null=True, help_text=_(u"Files Library")) in_maintenance = models.BooleanField(_(u'Maintenance mode'), default=False, blank=True) class Meta: verbose_name = _(u"website") verbose_name_plural = _(u'websites') def __unicode__(self): return u'%s' % self.title def delete(self, *args, **kwargs): """ Delete this domain names linked to it and the files """ for ndd in self.ndds.all(): if ndd != self.domain: ndd.delete() save_ndd = self.domain #shutil.rmtree(self.media_root()) super(WebSite, self).delete(*args, **kwargs) # The domain name is protected until the website is deleted successfully save_ndd.delete() def get_theme(self): if len(self.theme.split('/')) <= 1: return "%s/default" % self.theme return self.theme def file_manager(self): if self.files_library: return self.files_library else: # Create root directory root = Directory.objects.create(name=self.slug) self.files_library = FileManager.objects.create(root=root) self.save() try: os.makedirs(self.media_root()) except OSError: pass # Create try: os.makedirs(os.path.join(self.media_root(), 'storage')) 
except OSError: pass return self.files_library def media_root(self): "Get the filemanager site root" return os.path.join('websites', self.slug, 'storage') def get_size(self): "Give the size used for quota in bytes" return folder_size(self.media_root()) def get_screenshot(self): "Return the url of the screenshot or None for the default image" return None def get_absolute_url(self): if getattr(settings, 'SERVER_PORT', 80) != 80: return u'http://%s:%d' % (self.domain.domain, settings.SERVER_PORT) else: return u'http://%s' % self.domain.domain def get_medias(self): # medias_list = [] # # Add css file of the template # medias_list.append( # u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % ( # self.domain.domain, self.skin.template.css_file )) # # Add css file of the skin # medias_list.append( # u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % ( # self.domain.domain, self.skin.css_file )) # return u"\n".join(medias_list) return "" medias = property(get_medias) def _get_layout(self, layout_name=None): if layout_name is not None: return 'layouts/%s' % layout_name else: return '' def get_default_layout(self): return self._get_layout(self.default_layout) layout = property(get_default_layout) # def get_header_layout(self): # return self._get_layout(self.header_layout) # def get_footer_layout(self): # return self._get_layout(self.footer_layout) # def render_header(self, request): # """ # Returns the header rendering of website. # """ # return render_plugins_header_or_footer( # request, # plugins_list=self.header_plugins.order_by('plugin_order'), # layout=self.get_header_layout()) # def render_footer(self, request): # """ # Returns the footer rendering of website. 
# """ # return render_plugins_header_or_footer( # request, # plugins_list=self.footer_plugins.order_by('plugin_order'), # layout=self.get_footer_layout()) def get_url_home_page(self): return u'/' class WebSiteOwner(models.Model): website = models.ForeignKey(WebSite, related_name='websites_owned') user = models.ForeignKey(User, related_name='websites_owned') is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user " "has all permissions without " "explicitly assigning them.")) def __unicode__(self): return u'%s owns %d (%s)' % (self.user, self.website.id, self.is_superuser) def delete(self, *args, **kwargs): number_of_owners = self.website.websites_owned.filter(is_superuser=True).count() if number_of_owners <= 1 and self.is_superuser: raise IntegrityError('This user is the only superuser of this website') else: super(WebSiteOwner, self).delete(*args, **kwargs) # SIGNALS def catch_wrong_deletion_of_user(sender, instance, **kwargs): ''' Verify that if we delete the website owner, it will still have no orphans websites ''' cursor = connection.cursor() cursor.execute(""" SELECT ws.title, COUNT(*) as owners FROM website_website ws INNER JOIN website_websiteowner wso ON ws.id = wso.website_id AND wso.is_superuser = TRUE AND ws.id IN (SELECT website_id FROM website_websiteowner WHERE user_id = %s) GROUP BY ws.title """, [instance.id]) websites_owned = cursor.fetchall() websites_alone = [] for website_title, owner_count in websites_owned: if website_title is not None and owner_count <= 1: websites_alone.append(website_title) if len(websites_alone) > 0: raise IntegrityError( 'This user is the only owner of the website(s) : %s' % ( ', '.join(websites_alone))) models.signals.pre_delete.connect(catch_wrong_deletion_of_user, sender=User) def create_filemanager_media_site_root(sender, instance, **kwargs): """
tes_sec_max_length") if params.get("read_bytes_sec_max_length"): cmd += " --read-bytes-sec-max-length %s" % params.get("read_bytes_sec_max_length") if params.get("write_bytes_sec_max_length"): cmd += " --write-bytes-sec-max-length %s" % params.get("write_bytes_sec_max_length") if params.get("size_iops_sec"): cmd += " --size-iops-sec %s" % params.get("size_iops_sec") if params.get("group_name"): cmd += " --group-name %s" % params.get("group_name") return command(cmd, **dargs) def perf(domain, options="", events="", other_opt="", **dargs): """ Enable or disable perf events :param domain: Domain name, id :param options: --enable | --disable :param events: perf event names seperated by comma :param other_opt: --config | --live | --current :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "perf %s %s %s %s" % (domain, options, events, other_opt) return command(cmd, **dargs) def domstats(domains="", options="", **dargs): """ Get statistics about one or multiple domains :param domains: List of domains :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "domstats %s %s" % (domains, options) return command(cmd, **dargs) def freepages(cellno=None, pagesize=None, options="", **dargs): """ Display available free pages for the NUMA cell :param cellno: NUMA cell number :param pagesize: Page size (in kibibytes) :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "freepages %s" % options if cellno is not None: cmd += " --cellno %s" % cellno if pagesize is not None: cmd += " --pagesize %s" % pagesize return command(cmd, **dargs) def domcapabilities(virttype=None, emulatorbin=None, arch=None, machine=None, options="", **dargs): """ Capabilities of emulator with respect to host and libvirt :param virttype: Virtualization type (/domain/@type) :param emulatorbin: Path to emulator binary 
(/domain/devices/emulator) :param arch: Domain architecture (/domain/os/type/@arch) :param machine: machine type (/domain/os/type/@machine) :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "domcapabilities %s" % options if virttype: cmd += " --virttype %s" % virttype if emulatorbin: cmd += " --emulatorbin %s" % emulatorbin if arch: cmd += " --arch %s" % arch if machine: cmd += " --machine %s" % machine return command(cmd, **dargs) def metadata(name, uri, options="", key=None, new_metadata=None, **dargs): """ Show or set domain's custom XML Metadata :param name: Domain name, id or uuid :param uri: URI of the namespace :param options: options may be live, config and current :param key: Key to be used as a namespace identifier :param new_metadata: new metadata to set :param dargs: standardized virsh function API keywords :return: CmdResult instance """ cmd = "metadata --domain %s --uri %s %s" % (name, uri, options) if key: cmd += " --key %s" % key if new_metadata: cmd += " --set '%s'" % new_metadata.replace("\'", "\"") return command(cmd, **dargs) def cpu_models(arch, options="", **dargs): """ Get the CPU models for an arch. :param arch: Architecture :param options: Extra options :param dargs: Sta
ndardized virsh function API keywords :return: CmdResult instance """ cmd = "cpu-models %s %s" % (arch, options) return command(cmd, **dargs) def net_dhcp_leases(network, mac=None, options="", **dargs): """ Print lease info for a given network :param network: Network name or uuid :param mac: Mac address :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "net-dhcp-leases %s %
s" % (network, options) if mac: cmd += " --mac %s" % mac return command(cmd, **dargs) def qemu_monitor_event(domain=None, event=None, event_timeout=None, options="", **dargs): """ Listen for QEMU Monitor Events :param domain: Domain name, id or UUID :param event: Event type name :param event_timeout: Timeout seconds :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "qemu-monitor-event %s" % options if domain: cmd += " --domain %s" % domain if event: cmd += " --event %s" % event if event_timeout: cmd += " --timeout %s" % event_timeout return command(cmd, **dargs) def net_event(network=None, event=None, event_timeout=None, options="", **dargs): """ List event types, or wait for network events to occur :param network: Network name or uuid :param event: Event type to wait for :param event_timeout: Timeout seconds :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "net-event %s" % options if network: cmd += " --network %s" % network if event: cmd += " --event %s" % event if event_timeout: cmd += " --timeout %s" % event_timeout return command(cmd, **dargs) def event(domain=None, event=None, event_timeout=None, options="", **dargs): """ List event types, or wait for domain events to occur :param domain: Domain name, id or UUID :param event: Event type name :param event_timeout: Timeout seconds :param options: Extra options :param dargs: Standardized virsh function API keywords :return: CmdResult instance """ cmd = "event %s" % options if domain: cmd += " --domain %s" % domain if event: cmd += " --event %s" % event if event_timeout: cmd += " --timeout %s" % event_timeout return command(cmd, **dargs) def move_mouse(name, coordinate, **dargs): """ Move VM mouse. 
:param name: domain name :param coordinate: Mouse coordinate """ cmd = "mouse_move %s %s" % coordinate qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs) # Sleep 1 sec to make sure VM received mouse move event time.sleep(1) def click_button(name, left_button=True, **dargs): """ Click left/right button of VM mouse. :param name: domain name :param left_button: Click left or right button """ state = 1 if not left_button: state = 4 cmd = "mouse_button %s" % state qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs) # Sleep 1 sec to make sure VM received mouse button event, # then release button(state=0) time.sleep(1) cmd = "mouse_button 0" qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs) time.sleep(1) def iothreadadd(name, thread_id, options=None, **dargs): """ Add an IOThread to the guest domain. :param name: domain name :param thread_id: domain iothread ID :param options: options may be live, config and current :param dargs: standardized virsh function API keywords :return: CmdResult instance """ cmd = "iothreadadd %s %s" % (name, thread_id) if options: cmd += " %s" % options return command(cmd, **dargs) def iothreaddel(name, thread_id, options=None, **dargs): """ Delete an IOThread from the guest domain. :param name: domain name :param thread_id: domain iothread ID :param options: options may be live, config and current :param dargs: standardized virsh function API keywords :return: CmdResult instance """ cmd = "iothreaddel %s %s" % (name, thread_id) if options: cmd += " %s" % options return command(cmd, **dargs) def iothreadin
# -*- coding: utf-8 -*- # # Author: François Rossigneux <francois.rossigneux@inria.fr> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BAS
IS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from climate import tests class DBUtilsTestCase(tests.TestCase): """Test case for DB Utils."""
pass
#!/home/bolt/.python_compiled/bin/python3
import math
from PIL import Image

# One third of a full turn: the hue wheel below is split into three
# 120-degree sectors (red->green, green->blue, blue->red).
_SECTOR = 2 / 3 * math.pi


def complex_wrapper(func, scale_factor=1):
    """Adapt a complex -> complex function to a (real, imag) -> (real, imag)
    interface, optionally shrinking its input by ``scale_factor``."""
    def inner(real, imag):
        result = func(complex(real, imag) / scale_factor)
        return result.real, result.imag
    return inner


def decorate_atan(func):
    """Wrap an atan2-style function so its range becomes [0, 2*pi)
    instead of (-pi, pi]."""
    def inner(y, x):
        angle = func(y, x)
        return angle if angle >= 0 else 2 * math.pi + angle
    return inner


atan = decorate_atan(math.atan2)


def assign_color_shade(position):
    """Assign a unique colour shade to each angle in [0, 2*pi); the
    origin itself is white."""
    x, y = position
    if (x, y) == (0, 0):
        return (255, 255, 255)
    angle = atan(y, x)
    # Position within the current 120-degree sector, scaled to [0, 255].
    mixing = angle % _SECTOR / _SECTOR * 255
    if angle <= _SECTOR:
        return (255 - mixing, mixing, 0)
    if _SECTOR < angle <= 2 * _SECTOR:
        return (0, 255 - mixing, mixing)
    return (mixing, 0, 255 - mixing)


def color_intensity(position, radius, gradient):
    """Modulate the hue by radial distance: fade toward white inside the
    disc of the given radius, toward black outside it; ``gradient``
    controls how fast the fade happens."""
    x, y = position
    base = assign_color_shade(position)
    rho2 = x ** 2 + y ** 2
    if rho2 < radius ** 2:
        ratio = (rho2 / (radius ** 2)) ** gradient
        return tuple(255 - ratio * (255 - channel) for channel in base)
    ratio = ((radius ** 2) / rho2) ** gradient
    return tuple(channel * ratio for channel in base)


def colorize_point(position, radius, gradient=1):
    """Combine shade and intensity, rounding each channel to an int."""
    return tuple(round(channel)
                 for channel in color_intensity(position, radius, gradient))


def generate_plane_image(x_size, y_size, radius, gradient):
    """Render the colour-wheel "domain" plane as an RGB image centred on
    the image midpoint."""
    image = Image.new('RGB', (x_size, y_size))
    half_x, half_y = x_size // 2, y_size // 2
    for col in range(x_size):
        for row in range(y_size):
            shade = colorize_point((col - half_x, row - half_y),
                                   radius, gradient)
            image.putpixel((col, row), shade)
    return image


def map_function(plane_image, func, radius, gradient):
    """Visualise ``func`` by pulling colours back through it: each target
    pixel takes the colour of the domain-plane point it maps to, or a
    freshly computed shade when that point falls outside the plane."""
    x_size, y_size = plane_image.size
    half_x, half_y = x_size // 2, y_size // 2
    image = Image.new('RGB', plane_image.size)
    for col in range(x_size):
        for row in range(y_size):
            u, v = func(col - half_x, row - half_y)
            try:
                colors = plane_image.getpixel((u + half_x, v + half_y))
            except IndexError:
                # Mapped point lies outside the rendered plane; compute
                # its colour directly instead of sampling.
                colors = colorize_point((u, v), radius, gradient)
            image.putpixel((col, row), colors)
    return image
#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
#
# Shell command
# Copyright 2010, Jeremy Grosser <synack@digg.com>

# NOTE(review): 'argparse' is not referenced directly in this module --
# parsers come from script_helper -- presumably kept for interface parity;
# verify before removing.
import argparse
import os
import sys

import clusto
from clusto import script_helper


class Console(script_helper.Script):
    '''
    Use clusto's hardware port mappings to console to a remote server
    using the serial console.
    '''

    def __init__(self):
        script_helper.Script.__init__(self)

    def _add_arguments(self, parser):
        """Register this command's CLI arguments on the given parser."""
        # Default --user to $USER; the help text documents the precedence:
        # --user > clusto.conf:console.user > environment default.
        user = os.environ.get('USER')
        parser.add_argument('--user', '-u', default=user,
            help='SSH User (you can also set this in clusto.conf too'
            'in console.user: --user > clusto.conf:console.user > "%s")' % user)
        parser.add_argument('server', nargs=1,
            help='Object to console to (IP or name)')

    def add_subparser(self, subparsers):
        """Attach this command as a subcommand of the main CLI."""
        parser = self._setup_subparser(subparsers)
        self._add_arguments(parser)

    def run(self, args):
        """Look up the target object and open a serial console to it.

        Returns 1 when the object cannot be found, 2 when it has no
        console capability; otherwise returns whatever the object's
        console() call returns.
        """
        try:
            # clusto.get() returns a list of matching objects (or none).
            server = clusto.get(args.server[0])
            if not server:
                raise LookupError('Object "%s" does not exist' % args.server)
        except Exception as e:
            self.debug(e)
            self.error('No object like "%s" was found' % args.server)
            return 1

        server = server[0]
        if not hasattr(server, 'console'):
            self.error('The object %s lacks a console method' % server.name)
            return 2

        # NOTE(review): args.user defaults to $USER, so when that env var is
        # set the else-branch (clusto.conf lookup) is effectively
        # unreachable -- confirm whether config should win over the default.
        user = os.environ.get('USER')
        if args.user:
            self.debug('Grabbing user from parameter')
            user = args.user
        else:
            self.debug('Grabbing user from config file or default')
            user = self.get_conf('console.user', user)

        self.debug('User is "%s"' % user)
        return(server.console(ssh_user=user))


def main():
    """Entry point: build the Console command from CLI args and run it."""
    console, args = script_helper.init_arguments(Console)
    return(console.run(args))

if __name__ == '__main__':
    sys.exit(main())
"""Shared fixture data for the account / transaction / payee / category tests."""

from datetime import datetime

# --------------------
# Account test data
# --------------------

account = {
    'name': 'Test Account Name',
    'type': 'Checking',
    'bank_name': 'Bank of Catonsville',
    'account_num': '1234567890',
}

account_put = {
    'name': 'Savings Account',
    'type': 'Savings',
}

db_account = {
    'id': 'acct_testaccountname',
    'name': 'Test Account Name',
    'type': 'Checking',
    'bank_name': 'Bank of Catonsville',
    'account_num': '1234567890',
    'bal_uncleared': 2635.63,
    'bal_cleared': -40.92,
    'bal_reconciled': 1021.61,
    'budget_monitored': True,
}

db_account_2 = {
    'id': 'acct_toaccountname',
    'name': 'To Account Name',
    'type': 'Savings',
    'bank_name': 'Bank of Catonsville',
    'account_num': '0987654321',
    'bal_uncleared': 100.00,
    'bal_cleared': 100.00,
    'bal_reconciled': 200.00,
    'budget_monitored': False,
}

db_account_3 = {
    'id': 'acct_to2accountname',
    'name': 'To 2 Account Name',
    'type': 'Savings',
    'bank_name': 'Bank of Catonsville',
    'account_num': '0987654320',
    'bal_uncleared': 500.00,
    'bal_cleared': 500.00,
    'bal_reconciled': 600.00,
    'budget_monitored': False,
}

# ------------------------
# Transaction test data
# ------------------------

transaction = {
    'date': '2014-08-10',
    'type': 'EFT',
    'payee': 'Giant',
    # need: category/account, split -> consider fields.Nested
    'reconciled': '',
    'amount': -52.08,
    'memo': '',
}

transaction_transfer = {
    'date': '2014-08-10',
    'type': 'XFER',
    'payee': 'Move to Savings',
    'reconciled': '',
    'amount': -100.00,
    'memo': '',
    'cat_or_acct_id': 'acct_toaccountname',
}

# id = 53f69e77137a001e344259cb (Amazon.com)
transaction_put_amount = {
    'amount': -14.01,
    'memo': 'Birthday present',
}

# id = 53f69e77137a001e344259cb (Amazon.com)
transaction_put_reconciled = {
    'reconciled': 'C',
}

# id = 53f69e77137a001e344259cb (Amazon.com)
transaction_put_amountreconciled = {
    'amount': -14.01,
    'reconciled': 'C',
}

db_transactions = [
    {
        'id': '53f69e77137a001e344259c7',
        'date': datetime(2014, 7, 31),
        'type': 'DEP',
        'payee': 'Sandy Spring Bank',
        'reconciled': 'R',
        'amount': 1145.06,
        'memo': 'Sandy\'s Salary',
        'cat_or_acct_id': '1',
    },
    {
        'id': '53f69e77137a001e344259c8',
        'date': datetime(2014, 8, 1),
        'type': 'EFT',
        'payee': 'Costco',
        'reconciled': 'R',
        'amount': -123.45,
        'memo': 'Test transaction memo',
        'cat_or_acct_id': '2',
    },
    {
        'id': '53f69e77137a001e344259c9',
        'date': datetime(2014, 8, 6),
        'type': 'EFT',
        'payee': 'Exxon',
        'reconciled': 'C',
        'amount': -40.92,
        'memo': '',
        'cat_or_acct_id': '2',
    },
    {
        'id': '53f69e77137a001e344259ca',
        'date': datetime(2014, 8, 18),
        'type': 'DEP',
        'payee': 'U.S. Government',
        'reconciled': '',
        'amount': 2649.52,
        'memo': 'Kyle\'s Salary',
        'cat_or_acct_id': '1',
    },
    {
        'id': '53f69e77137a001e344259cb',
        'date': datetime(2014, 8, 12),
        'type': 'EFT',
        'payee': 'Amazon.com',
        'reconciled': '',
        'amount': -13.89,
        'memo': '',
        'cat_or_acct_id': '2',
    },
]

db_transfer_transactions_fromAcct = [
    {
        'id': '53f69e77137a001e344259c7',
        'date': datetime(2014, 7, 31),
        'type': 'XFER',
        'payee': 'To Savings',
        'reconciled': 'C',
        'amount': -100.00,
        'memo': '',
        'cat_or_acct_id': 'acct_toaccountname',
    },
    {
        'id': '53f69e77137a001e344259c8',
        'date': datetime(2014, 7, 31),
        'type': 'XFER',
        'payee': 'To Savings',
        'reconciled': 'C',
        'amount': -100.00,
        'memo': '',
        'cat_or_acct_id': 'somecategoryidstring',
    },
]

db_transfer_transactions_toAcct = [
    {
        'id': '53f69e77137a001e344259c7',
        'date': datetime(2014, 7, 31),
        'type': 'XFER',
        'payee': 'To Savings',
        'reconciled': 'R',
        'amount': 100.00,
        'memo': '',
        'cat_or_acct_id': 'acct_testaccountname',
    },
]

# ------------------
# Payee test data
# ------------------

payee = {
    'name': 'Costco',
}

payee_put = {
    'name': 'Newegg.com',
}

db_payees = [
    {'id': '53f69e77137a001e344259f1', 'name': 'Costco'},
    {'id': '53f69e77137a001e344259f2', 'name': 'Amazon.com'},
    {'id': '53f69e77137a001e344259f3', 'name': 'U.S. Government'},
    {'id': '53f69e77137a001e344259f4', 'name': 'Exxon'},
    {'id': '53f69e77137a001e344259f5', 'name': 'Sandy Spring Bank'},
]

# ---------------------
# Category test data
# ---------------------

category_1 = {
    'name': 'Tithe',
    'parent_id': None,
}

category_2 = {
    'name': 'Gas & Electric',
    'parent_id': '1234567890',
}

category_put = {
    'parent_id': '1234567890',
}

db_categories = [
    {
        'id': '53f69e77137a001e344259f1',
        'name': 'Auto',
        'budget_tracked': False,
        'parent_id': None,
    },
    {
        'id': '53f69e77137a001e344259fa',
        'name': 'Gas',
        'budget_tracked': True,
        'parent_id': '53f69e77137a001e344259f1',  # Parent = Auto
    },
    {
        'id': '53f69e77137a001e344259fb',
        'name': 'Service',
        'budget_tracked': True,
        'parent_id': '53f69e77137a001e344259f1',  # Parent = Auto
    },
    {
        'id': '53f69e77137a001e344259f2',
        'name': 'Dining & Entertainment',
        'budget_tracked': True,
        'parent_id': None,
    },
    {
        'id': '53f69e77137a001e344259f3',
        'name': 'Tithe',
        'budget_tracked': True,
        'parent_id': None,
    },
]
# fabfile.py
# Fabric tasks for building, deploying and managing a Spark job
# (assembly / sync / start / kill / getOutput).
#
###########################################################################
##
## Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################

from fabric.api import *
from textwrap import dedent, wrap
import io
import re
import pickle
import sys
import os
import yaml

script_dir = os.path.dirname(__file__)
# NOTE(review): yaml.load without an explicit Loader executes arbitrary
# constructors on untrusted input; these configs are local, but consider
# yaml.safe_load.
with open(script_dir + "/config.yaml", "r") as f:
    config = yaml.load(f)

# A per-application config in the current directory overrides the defaults.
if os.path.isfile('config.yaml'):
    with open('config.yaml', 'r') as f:
        config.update(yaml.load(f))
else:
    print("Error: Current directory must have local application config.")
    sys.exit(-1)

env.roledefs['master'] = config['master']
env.roledefs['workers'] = config['workers']
env.roledefs['all'] = config['all']
env.use_ssh_config = True


@task
def assembly():
    """Build the fat jar with sbt, logging to assembly.log."""
    local("sbt assembly &> assembly.log")


@task
def sync():
    """Rsync the built jar to every server in the cluster."""
    # put(config['local_jar_dir'] + '/' + config['jar'], config['remote_jar_dir'])
    for server in config['all']:
        local("rsync -azrv --progress {}/{} {}:/{}".format(
            config['local_jar_dir'],
            config['jar'],
            server,
            config['remote_jar_dir']
        ))


@task
@roles('master')
def start():
    """Submit the job via spark-submit and remember the driver it started."""
    outIO = io.BytesIO()
    errIO = io.BytesIO()
    sudo(' '.join([
        config['remote_spark_dir'] + '/bin/spark-submit ',
        '--class', config['main_class'],
        '--master', config['spark_master'],
        '--deploy-mode', 'cluster',
        config['remote_jar_dir'] + '/' + config['jar']
    ]), stdout=outIO, stderr=errIO)
    outIO.seek(0)
    errIO.seek(0)
    outStr = outIO.read()

    # FIX: re.search can return None if spark-submit's output format
    # changes or the submit fails outright; the original crashed with
    # AttributeError on .group() in that case.
    driverRe = re.search(r"State of (driver-\d*-\d*) is (\S*)", outStr)
    if driverRe is None:
        print("Could not parse a driver state from spark-submit output:")
        print(outStr)
        return
    driverId = driverRe.group(1)
    status = driverRe.group(2)
    print(" DriverID: " + driverId)
    print(" Status: " + status)
    if status == "ERROR":
        msg = """
            The error state occurs when the Spark Master rejects the job,
            which is likely due to a misconfiguration in the Spark context
            of your application. Once checking your Spark context for
            accuracy, next ssh into the node that failed and go to Spark
            work directory, which contains the output for Spark
            applicaitons and drivers. Check stderr and stdout in the
            driver and application directories.
        """
        print(dedent(msg))
    elif status == "RUNNING":
        driverServerRe = re.search(r"Driver running on (\S*):\d* ", outStr)
        if driverServerRe is None:
            print("Could not parse the driver server from spark-submit output.")
            return
        driverServer = driverServerRe.group(1)
        print(" DriverServer: " + driverServer)
        # Persist the driver info so kill/getOutput can default to it.
        with open('lastJobStarted.pickle', 'wb') as f:
            pickle.dump({
                'driverId': driverId,
                'driverServer': driverServer
            }, f)
    else:
        print(status)


@task
@roles('master')
def kill(driverId=None):
    """Kill a running driver; defaults to the last job started here."""
    if not driverId:
        try:
            with open('lastJobStarted.pickle', 'rb') as f:
                m = pickle.load(f)
        except IOError as e:
            # FIX: the original printed this message and then fell through,
            # crashing with NameError because m was never assigned.
            print("Unable to open lastJobStarted.pickle")
            sys.exit(-1)
        driverId = m['driverId']
    sudo(' '.join([
        config['remote_spark_dir'] + '/bin/spark-class ',
        "org.apache.spark.deploy.Client kill",
        config['spark_master'],
        driverId
    ]))


@task
def getOutput(driverId=None, driverServer=None):
    """Copy the driver's stdout/stderr from the Spark work directory."""
    if not driverId:
        try:
            with open('lastJobStarted.pickle', 'rb') as f:
                m = pickle.load(f)
        except IOError as e:
            print("Unable to open lastJobStarted.pickle")
            sys.exit(-1)
        driverId = m['driverId']
        driverServer = m['driverServer']
    local("scp " + driverServer + ":" + config['spark_work'] + "/" +
          driverId + "/stdout " + "stdout.txt")
    local("scp " + driverServer + ":" + config['spark_work'] + "/" +
          driverId + "/stderr " + "stderr.txt")
# Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
Contains data about certain markup, like HTML tags and external links.

When updating this file, please also update the the C tokenizer version:
- mwparserfromhell/parser/ctokenizer/definitions.c
- mwparserfromhell/parser/ctokenizer/definitions.h
"""

__all__ = [
    "get_html_tag",
    "is_parsable",
    "is_visible",
    "is_single",
    "is_single_only",
    "is_scheme",
]

# Maps each recognized URI scheme to whether it requires "//" after the colon.
URI_SCHEMES = {
    # [wikimedia/mediawiki.git]/includes/DefaultSettings.php @ 5c660de5d0
    "bitcoin": False,
    "ftp": True,
    "ftps": True,
    "geo": False,
    "git": True,
    "gopher": True,
    "http": True,
    "https": True,
    "irc": True,
    "ircs": True,
    "magnet": False,
    "mailto": False,
    "mms": True,
    "news": False,
    "nntp": True,
    "redis": True,
    "sftp": True,
    "sip": False,
    "sips": False,
    "sms": False,
    "ssh": True,
    "svn": True,
    "tel": False,
    "telnet": True,
    "urn": False,
    "worldwind": True,
    "xmpp": False,
}

# Tags whose contents must NOT be handed to the wikicode parser.
PARSER_BLACKLIST = [
    # https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
    "categorytree",
    "ce",
    "chem",
    "gallery",
    "graph",
    "hiero",
    "imagemap",
    "inputbox",
    "math",
    "nowiki",
    "pre",
    "score",
    "section",
    "source",
    "syntaxhighlight",
    "templatedata",
    "timeline",
]

# Tags that render no visible text.
INVISIBLE_TAGS = [
    # https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
    "categorytree",
    "gallery",
    "graph",
    "imagemap",
    "inputbox",
    "math",
    "score",
    "section",
    "templatedata",
    "timeline",
]

# [wikimedia/mediawiki.git]/includes/parser/Sanitizer.php @ 95e17ee645
SINGLE_ONLY = ["br", "wbr", "hr", "meta", "link", "img"]
SINGLE = SINGLE_ONLY + ["li", "dt", "dd", "th", "td", "tr"]

# Wiki list/definition markup and the HTML tag it produces.
MARKUP_TO_HTML = {
    "#": "li",
    "*": "li",
    ";": "dt",
    ":": "dd",
}


def get_html_tag(markup):
    """Return the HTML tag associated with the given wiki-markup."""
    return MARKUP_TO_HTML[markup]


def is_parsable(tag):
    """Return if the given *tag*'s contents should be passed to the parser."""
    return tag.lower() not in PARSER_BLACKLIST


def is_visible(tag):
    """Return whether or not the given *tag* contains visible text."""
    return tag.lower() not in INVISIBLE_TAGS


def is_single(tag):
    """Return whether or not the given *tag* can exist without a close tag."""
    return tag.lower() in SINGLE


def is_single_only(tag):
    """Return whether or not the given *tag* must exist without a close tag."""
    return tag.lower() in SINGLE_ONLY


def is_scheme(scheme, slashes=True):
    """Return whether *scheme* is valid for external links."""
    scheme = scheme.lower()
    if scheme not in URI_SCHEMES:
        return False
    # With slashes any known scheme is valid; without slashes only the
    # schemes flagged False (not slash-requiring) are.
    return True if slashes else not URI_SCHEMES[scheme]
if __name__ == '__main__':
    # We want to call _enable_attach inside an import to make sure that it
    # works properly that way (the import below triggers it as a side effect).
    import _debugger_case_wait_for_attach_impl
# -*- coding: utf-8 -*-


def bubble_sort(to_sort):
    """Sort *to_sort* in place by adjacent swaps and return it."""
    index = 0
    while index < len(to_sort):
        offset = index
        # Sift the element at *index* leftwards to its sorted position.
        while offset > 0 and to_sort[offset - 1] > to_sort[offset]:
            to_sort[offset - 1], to_sort[offset] = \
                to_sort[offset], to_sort[offset - 1]
            offset -= 1
        index += 1
    return to_sort


def quick_sort(to_sort):
    """Return a new sorted list built from *to_sort*.

    FIX: the original dropped every element equal to the pivot except the
    pivot itself (e.g. quick_sort([2, 2, 1]) returned [1, 2]); the
    partition now keeps an explicit "equal" bucket.
    """
    if not to_sort:
        return []
    pivot = to_sort[0]
    lt, eq, gt = _split_by(to_sort, pivot)
    return quick_sort(lt) + eq + quick_sort(gt)


def _split_by(to_sort, pivot):
    """Partition *to_sort* into (less-than, equal-to, greater-than) lists."""
    lt = [e for e in to_sort if e < pivot]
    eq = [e for e in to_sort if e == pivot]
    gt = [e for e in to_sort if e > pivot]
    return lt, eq, gt


import unittest


class BubbleSortTest(unittest.TestCase):

    def test_sorts_empty_list(self):
        self.assertEqual([], bubble_sort([]))

    def test_sorts_single_element_list(self):
        self.assertEqual([1], bubble_sort([1]))

    def test_sorts_two_elements_sorted_list(self):
        self.assertEqual([1, 2], bubble_sort([1, 2]))

    def test_sorts_two_elements_unsorted_list(self):
        self.assertEqual([1, 2], bubble_sort([2, 1]))

    def test_sorts_three_elements_sorted_list(self):
        self.assertEqual([1, 2, 3], bubble_sort([1, 2, 3]))

    def test_sorts_2_1_3_list(self):
        self.assertEqual([1, 2, 3], bubble_sort([2, 1, 3]))

    def test_sorts_1_3_2_list(self):
        self.assertEqual([1, 2, 3], bubble_sort([1, 3, 2]))

    def test_sorts_3_2_1_list(self):
        self.assertEqual([1, 2, 3], bubble_sort([3, 2, 1]))


class QuickSortTest(unittest.TestCase):

    def test_sorts_an_empty_list(self):
        self.assertEqual([], quick_sort([]))

    def test_sorts_single_element_list(self):
        self.assertEqual([1], quick_sort([1]))

    def test_sorts_two_elements_sorted_list(self):
        self.assertEqual([1, 2], quick_sort([1, 2]))

    def test_sorts_two_elements_unsorted_list(self):
        self.assertEqual([1, 2], quick_sort([2, 1]))

    def test_sorts_three_elements_sorted_list(self):
        self.assertEqual([1, 2, 3], quick_sort([1, 2, 3]))

    def test_sorts_2_1_3_list(self):
        self.assertEqual([1, 2, 3], quick_sort([2, 1, 3]))

    def test_sorts_1_3_2_list(self):
        self.assertEqual([1, 2, 3], quick_sort([1, 3, 2]))

    def test_sorts_3_2_1_list(self):
        self.assertEqual([1, 2, 3], quick_sort([3, 2, 1]))

    def test_keeps_duplicates_of_the_pivot(self):
        # Regression test for the dropped-duplicates bug.
        self.assertEqual([1, 2, 2, 3], quick_sort([2, 3, 2, 1]))
duplicados=[] data = get_source(host) data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"') if 'Genero' in item.title: patron = '<li><a href="([^"]+)">([^<]+)</a>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapedtitle if title not in duplicados: itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all', type=item.type)) duplicados.append(title) return itemlist def list_all(item): logger.info() itemlist = [] data = get_source(item.url) if item.type == 'movies': patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">' patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?' patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">' patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches: title = '%s [%s] [%s]' % (scrapedtitle, year, quality) if 'screener' in quality.lower(): quality = 'Screener' contentTitle = scrapedtitle thumbnail = scrapedthumbnail url = scrapedurl language = get_language(lang_data) itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=thumbnail, contentTitle=contentTitle, language=language, quality=quality, infoLabels={'year':year})) elif item.type == 'tvshows': patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">' patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches: title = scrapedtitle contentSerieName = scrapedtitle thumbnail = scrapedthumbnail url = scrapedurl itemlist.append(item.clone(action='seasons', title=title, url=url, thumbnail=thumbnail, contentSerieName=contentSerieName, 
context=filtertools.context(item, list_language, list_quality), infoLabels={'year':year})) tmdb.set_infoLabels(itemlist, seekTmdb=True) # Paginación url_next_page = scrapertools.find_single_match(data,"<a class='last' href='([^']+)'>»</a>") if url_next_page: itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all')) return itemlist def seasons(item): logger.info() itemlist=[] data=get_source(item.url)
patron='<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >' patron += '<div class="season-item">Temporada (\d+)</div>' matches = re.compile(patron, re.DOTA
LL).findall(data) infoLabels = item.infoLabels for scrapedurl, scrapedthumbnail, season in matches: infoLabels['season']=season title = 'Temporada %s' % season itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons', thumbnail=scrapedthumbnail, infoLabels=infoLabels)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) return itemlist def episodios(item): logger.info() itemlist = [] templist = seasons(item) for tempitem in templist: itemlist += episodesxseasons(tempitem) return itemlist def episodesxseasons(item): logger.info() itemlist = [] data=get_source(item.url) patron ='class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >' patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season'] matches = re.compile(patron, re.DOTALL).findall(data) infoLabels = item.infoLabels for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches: infoLabels['episode'] = scrapedepisode url = scrapedurl language = get_language(lang_data) title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language) itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels)) itemlist = filtertools.get_links(itemlist, item, list_language) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) return itemlist def findvideos(item): logger.info() from lib import generictools itemlist = [] data = get_source(item.url) patron = '<div class="available-source" ><div class="([^"]+)">.*?' 
patron += 'data-data="([^"]+)".*?<span class="quality-text">([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) for lang_data, scrapedurl, quality in matches: lang = get_language(lang_data) if 'screener' in quality.lower(): quality = 'Screener' quality = quality title = '%s [%s] [%s]' url = base64.b64decode(scrapedurl[1:]) itemlist.append( Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang], infoLabels=item.infoLabels)) itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language)) # Requerido para Filtrar enlaces if __comprueba_enlaces__: itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) # Requerido para FilterTools itemlist = filtertools.get_links(itemlist, item, list_language, list_quality) # Requerido para AutoPlay autoplay.start(itemlist, item) itemlist = sorted(itemlist, key=lambda it: it.language) if item.contentType != 'episode': if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': itemlist.append( Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = item.url + texto if texto != '': return search_results(item) else: return [] def search_results(item): logger.info() itemlist=[] data=get_source(item.url) patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?' patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches: title = scrapedtitle if len(year)==0: year = '-' url = scrapedurl thumbnail = scrapedthumb if not '/ser
string, returns "file", "template". If the string is of the form "file" (without a template), returns "file", "file.in".""" if ':' in s: return s.split(':', 1) return s, '%s.in' % s def get_config_files(data): config_status = mozpath.join(data['objdir'], 'config.status') if not os.path.exists(config_status): return [], [] configure = mozpath.join(data['srcdir'], 'configure') config_files = [] command_files = [] # Scan the config.status output for information about configuration files # it generates. config_status_output = subprocess.check_output( [data['shell'], '-c', '%s --help' % config_status], stderr=subprocess.STDOUT).splitlines() state = None for line in config_status_output: if line.startswith('Configuration') and line.endswith(':'): if line.endswith('commands:'): state = 'commands' else: state = 'config' elif not line.strip(): state = None elif state: for f, t in (split_template(couple) for couple in line.split()): f = mozpath.join(data['objdir'], f) t = mozpath.join(data['srcdir'], t) if state == 'commands': command_files.append(f) else: config_files.append((f, t)) return config_files, command_files def prepare(srcdir, objdir, shell, args): parser = argparse.ArgumentParser() parser.add_argument('--target', type=str) parser.add_argument('--host', type=str) parser.add_argument('--build', type=str) parser.add_argument('--cache-file', type=str) # The --srcdir argument is simply ignored. It's a useless autoconf feature # that we don't support well anyways. This makes it stripped from `others` # and allows to skip setting it when calling the subconfigure (configure # will take it from the configure path anyways). 
parser.add_argument('--srcdir', type=str) data_file = os.path.join(objdir, CONFIGURE_DATA) previous_args = None if os.path.exists(data_file): with open(data_file, 'rb') as f: data = pickle.load(f) previous_args = data['args'] # Msys likes to break environment variables and command line arguments, # so read those from stdin, as they are passed from the configure script # when necessary (on windows). # However, for some reason, $PATH is not handled like other environment # variables, and msys remangles it even when giving it is already a msys # $PATH. Fortunately, the mangling/demangling is just find for $PATH, so # we can just take the value from the environment. Msys will convert it # back properly when calling subconfigure. input = sys.stdin.read() if input: data = {a: b for [a, b] in eval(input)} environ = {a: b for a, b in data['env']} environ['PATH'] = os.environ['PATH'] args = data['args'] else: environ = os.environ args, others = parser.parse_known_args(args) data = { 'target': args.target, 'host': args.host, 'build': args.build, 'args': others, 'shell': shell, 'srcdir': srcdir, 'env': environ, } if args.cache_file: data['cache-file'] = mozpath.normpath(mozpath.join(os.getcwd(), args.cache_file)) else: data['cache-file'] = mozpath.join(objdir, 'config.cache') if previous_args is not None: data['previous-args'] = previous_args try: os.makedirs(objdir) except OSError as e: if e.errno != errno.EEXIST: raise with open(data_file, 'wb') as f: pickle.dump(data, f) def prefix_lines(text, prefix): return ''.join('%s> %s' % (prefix, line) for line in text.splitlines(True)) def run(objdir): ret = 0 output = '' with open(os.path.join(objdir, CONFIGURE_DATA), 'rb') as f: data = pickle.load(f) data['objdir'] = objdir cache_file = data['cache-file'] cleared_cache = True if os.path.exists(cache_file): cleared_cache = maybe_clear_cache(data) config_files, command_files = get_config_files(data) contents = [] for f, t in config_files: contents.append(File(f)) # 
AC_CONFIG_COMMANDS actually only registers tags, not file names # but most commands are tagged with the file name they create. # However, a few don't, or are tagged with a directory name (and their # command is just to create that directory) for f in command_files: if os.path.isfile(f): contents.append(File(f)) # Only run configure if one of the following is true: # - config.status doesn't exist # - config.status is older than configure # - the configure arguments changed # - the environment changed in a way that requires a cache clear. configure = mozpath.join(data['srcdir'], 'configure') config_status_path = mozpath.join(objdir, 'config.status') skip_configure = True if not os.path.exists(config_status_path): skip_configure = False config_status = None else: config_status = File(config_status_path) if config_status.mtime < os.path.getmtime(configure) or \ data.get('previous-args', data['args']) != data['args'] or \ cleared_cache: skip_configure = False relobjdir = os.path.relpath(objdir, os.getcwd()) if not skip_configure: command = [data['shell'], configure] for kind in ('target', 'build', 'host'): if data.get(kind) is not None: command += ['--%s=%s' % (kind, data[kind])] command += data['args'] command += ['--cache-file=%s' % cache_file] # Pass --no-create to configure so that it doesn't run config.status. # We're going to run
it ourselves. command += ['--no-create'] print prefix_lines('configuring', relobjdir) print prefix_lines('running %s' % ' '.join(command[:-1]), relobjdir) sys.stdout.flush() try: output += subprocess.check_out
put(command, stderr=subprocess.STDOUT, cwd=objdir, env=data['env']) except subprocess.CalledProcessError as e: return relobjdir, e.returncode, e.output # Leave config.status with a new timestamp if configure is newer than # its original mtime. if config_status and os.path.getmtime(configure) <= config_status.mtime: config_status.update_time() # Only run config.status if one of the following is true: # - config.status changed or did not exist # - one of the templates for config files is newer than the corresponding # config file. skip_config_status = True if not config_status or config_status.modified: # If config.status doesn't exist after configure (because it's not # an autoconf configure), skip it. if os.path.exists(config_status_path): skip_config_status = False else: # config.status changed or was created, so we need to update the # list of config and command files. config_files, command_files = get_config_files(data) for f, t in config_files: if not os.path.exists(t) or \ os.path.getmtime(f) < os.path.getmtime(t): skip_config_status = False if not skip_config_status: if skip_configure: print prefix_lines('running config.status', relobjdir) sys.stdout.flush() try: output += subprocess.check_output([data['shell'], '-c', './config.status'], stderr=subprocess.STDOUT, cwd=objdir, env=data['env']) except subprocess.CalledProcessError as e: ret = e.returncode output += e.output for f in contents: f.update_time() return relobjdir, ret, output def subconfigure(args): parser = argparse.ArgumentParser() parser.add_argument('--list', type=str, help='File containing a
#!/usr/bin/env python

import glob
import os
import sys
import unittest

import common

if len(sys.argv) > 1:
    builddir = sys.argv[1]
    no_import_hooks = True
else:
    builddir = '..'
    no_import_hooks = False

common.run_import_tests(builddir, no_import_hooks)

# Module names (without the .py extension) that are helpers, not tests.
SKIP_FILES = ['common', 'runtests']

# Run from the directory containing this script so the glob finds the tests.
dir = os.path.split(os.path.abspath(__file__))[0]
os.chdir(dir)


def gettestnames():
    """Return test module names, excluding anything listed in SKIP_FILES."""
    # FIX: strip '.py' BEFORE comparing against SKIP_FILES; the original
    # compared the full filename against extensionless names, so the skip
    # list could never match.
    return [fname[:-3] for fname in glob.glob('test*.py')
            if fname[:-3] not in SKIP_FILES]


suite = unittest.TestSuite()
loader = unittest.TestLoader()
for name in gettestnames():
    suite.addTest(loader.loadTestsFromName(name))

testRunner = unittest.TextTestRunner()
testRunner.run(suite)
# Kevin Nash (kjn33)
# EECS 293
# Assignment 12

from entity import Entity
from random import randint


class Passenger(Entity):
    """Entities that need to be checked in following queueing."""

    def __init__(self):
        """Initialize per Entity, then randomly assign special parameters."""
        super(Passenger, self).__init__()
        # 50% chance of being a frequent flyer
        self.frequent = randint(1, 2) % 2 == 0
        # 10% chance of having a given special condition
        self.oversize = randint(1, 10) % 10 == 0
        self.rerouted = randint(1, 10) % 10 == 0
        self.overbook = randint(1, 10) % 10 == 0
        self.time = 2
        self.calc_time()

    def __str__(self):
        """Represent Passenger by name, ID, and flyer type."""
        flyer_type = "frequent" if self.frequent else "regular"
        return "%s %d (%s)" % (self.__class__.__name__, self.id, flyer_type)

    def calc_time(self):
        """Set the time required for check in based on special parameters."""
        # Each special condition adds a flat two units of check-in time.
        self.time += 2 * sum((self.oversize, self.rerouted, self.overbook))
import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))

# Pump the event queue and redraw until the window is closed.
done = False
while not done:
    for event in pygame.event.get():
        done = done or event.type == pygame.QUIT
    pygame.display.flip()
from twisted.internet import protocol, reactor


class Echo(protocol.Protocol):
    """Per-connection protocol that echoes every byte back to the peer."""

    def dataReceived(self, data):
        """Twisted callback: called with each chunk received; echo it back."""
        self.transport.write(data)


class EchoFactory(protocol.Factory):
    """Builds one Echo protocol instance per inbound connection."""

    def buildProtocol(self, addr):
        return Echo()


# Listen for TCP connections on port 1234 and run the event loop forever.
reactor.listenTCP(1234, EchoFactory())
reactor.run()
#!/usr/bin/env python

'''em_dict_basic.py - Basic benchmark for external memory dictionary.'''

__author__ = 'huku <huku@grhack.net>'


import sys
import shutil
import random
import time

import util

import pyrsistence


def main(argv):
    """Populate an on-disk EMDict with 2**24 entries and report the time.

    Returns 0 on success.  The backing directory is now always closed and
    deleted via try/finally, even when the population loop fails part-way
    (previously a failure leaked the directory on disk).
    """
    # Initialize new external memory dictionary.
    util.msg('Populating external memory dictionary')
    t1 = time.time()
    dirname = util.make_temp_name('em_dict')
    em_dict = pyrsistence.EMDict(dirname)
    try:
        for i in util.xrange(0x1000000):
            em_dict[i] = i
        t2 = time.time()
        util.msg('Done in %d sec.' % (t2 - t1))
    finally:
        # Close and remove external memory dictionary from disk.
        em_dict.close()
        shutil.rmtree(dirname)
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))

# EOF
# -*- coding: utf-8 -*-
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""

__docformat__ = 'reStructuredText'


# Keys still marked "(translation required)" have not been translated yet and
# simply echo the English directive name; commented-out entries are names
# with no canonical directive implementation.
directives = {
    # language-dependent: fixed
    'attention (translation required)': 'attention',
    'caution (translation required)': 'caution',
    'code (translation required)': 'code',
    'danger (translation required)': 'danger',
    'error (translation required)': 'error',
    'hint (translation required)': 'hint',
    'important (translation required)': 'important',
    'note (translation required)': 'note',
    'tip (translation required)': 'tip',
    'warning (translation required)': 'warning',
    'admonition (translation required)': 'admonition',
    'sidebar (translation required)': 'sidebar',
    'topic (translation required)': 'topic',
    'line-block (translation required)': 'line-block',
    'parsed-literal (translation required)': 'parsed-literal',
    'rubric (translation required)': 'rubric',
    'epigraph (translation required)': 'epigraph',
    'highlights (translation required)': 'highlights',
    'pull-quote (translation required)': 'pull-quote',
    'compound (translation required)': 'compound',
    u'container (translation required)': 'container',
    #'questions (translation required)': 'questions',
    'table (translation required)': 'table',
    'csv-table (translation required)': 'csv-table',
    'list-table (translation required)': 'list-table',
    #'qa (translation required)': 'questions',
    #'faq (translation required)': 'questions',
    'meta (translation required)': 'meta',
    'math (translation required)': 'math',
    #'imagemap (translation required)': 'imagemap',
    'image (translation required)': 'image',
    'figure (translation required)': 'figure',
    'include (translation required)': 'include',
    'raw (translation required)': 'raw',
    'replace (translation required)': 'replace',
    'unicode (translation required)': 'unicode',
    u'日期': 'date',
    'class (translation required)': 'class',
    'role (translation required)': 'role',
    u'default-role (translation required)': 'default-role',
    u'title (translation required)': 'title',
    'contents (translation required)': 'contents',
    'sectnum (translation required)': 'sectnum',
    'section-numbering (translation required)': 'sectnum',
    u'header (translation required)': 'header',
    u'footer (translation required)': 'footer',
    #'footnotes (translation required)': 'footnotes',
    #'citations (translation required)': 'citations',
    'target-notes (translation required)': 'target-notes',
    'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""

# Includes the short aliases ('ab', 'sup', 't', ...) alongside the full role
# names; several names intentionally map to the same canonical role.
roles = {
    # language-dependent: fixed
    'abbreviation (translation required)': 'abbreviation',
    'ab (translation required)': 'abbreviation',
    'acronym (translation required)': 'acronym',
    'ac (translation required)': 'acronym',
    u'code (translation required)': 'code',
    'index (translation required)': 'index',
    'i (translation required)': 'index',
    'subscript (translation required)': 'subscript',
    'sub (translation required)': 'subscript',
    'superscript (translation required)': 'superscript',
    'sup (translation required)': 'superscript',
    'title-reference (translation required)': 'title-reference',
    'title (translation required)': 'title-reference',
    't (translation required)': 'title-reference',
    'pep-reference (translation required)': 'pep-reference',
    'pep (translation required)': 'pep-reference',
    'rfc-reference (translation required)': 'rfc-reference',
    'rfc (translation required)': 'rfc-reference',
    'emphasis (translation required)': 'emphasis',
    'strong (translation required)': 'strong',
    'literal (translation required)': 'literal',
    'math (translation required)': 'math',
    'named-reference (translation required)': 'named-reference',
    'anonymous-reference (translation required)': 'anonymous-reference',
    'footnote-reference (translation required)': 'footnote-reference',
    'citation-reference (translation required)': 'citation-reference',
    'substitution-reference (translation required)': 'substitution-reference',
    'target (translation required)': 'target',
    'uri-reference (translation required)': 'uri-reference',
    'uri (translation required)': 'uri-reference',
    'url (translation required)': 'uri-reference',
    'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
ame(u'digiapproval_message_last_read_by') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('message', models.ForeignKey(orm[u'digiapproval.message'], null=False)), ('user', models.ForeignKey(orm[u'auth.user'], null=False)) )) db.create_unique(m2m_table_name, ['message_id', 'user_id']) def backwards(self, orm): # Removing M2M tabl
e for field last_read_by on 'Message' db.delete_table(db.shorten_name(u'digiapproval_message_last_read_by')) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', 
[], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'digiapproval.customeraccount': { 'Meta': {'object_name': 'CustomerAccount'}, 'account_type': ('django.db.models.fields.CharField', [], {'default': "'CUSTOMER'", 'max_length': '16'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_accounts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['digiapproval.CustomerAccount']"}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}) }, u'digiapproval.message': { 'Meta': {'object_name': 'Message'}, '_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_read_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'last_read'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}), 'message': ('django.db.models.fields.TextField', [], {}), 'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 
'True'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"}) }, u'digiapproval.task': { 'Meta': {'object_name': 'Task'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'task': ('jsonfield.fields.JSONField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': "'36'"}), 'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"}) }, u'digiapproval.userfile': { 'Meta': {'object_name': 'UserFile'}, '_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'virus_status': ('django.db.models.fields.CharField', [], {'default': "'UNSCANNED'", 'max_length': '16'}) }, u'digiapproval.workflow': { 'Meta': {'object_name': 'Workflow'}, 'approver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_approver'", 'to': u"orm['auth.User']"}), 'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_customer'", 'to': u"orm['digiapproval.CustomerAccount']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.WorkflowSpec']"}), 'state': ('django.db.models.fields.CharField', [], {'default': "'STARTED'", 'max_length': '10'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'70558195da6a4488b22d6e8749f86580'", 'max_length': '36'}), 'workflow': ('digiapproval_project.apps.digiapproval.fields.WorkflowField', [], {}) }, u'digiapproval.workflowspec': { 'Meta': {'object_name': 'WorkflowSpec'}, 'approvers': 
('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_approvers'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}), 'delegators': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_delegators'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': "'64'"}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflowspecs_owner'", 'to': u"orm['auth.Group']"}), 'public': ('django.db.mod
# -*-coding:Utf-8 -*

# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""Package containing the 'auberge liste' (inn list) command parameter."""

from primaires.interpreteur.masque.parametre import Parametre


class PrmListe(Parametre):
    """The 'auberge liste' command.

    Lists every inn (auberge) known to the game as a fixed-width ASCII
    table: key, counter-room identifier, number of rooms, occupancy %.
    """

    def __init__(self):
        """Set up the parameter (French keyword "liste", English "list")."""
        Parametre.__init__(self, "liste", "list")
        self.aide_courte = "affiche les auberges existantes"
        self.aide_longue = \
            "Cette commande permet de lister les auberges existantes."

    def interpreter(self, personnage, dic_masques):
        """Interpret the command for *personnage* (the invoking character).

        Builds the table from all inns sorted by key and sends it to the
        character through the '<<' messaging operator; if no inn exists
        yet, a short notice is sent instead.
        """
        # Sort inns by their unique key for a stable listing.
        auberges = sorted([a for a in importeur.auberge.auberges.values()],
                key=lambda a: a.cle)
        if auberges:
            # Horizontal rule sized to the 15/25/8/6-character columns.
            en_tete = "+-" + "-" * 15 + "-+-" + "-" * 25 + "-+-" + \
                "-" * 8 + "-+-" + "-" * 6 + "-+"
            msg = en_tete + "\n"
            # NOTE(review): the spacing inside this header literal appears
            # collapsed relative to the column widths above — verify the
            # original padding against the rule line before relying on it.
            msg += "| Clé | Salle | " \
                "Chambres | Occupé |\n"
            msg += en_tete
            for auberge in auberges:
                cle = auberge.cle
                ident = auberge.ident_comptoir
                nb_chambres = len(auberge.chambres)
                pct_occupation = auberge.pct_occupation
                msg += "\n| {:<15} | {:<25} | {:>8} | {:>5}% |".format(
                    cle, ident, nb_chambres, pct_occupation)
            msg += "\n" + en_tete
            personnage << msg
        else:
            personnage << "Aucune auberge n'existe pour l'heure."
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from oslo_config import cfg
from oslo_log import log as logging

from neutron.agent.common import config as agent_conf
from neutron.agent.metadata import agent
from neutron.agent.metadata import config as metadata_conf
from neutron.common import config
from neutron.common import utils
from neutron.openstack.common.cache import cache

LOG = logging.getLogger(__name__)


def main():
    """Entry point for the neutron metadata agent.

    Registers this agent's configuration option groups, initializes config
    and logging from the command line, then starts the Unix-domain metadata
    proxy (proxy.run() blocks for the life of the process).
    """
    # Register all option groups before config.init() parses the CLI.
    cfg.CONF.register_opts(metadata_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
    cfg.CONF.register_opts(metadata_conf.METADATA_PROXY_HANDLER_OPTS)
    cache.register_oslo_configs(cfg.CONF)
    # Default to a small in-memory cache with a 5-second TTL unless the
    # operator overrides cache_url.
    cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
    agent_conf.register_agent_state_opts_helper(cfg.CONF)
    config.init(sys.argv[1:])
    config.setup_logging()
    utils.log_opt_values(LOG)

    # metadata agent need not connect DB; blank out the connection string
    # so no database engine is ever created.
    cfg.CONF.set_override("connection", "", "database")

    proxy = agent.UnixDomainMetadataProxy(cfg.CONF)
    proxy.run()
e is no datacenter defined.") sys.exit(1) self.logger.debug('Configuration seems sane.') def _before_connect(self, url=None, rpc=None, routing_key=None): pass # same behaviour as masta def _after_connect(self): self.rpc.set_json_encoder(StorageJSONEncoder) self.storage = Endpoint(self.rpc, self.logger) # Check if all the datacenters are also registered in Storage, if not, register them storage_datacenters = self.storage.get('datacenter', options=[], filters={}) def connect(self, url=None, rpc=None, routing_key=None): # fake being masta, so we don't have to change other code super(Pasta, self).connect(self.config["rpc"]["url"], None, "decaf_masta") @In("datacenter_id", int) @Out("success_code", int) def initialize_datacenter(self, datacenter_config): """ Reimplemented method of decaf_masta :param datacenter_config: A DatacenterConfig object describing the datacenter to be added. :return: The id of the new entry. """ self.logger.info("Call to initialize_datacenter") return 0 @In("keystone_credentials", dict) @Out("keystone_id", int) def create_keystone_credentials(self, keystone_credentials): self.logger.info("Call to create_keystone_credentials") return 0 @In("keystone_id", int) @Out("keystone_credentials", dict) def get_keystone_credentials(self, keystone_id): """ Gets a keystone entry from the database. :param keystone_id: T
he id of the database entry. :return: The data of the keystone entry with the given id, or an error code if not found. """ return 400 @Out("keystone_list", list) def get_keystones(self): """ Get keystone entries contained in the database. :return: A list of keystone entries currently existing in the Masta database. """ return None # ---------------------------------------------------
------- # DATACENTERS # Every datacenter has a respective set of keystone credentials and a region. # Keystone does not have to be installed on the actual datacenter, but could. # ---------------------------------------------------------- @In("datacenter", dict) @Out("datacenter_id", int) def create_datacenter(self, datacenter): """ Adds a datacenter entry to the database. :param datacenter: A Datacenter dictionary containing information of the datacenter. :return: The id of the new entry in the database. """ return int(datacenter.datacenter_id) @Out("datacenter_list", list) def get_datacenters(self): """ Get datacenter entries contained in the database. :return: A list of datacenter entries currently existing in the Masta database. """ return [datacenter.to_dict() for datacenter in self.datacenters] @In("datacenter_id", int) @Out("datacenter_stats", dict) def get_datacenter_stats(self, datacenter_id): """ Returns information about the datacenter. :param datacenter_id: The id of the datacenter. :return: A list of datacenter entries currently existing in the Masta database """ return datacenter_stats @In("datacenter_id", int) @Out("ip_namespace", str) def get_datacenter_ip_namespace(self, datacenter_id): """ Returns the name of the IP namespace of the router on the given datacenter. :param datacenter_id: The masta id of the datacenter. :return: IP namespace name. """ ip_namespace = "qrouter-1" return ip_namespace # ---------------------------------------------------------- # DEPLOY SCENARIO # A scenario is deployed in two steps: First, the edges are created. # Secondly, the nodes are created. # If the process fails at one step, MaSta will rollback the deployment. # ---------------------------------------------------------- @In("instance_graph", dict) @Out("instance_graph", dict) def deploy_scenario(self, instance_graph): """ Deploy scenario on the infrastructure. :param instance_graph: An object of type InstanceGraph to be deployed. 
:return: The modified instance graph with ips and keynames, if successful. """ return instance_graph # ---------------------------------------------------------- # DESTROY SCENARIO # Deletes all the nodes and edges and removes # the scenario from the database. # ---------------------------------------------------------- @In("scenario_instance_id", str) @Out("success_code", int) def destroy_scenario(self, scenario_instance_id): """ Destroy scenario by deleting all its nodes and removing from database. :param scenario_instance_id: The id of the scenario instance. :return: 200, if successful. 404, if not found. """ return 200 @Out("success_code", int) def destroy_all_scenarios(self): """ Destroys all scenarios in the MaSta database. :return: 200, if successful. """ return 200 # ---------------------------------------------------------- # ALTER SCENARIO # Methods to change a running scenario. # ---------------------------------------------------------- @In("instance_graph", dict) @Out("instance_graph", dict) def extend_scenario(self, instance_graph): """ Method to extend an existing scenario. :param instance_graph: An InstanceGraph with all the nodes and edges to add. :return: 200, if successful. """ return 200 @In("shrink_graph", dict) @Out("success_code", int) def shrink_scenario(self, shrink_graph): """ Method to shrink an existing scenario. :param shrink_graph: An object of type InstanceGraph that lists all the nodes and edges to delete. :return: 200, if successful. """ return 200 # ---------------------------------------------------------- # INTERNAL SCENARIO METHODS # Internal methods for creation and deletion # of nodes and edges. # ---------------------------------------------------------- def create_nodes(self, instance_graph, session): """ Internal method to create nodes in database and deploy the nodes on the infrastructure. :param instance_graph: The graph of the scenario. :param session: The session object. 
:return: """ pass def create_edges(self, instance_graph, session): """ Internal method to create edges in the database and set up the networks in OpenStack. :param instance_graph: The graph of the scenario. :param session: The session object. :return: """ pass def rollback(self, instance_graph, session, del_scenario=False): """ Internal method to rollback the creation or altering of a scenario. :param instance_graph: The graph of the scenario. :param session: The session object. :return: """ pass def delete_nodes(self, vm_instance_id_list, session): """ Internal method to delete nodes from a scenario. :param scenario_instance_id: The id of the scenario. :param session: The session object. :return: 200, if successful. """ return 200 def delete_edges(self, edge_list, session): """ Internal method to delete edges from a scenario. :param edge_list: A list containing objects of internal edges, management ports and public ports from the db. :param session: The session object. :return: """ pass # ---------------------------------------------------------- # ACTIONS # Perform actions on the VMS. # ---------------------------------------------------------- @In("vm_action", dict) @Out("success_code"
from neo.Storage.Common.DataCache import DataCache


class CloneCache(DataCache):
    """A DataCache layered over another cache that hands out clones.

    Reads return Clone()d copies of the inner cache's values, so callers
    can mutate them freely; writes are forwarded to the inner cache, with
    updates applied via FromReplica() on the stored original.
    """

    def __init__(self, innerCache):
        super(CloneCache, self).__init__()
        self.innerCache = innerCache

    def AddInternal(self, key, value):
        """Forward an insertion to the wrapped cache."""
        self.innerCache.Add(key, value)

    def DeleteInternal(self, key):
        """Forward a deletion to the wrapped cache."""
        self.innerCache.Delete(key)

    def FindInternal(self, key_prefix):
        """Yield (key, clone-of-value) pairs whose keys match *key_prefix*."""
        for key, item in self.innerCache.Find(key_prefix):
            yield key, item.Clone()

    def GetInternal(self, key):
        """Return a clone of the value stored under *key*."""
        return self.innerCache[key].Clone()

    def TryGetInternal(self, key):
        """Return a clone of the value under *key*, or None if absent."""
        found = self.innerCache.TryGet(key)
        return None if found is None else found.Clone()

    def UpdateInternal(self, key, value):
        """Copy *value*'s state onto the original held by the wrapped cache."""
        self.innerCache.GetAndChange(key).FromReplica(value)
#!/usr/bin/env python #Copyright (c) 2010 Gerson Minichiello # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (
the "Software"), to deal #in the Software without restriction, including without l
imitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. import urllib from HTMLParser import HTMLParser class PyOpenGraph(object): types = {'activity':['activity', 'sport'], 'business':['bar', 'company', 'cafe', 'hotel', 'restaurant'], 'group':['cause' 'sports_league' 'sports_team'], 'organization':['band', 'government', 'non_profit', 'school', 'university'], 'person':['actor', 'athlete', 'author', 'director', 'musician', 'politician', 'public_figure'], 'place':['city', 'country', 'landmark', 'state_province'], 'product':['album', 'book', 'drink', 'food', 'game', 'isbn', 'movie', 'product', 'song', 'tv_show', 'upc'], 'website':['article', 'blog', 'website']} def __init__(self, url): f = urllib.urlopen(url) contents = f.read() f.close() p = PyOpenGraphParser() p.feed(contents) p.close() self.metadata = p.properties def is_valid(self): required = set(['title', 'type', 'image', 'url']) if (set(self.metadata.keys()).intersection(required)) == required: return True else: return False def __str__(self): return self.metadata['title'] class PyOpenGraphParser(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.properties = {} def handle_starttag(self, tag, attrs): if tag == 'meta': attrdict = dict(attrs) if 
attrdict.has_key('property') and attrdict['property'].startswith('og:') and attrdict.has_key('content'): self.properties[attrdict['property'].replace('og:', '')] = attrdict['content'] def handle_endtag(self, tag): pass def error(self, msg): pass if __name__ == '__main__': # Usage og = PyOpenGraph('http://www.rottentomatoes.com/m/10011268-oceans/') print og.metadata print og.metadata['title']
defined in # absolute terms ex. miles/gallon for subsectors with inputs defined # in energy service terms ex. kilometers to consistent efficiency # units of energy_unit/service_demand_unit ex. gigajoule/kilometer if eff_def == 'absolute' and sd_unit_type == 'service': eff = util.efficiency_convert(getattr(stock.techs[ID], efficiency_key), stock.techs[ID].efficiency_numerator_unit, stock.techs[ID].efficiency_denominator_unit, energy_unit, sd_unit) eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages) clean_eff_numerator_unit = energy_unit clean_eff_denominator_unit = sd_unit # no conversion is used if the service_demand unit is energy, as # the efficiency values will be normalized in later calculations elif eff_def == 'absolute' and sd_unit_type == 'energy': eff = getattr(stock.techs[ID], efficiency_key) eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages) clean_eff_numerator_unit = stock.techs[ID].efficiency_numerator_unit clean_eff_denominator_unit = stock.techs[ID].efficiency_denominator_unit # converts efficiency values for reference technologies # that are defined in absolute terms ex. miles/gallon for # subsectors with inputs defined in energy service terms ex. # kilometers to consistent efficiency units of # energy_unit/service_demand_unit ex. 
gigajoule/kilometer elif eff_def == "relative" and sd_unit_type == 'service': ref_eff = util.efficiency_convert( getattr(stock.techs[ref_ID], efficiency_key), stock.techs[ref_ID].efficiency_numerator_unit, stock.techs[ref_ID].efficiency_denominator_unit, energy_unit, sd_unit) ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages) eff = getattr(stock.techs[ID], efficiency_key) eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages) eff *= ref_eff clean_eff_numerator_unit = energy_unit clean_eff_denominator_unit = sd_unit # no conversion is used if the service_demand unit is energy, as # the efficiency values will be normalized in later calculations. # efficiency values are multiplied by reference technology efficiencies else: ref_eff = getattr(stock.techs[ref_ID], efficiency_key) ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages) eff = getattr(stock.techs[ID], efficiency_key) eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages) eff *= ref_eff clean_eff_numerator_unit = stock.techs[ref_ID].efficiency_numerator_unit clean_eff_denominator_unit = stock.techs[ref_ID].efficiency_denominator_unit decay_df = stockrollover.vintage_age(years, vintages) decay_df *= stockrollover.vintage_exist(years, vintages) if eff_def == "absolute": decay_df = 1 - (decay_df * getattr(stock.techs[ID], decay)) else: decay_df = 1 - (decay_df * getattr(stock.techs[ref_ID], decay)) eff = eff.transpose() eff = (decay_df.values * eff.values, years, vintages) setattr(stock.techs[ID], 'clean_%s_efficiency' % efficiency, eff) setattr(stock.techs[ID], 'clean_%s_efficiency_numerator_unit' % efficiency, clean_eff_numerator_unit) setattr(stock.techs[ID], 'clean_%s_efficiency_denominator_unit' % efficiency, clean_eff_denominator_unit) def stock_efficiency(self): sd_unit_type = self.service_demand.unit_type if sd_unit_type == 'energy': # 
============================================================================== # in order to calculate a normalized efficiency for a stock, which is # used when the service demand is defined in energy terms, all # absolute efficiency values must be in the same units. This code converts # all efficiency values to the same units. # ============================================================================== primary_key = self.stock.techs[min(self.sto
ck.techs.keys())] setattr(self.stock, 'primary_efficiency_ID', primary_key) setattr(self.stock, 'primary_efficiency_numerator_unit', stock.techs[primary_key].clean_main_efficiency_numerator_unit) setattr(self.stock, 'primary_efficiency_denominator_unit', stock.techs[primary_key].clean_main_efficiency_denominator_unit) for key in self.stock.techs: for eff_type i
n ['main', 'aux']: data = getattr(self.stock.techs[key], 'clean_%s_efficiency' % eff_type) unit_from_denominator = getattr(self.stock.techs[key], 'clean_%s_efficiency_denominator_unit' % eff_type) unit_from_numerator = getattr(self.stock.techs[key], 'clean_%s_efficiency_numerator_unit' % eff_type) unit_to_denominator = getattr(self.stock, 'primary_efficiency_denominator_unit') unit_to_numerator = getattr(self.stock, 'primary_efficiency_numerator_unit') eff = util.efficiency_convert(data, unit_from_numerator, unit_from_denominator, unit_to_numerator, unit_to_denominator) class DemandStock(Stock): """ Demand-side equipment stock. Attributes ---------- final_energy_list : list ex. {"electricity", "pipeline gas"} List of final_energy types demanded by techs in the stock. stocksubsector: instance of class StockSubsector """ # def __init__(self): # self.service_demand = ServiceDemand() # # def add_service_demand(self): # self.service_demand = ServiceDemand() # # def cf_to_unit(self, unit, service_demand_unit): # """converts capacity factor stock units to energy output units based on service demand unit""" # # def tech_lookup(self, unit, stocksubsector): # """return dictionary techs from tech database based on a lookup of unit and stockSubsector.""" # # def final_energy_list(self, techs): # """return all final energy types from attributes of tech dictionary""" # # def add_demand_techs(self, demand_tech_id): # if demand_tech_id in self.techs: # return # self.techs[demand_tech_id] = technology.DemandTech(demand_tech_id) class Stock(DataMapFunctions): """ """ def __init__(self, drivers, ID, technology_id=None, **kwargs): self.ID = ID self.sql_ID_table = cfgfile.get('db_table_name', 'DemandStock') self.sql_data_table = cfgfile.get('db_table_name', 'DemandStockData') self.technology_id = technology_id self.drivers = drivers self.mapped = False for col, att in util.object_att_from_table(self.sql_ID_table, ID): setattr(self, col, att) DataMapFunctions.__init__(self) 
self.read_timeseries_data() # self.project('DemandStock', 'service_demand_dependent') # @staticmethod def stock_new(var, **kwargs): print kwargs['asdf'] # def stock_growth(self): # """combines driver and intensity attributes into a stock growth projection""" # # def rollover(self): # """used to perform stock rollover calculations and produce stock compositions matrices of stock units by tech""" # # def stock_normal(self): # """normalizes the stock for weighted average calculations""" # # def stock_additions(self): # """calculates annual stock additions as a
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-bad-import-order,unused-import
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import os

from tensorflow.examples.image_retraining import retrain
from tensorflow.python.framework import test_util


class ImageRetrainingTest(test_util.TensorFlowTestCase):
  """Unit tests for the helper functions of the image-retraining example.

  Each test exercises one pure-ish helper from `retrain`; graph-building
  helpers are checked by looking up the named tensors they are expected to
  add to the default graph.
  """

  def dummyImageLists(self):
    # Shared fixture: two labels, each with training/testing/validation
    # splits of fake file names. Not a test itself (no `test` prefix).
    return {'label_one': {
        'dir': 'somedir',
        'training': ['image_one.jpg', 'image_two.jpg'],
        'testing': ['image_three.jpg', 'image_four.jpg'],
        'validation': ['image_five.jpg', 'image_six.jpg']},
            'label_two': {
                'dir': 'otherdir',
                'training': ['image_one.jpg', 'image_two.jpg'],
                'testing': ['image_three.jpg', 'image_four.jpg'],
                'validation': ['image_five.jpg', 'image_six.jpg']}}

  def testGetImagePath(self):
    # Index wraps modulo the split length, so index 0/1 pick the listed files.
    image_lists = self.dummyImageLists()
    self.assertEqual('image_dir/somedir/image_one.jpg', retrain.get_image_path(
        image_lists, 'label_one', 0, 'image_dir', 'training'))
    self.assertEqual('image_dir/otherdir/image_four.jpg',
                     retrain.get_image_path(image_lists, 'label_two', 1,
                                            'image_dir', 'testing'))

  def testGetBottleneckPath(self):
    # Bottleneck files are the image path plus an architecture suffix.
    image_lists = self.dummyImageLists()
    self.assertEqual('bottleneck_dir/somedir/image_five.jpg_imagenet_v3.txt',
                     retrain.get_bottleneck_path(
                         image_lists, 'label_one', 0, 'bottleneck_dir',
                         'validation', 'imagenet_v3'))

  def testShouldDistortImage(self):
    # Distortion is enabled when flipping is requested or any of the three
    # percentage knobs (brightness/crop/scale order per retrain's signature)
    # is non-zero.
    self.assertEqual(False, retrain.should_distort_images(False, 0, 0, 0))
    self.assertEqual(True, retrain.should_distort_images(True, 0, 0, 0))
    self.assertEqual(True, retrain.should_distort_images(False, 10, 0, 0))
    self.assertEqual(True, retrain.should_distort_images(False, 0, 1, 0))
    self.assertEqual(True, retrain.should_distort_images(False, 0, 0, 50))

  def testAddInputDistortions(self):
    # Verify the distortion subgraph adds its well-known input/output tensors.
    with tf.Graph().as_default():
      with tf.Session() as sess:
        retrain.add_input_distortions(True, 10, 10, 10, 299, 299, 3, 128, 128)
        self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortJPGInput:0'))
        self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortResult:0'))

  @tf.test.mock.patch.object(retrain, 'FLAGS', learning_rate=0.01)
  def testAddFinalTrainingOps(self, flags_mock):
    # FLAGS is mocked because add_final_training_ops reads the learning rate
    # from module-level flags rather than taking it as a parameter.
    with tf.Graph().as_default():
      with tf.Session() as sess:
        bottleneck = tf.placeholder(
            tf.float32, [1, 1024],
            name='bottleneck')
        retrain.add_final_training_ops(5, 'final', bottleneck, 1024)
        self.assertIsNotNone(sess.graph.get_tensor_by_name('final:0'))

  def testAddEvaluationStep(self):
    # Smoke test: the evaluation op builds without error for matching shapes.
    with tf.Graph().as_default():
      final = tf.placeholder(tf.float32, [1], name='final')
      gt = tf.placeholder(tf.float32, [1], name='gt')
      self.assertIsNotNone(retrain.add_evaluation_step(final, gt))

  def testAddJpegDecoding(self):
    # Both ends of the decode pipeline (jpeg input, normalized image output)
    # must be returned.
    with tf.Graph().as_default():
      jpeg_data, mul_image = retrain.add_jpeg_decoding(10, 10, 3, 0, 255)
      self.assertIsNotNone(jpeg_data)
      self.assertIsNotNone(mul_image)

  def testCreateModelInfo(self):
    # Unknown architectures must raise ValueError; a known one returns a dict
    # with the expected geometry.
    did_raise_value_error = False
    try:
      retrain.create_model_info('no_such_model_name')
    except ValueError:
      did_raise_value_error = True
    self.assertTrue(did_raise_value_error)
    model_info = retrain.create_model_info('inception_v3')
    self.assertIsNotNone(model_info)
    self.assertEqual(299, model_info['input_width'])

if __name__ == '__main__':
  tf.test.main()
#!/usr/bin/env python
# Shellscript to verify r.gwflow calculation, this calculation is based on
# the example at page 167 of the following book:
# author = "Kinzelbach, W. and Rausch, R.",
# title = "Grundwassermodellierung",
# publisher = "Gebr{\"u}der Borntraeger (Berlin, Stuttgart)",
# year = "1995"
#
# NOTE(review): `sys` and `os` appear unused in this script; left in place
# since only part of the project is visible here.
import sys
import os
import grass.script as grass

# Overwrite existing maps
grass.run_command("g.gisenv", set="OVERWRITE=1")

grass.message(_("Set the region"))

# Model region. NOTE(review): the original comment claimed a 2000m x 1000m
# area with 25m x 25m cells, but the command below sets res=50 with n=950
# (i.e. 40 x 19 cells of 50m x 50m) -- trusting the code here; confirm
# which was intended.
grass.run_command("g.region", res=50, n=950, s=0, w=0, e=2000)

# Initial piezometric head: 5 along row 19, 3 everywhere else.
grass.run_command("r.mapcalc", expression="phead= if(row() == 19, 5, 3)")
# Cell status map; presumably 2 marks boundary-condition cells (row 19 is
# also where phead is fixed at 5) and 1 marks active cells -- verify against
# the r.gwflow manual.
grass.run_command("r.mapcalc", expression="status=if((col() == 1 && row() == 13) ||\
(col() == 1 && row() == 14) ||\
(col() == 2 && row() == 13) ||\
(col() == 2 && row() == 14) ||\
(row() == 19), 2, 1)")
# Homogeneous aquifer parameter rasters (conductivity, recharge, geometry,
# specific yield) plus a zero raster.
grass.run_command("r.mapcalc", expression="hydcond=0.001")
grass.run_command("r.mapcalc", expression="recharge=0.000000006")
grass.run_command("r.mapcalc", expression="top=20")
grass.run_command("r.mapcalc", expression="bottom=0")
grass.run_command("r.mapcalc", expression="syield=0.001")
grass.run_command("r.mapcalc", expression="null=0.0")

#compute a steady state groundwater flow
grass.run_command("r.gwflow", "f", solver="cholesky", top="top", bottom="bottom", phead="phead", \
    status="status", hc_x="hydcond", hc_y="hydcond", s="syield", \
    recharge="recharge", output="gwresult", dt=864000000000, type="unconfined", budget="water_budget")
""" Module summary: Variables: db
_session - A connection to the farmfinder database. """ from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from dbsetup import Base ############################################################################ # Connect to database and create database session: engine = create_engine("sqlite:///secondresponse/database/sr.d
b") Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) db_session = DBSession()
import unittest

from cStringIO import StringIO

from ..backends import static

# There aren't many tests here because it turns out to be way more convenient
# to use test_serializer for the majority of cases


class TestStatic(unittest.TestCase):
    """Tests for the static manifest backend (conditional value resolution).

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias is removed in Python 3.12) and dropped redundant generator
    wrappers around ``iterchildren()``.
    """

    def compile(self, input_text, input_data):
        # Thin wrapper so individual tests read naturally.
        return static.compile(input_text, input_data)

    def test_get_0(self):
        """A matching conditional overrides; top-level keys are inherited."""
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 2})

        self.assertEqual(manifest.get("key"), "value")
        children = list(manifest.iterchildren())
        self.assertEqual(len(children), 1)
        section = children[0]
        self.assertEqual(section.name, "Heading 1")
        self.assertEqual(section.get("other_key"), "value_2")
        self.assertEqual(section.get("key"), "value")

    def test_get_1(self):
        """When no conditional matches, the bare default value is used."""
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 3})

        children = list(manifest.iterchildren())
        section = children[0]
        self.assertEqual(section.get("other_key"), "value_3")

    def test_get_3(self):
        """Conditions may index strings and compare the results."""
        data = """key:
  if a == "1": value_1
  if a[0] == "ab"[0]: value_2
"""
        manifest = self.compile(data, {"a": "1"})
        self.assertEqual(manifest.get("key"), "value_1")

        manifest = self.compile(data, {"a": "ac"})
        self.assertEqual(manifest.get("key"), "value_2")

    def test_get_4(self):
        """Boolean negation in conditions is honoured."""
        data = """key:
  if not a: value_1
  value_2
"""
        manifest = self.compile(data, {"a": True})
        self.assertEqual(manifest.get("key"), "value_2")

        manifest = self.compile(data, {"a": False})
        self.assertEqual(manifest.get("key"), "value_1")

    def test_api(self):
        """Exercise the manifest node API surface (keys, values, root)."""
        data = """key:
  if a == 1.5: value_1
  value_2
key_1: other_value
"""
        manifest = self.compile(data, {"a": 1.5})
        self.assertFalse(manifest.is_empty)
        self.assertEqual(manifest.root, manifest)
        self.assertTrue(manifest.has_key("key_1"))
        self.assertFalse(manifest.has_key("key_2"))
        self.assertEqual(set(manifest.iterkeys()), set(["key", "key_1"]))
        self.assertEqual(set(manifest.itervalues()),
                         set(["value_1", "other_value"]))

    def test_is_empty_1(self):
        """Sections containing no keys leave the manifest empty."""
        data = """
[Section]
  [Subsection]
"""
        manifest = self.compile(data, {})
        self.assertTrue(manifest.is_empty)
#!/usr/bin/env python
# -*- coding:UTF-8
__author__ = 'shenshijun'
import copy


class Queue(object):
    """A simple FIFO queue backed by a Python list.

    (Docstring translated from Chinese: "quickly implement a queue using
    a Python list".)
    """

    def __init__(self, *arg):
        """Create a queue pre-populated with *arg, preserving order."""
        super(Queue, self).__init__()
        # copy.copy() guards against aliasing the caller's tuple; list()
        # turns it into a mutable backing store.
        self.__queue = list(copy.copy(arg))
        self.__size = len(self.__queue)

    def enter(self, value):
        """Append *value* to the back of the queue."""
        self.__size += 1
        self.__queue.append(value)

    def exit(self):
        """Pop and return the front element, or None when empty."""
        if self.__size <= 0:
            return None
        # list.pop(0) is O(n); fine for small queues, use collections.deque
        # if this ever becomes hot.
        self.__size -= 1
        return self.__queue.pop(0)

    def __len__(self):
        """Number of elements currently queued."""
        return self.__size

    def empty(self):
        """Return True when no elements remain."""
        return self.__size <= 0

    def __str__(self):
        # Bug fix: the original join list was missing the closing ")",
        # producing "Queue(list=[...],size=N" with no terminator.
        return "".join(["Queue(list=", str(self.__queue), ",size=",
                        str(self.__size), ")"])

    def __repr__(self):
        return self.__str__()
# Packaging metadata for the `voxgenerator` voice-control package.
# NOTE(review): distutils is deprecated and removed from the standard
# library in Python 3.12; migrating to setuptools is recommended.
from distutils.core import setup

setup(
    # distutils does not auto-discover packages, so every sub-package must
    # be listed explicitly here.
    name = 'voxgenerator',
    packages = ['voxgenerator',
                'voxgenerator.core',
                'voxgenerator.plugin',
                'voxgenerator.pipeline',
                'voxgenerator.generator',
                'voxgenerator.service',
                'voxgenerator.control'],
    version = '1.0.3',
    description = 'Vox generator',
    url = 'https://github.com/benoitfragit/VOXGenerator/tree/master/voxgenerator',
    author = 'Benoit Franquet',
    # NOTE(review): address looks like a typo ("fraubuntu") -- confirm with
    # the author before changing.
    author_email = 'benoitfraubuntu@gmail.com',
    # Entry-point scripts installed onto the user's PATH.
    scripts = ['run_voxgenerator.py', 'run_voxgenerator', 'run_voxgenerator_gui.py'],
    keywords = ['voice', 'control', 'pocketsphinx'],
    classifiers = ["Programming Language :: Python",
                   "Development Status :: 4 - Beta",
                   "Environment :: Other Environment",
                   "Intended Audience :: Developers",
                   "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
                   "Operating System :: OS Independent",
                   "Topic :: Software Development :: Libraries :: Python Modules"]
)
'''
New Integration Test for migrate between clusters

@author: Legion
'''

import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib

# Module-level fixtures created at import time, as the woodpecker test
# runner expects.
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
data_migration = test_stub.DataMigration()


def test():
    """Create a VM, migrate it twice (cluster then random host), verify,
    and tear it down."""
    data_migration.create_vm()
    data_migration.migrate_vm()
    test_stub.migrate_vm_to_random_host(data_migration.vm)
    data_migration.vm.check()
    data_migration.vm.destroy()
    test_util.test_pass('Migrate migrated VM Test Success')


# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort teardown: destroy the VM if it was created."""
    if data_migration.vm:
        try:
            data_migration.vm.destroy()
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Cleanup stays best-effort, but only for
            # ordinary exceptions.
            pass
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 00:10
# Auto-generated migration: adds the relational fields (FK/OneToOne) that
# link RevenueItem and ExpenseItem to their related core/vouchers/financial
# models. Do not hand-edit field definitions; generate a follow-up
# migration instead.
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file


class Migration(migrations.Migration):
    # Second step of the initial schema: the models themselves were created
    # in '0001_initial' of the respective apps listed below.

    initial = True

    dependencies = [
        ('core', '0001_initial'),
        ('filer', '0007_auto_20161016_1055'),
        ('vouchers', '0001_initial'),
        ('financial', '0001_initial'),
        # Swappable dependency so the migration works with a custom user
        # model (settings.AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # --- RevenueItem relations -------------------------------------
        migrations.AddField(
            model_name='revenueitem',
            name='purchasedVoucher',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vouchers.Voucher', verbose_name='Purchased voucher/gift certificate'),
        ),
        migrations.AddField(
            model_name='revenueitem',
            name='registration',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Registration'),
        ),
        migrations.AddField(
            model_name='revenueitem',
            name='submissionUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revenuessubmittedby', to=settings.AUTH_USER_MODEL),
        ),
        # --- ExpenseItem relations -------------------------------------
        migrations.AddField(
            model_name='expenseitem',
            name='attachment',
            field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expense_attachment', to='filer.File', verbose_name='Attach File (optional)'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='financial.ExpenseCategory'),
        ),
        # Two FKs point at core.Event, so each needs a distinct
        # related_name to avoid a reverse-accessor clash.
        migrations.AddField(
            model_name='expenseitem',
            name='event',
            field=models.ForeignKey(blank=True, help_text='If this item is associated with an Event, enter it here.', null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='eventstaffmember',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.EventStaffMember'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='eventvenue',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='venueexpense', to='core.Event'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='payToLocation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='payToUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payToUser', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='submissionUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expensessubmittedby', to=settings.AUTH_USER_MODEL),
        ),
    ]
"\{") j.sal.fs.writeFile(filename=dest2, contents=content) return dest2 def __s
tr__(self): C = "<!-- toc -->\n" C += "## %s\n\n" % self.location C += "- %
s\n" % self.path if self.properties != []: C += "- Properties\n" for prop in self.properties: C += " - %s\n" % prop C += "\n### Methods\n" C += "\n" if self.comments is not None: C += "\n%s\n\n" % self.comments keys = sorted(self.methods.keys()) for key in keys: method = self.methods[key] C2 = str(method) C += C2 return C def __repr__(self): return self.__str__() class ObjectInspector: """ functionality to inspect object structure and generate apifile and pickled ordereddict for codecompletion """ def __init__(self): self.__jslocation__ = "j.tools.objectinspector" self.apiFileLocation = j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi", "jumpscale.api") # j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi")) self.classDocs = {} self.visited = [] self.root = None self.manager = None self.logger = j.logger.get('j.tools.objectinspector') self.jstree = OrderedDict() # jstree['j.sal']={'unix': unixobject, 'fs': fsobject} def importAllLibs(self, ignore=[], base="%s/lib/JumpScale/" % j.dirs.base): self.base = os.path.normpath(base) towalk = j.sal.fs.listDirsInDir(base, recursive=False, dirNameOnly=True, findDirectorySymlinks=True) errors = "### errors while trying to import libraries\n\n" for item in towalk: path = "%s/%s" % (base, item) for modname in j.sal.fs.listDirsInDir(path, False, True, True): if modname not in ignore: toexec = "import JumpScale.%s.%s" % (item, modname) try: exec(toexec) except Exception as e: self.logger.error(("COULD NOT IMPORT %s" % toexec)) errors += "**%s**\n\n" % toexec errors += "%s\n\n" % e return errors def raiseError(self, errormsg): self.logger.error("ERROR:%s" % errormsg) errormsg = errormsg.strip() errormsg = errormsg.strip("-") errormsg = errormsg.strip("*") errormsg = errormsg.strip() errormsg = "* %s\n" % errormsg j.sal.fs.writeFile(filename="%s/errors.md" % self.dest, contents=errormsg, append=True) def generateDocs(self, dest, ignore=[], objpath="j"): """ Generates documentation of objpath in destination 
direcotry dest @param dest: destination directory to write documentation. @param objpath: object path @param ignore: modules list to be ignored during the import. """ self.dest = dest self.apiFileLocation = "%s/jumpscale.api" % self.dest j.sal.fs.writeFile("%s/errors.md" % dest, "") j.sal.fs.createDir(self.dest) self.errors = self.importAllLibs(ignore=ignore) #self.errors = '' objectLocationPath = objpath # extract the object name (j.sal.unix ) -> unix to make a stub out of it. objname = '' filepath = '' if '.' in objpath: objname = objpath.split(".")[-1] else: objname = objpath try: obj = eval(objpath) if "__file__" in dir(obj): filepath = inspect.getabsfile(obj.__file__) if not filepath.startswith(self.base): return else: filepath = inspect.getfile(obj.__class__) if not filepath.startswith(self.base): return except: pass # add the root object to the tree (self.jstree) as its first element (order maintained by ordereddict/pickle) self.jstree[objectLocationPath] = attrib(objname, "class", 'emptydocs', objectLocationPath) self.inspect(objectLocationPath) j.sal.fs.createDir(dest) j.sal.fs.writeFile(filename="%s/errors.md" % dest, contents=self.errors, append=True) self.writeDocs(dest) def _processMethod(self, name, method, path, classobj): if classobj is None: raise j.exceptions.RuntimeError("cannot be None") classpath = ".".join(path.split(".")[:-1]) if classpath not in self.classDocs: self.classDocs[classpath] = ClassDoc(classobj, classpath) obj = self.classDocs[classpath] return obj.addMethod(name, method) def _processClass(self, name, path, classobj): if path not in self.classDocs: self.classDocs[path] = ClassDoc(classobj, path) obj = self.classDocs[path] def inspect(self, objectLocationPath="j", recursive=True, parent=None, obj=None): """ walk over objects in memory and create code completion api in jumpscale cfgDir under codecompletionapi @param object is start object @param objectLocationPath is full location name in object tree e.g. 
j.sal.fs , no need to fill in """ self.logger.debug(objectLocationPath) if obj is None: try: obj = eval(objectLocationPath) except: self.raiseError("could not eval:%s" % objectLocationPath) return # only process our files try: if "__file__" in dir(obj): filepath = inspect.getabsfile(obj.__file__) filepath = os.path.normpath(filepath) # normalize path if not filepath.startswith(self.base): return else: clsfile = inspect.getfile(obj.__class__) clsfile = os.path.normpath(clsfile) if not clsfile.startswith(self.base): return except Exception as e: # print "COULD NOT DEFINE FILE OF:%s"%objectLocationPath pass if obj not in self.visited and obj: self.visited.append(obj) else: self.logger.debug("RECURSIVE:%s" % objectLocationPath) return attrs = dir(obj) ignore = ["constructor_args", "NOTHING", "template_class", "redirect_cache"] def check(item): if item == "_getFactoryEnabledClasses": return True if item.startswith("_"): return False if item.startswith("im_"): return False if item in ignore: return False return True # if objectLocationPath == 'j.actions.logger.disabled': attrs = [item for item in attrs if check(item)] for objattributename in attrs: filepath = None objectLocationPath2 = "%s.%s" % (objectLocationPath, objattributename) try: objattribute = eval("obj.%s" % objattributename) except Exception as e: self.logger.error(str(e)) self.raiseError("cannot eval %s" % objectLocationPath2) continue if objattributename.upper() == objattributename: # is special type or constant self.logger.debug("special type: %s" % objectLocationPath2) j.sal.fs.writeFile(self.apiFileLocation, "%s?7\n" % objectLocationPath2, True) self.jstree[objectLocationPath2] = attrib(objattributename, "const", '', objectLocationPath2, filepath) elif objattributename == "_getFactoryEnabledClasses": try: for fclparent, name, obj2 in obj._getFactoryEnabledClasses(): if fclparent != "": objectLocationPath2 = objectLocationPath + "." + fclparent + "." 
+ name else: objectLocationPath2 = objectLocationPath + "." + name
"""Tests `numpy_utils.py`.""" # Copyright (c) 2021 Aubrey Barnard. # # This is free, open software released under the MIT license. See # `LICENSE` for details. import random import unittest import numpy.random from .. import numpy_utils class NumpyAsStdlibPrngTest(unittest.TestCase): def test_random_floats(self): seed = 0xdeadbeeffeedcafe n_samples = 10 orig_prng = numpy.random.default_rng(seed) expected = [orig_prng.random() for _ in range(n_samples)] wrap_prng = numpy
_utils.NumpyAsStdlibPrng( numpy.random.default_rng(seed)) actual = [wrap_prng.random() for _ in range(n_samples)] self.assertEqual(expected, actual) class NumpyBitGeneratorTest(unittest.TestCase): def test_random_floats(self): seed = 0xdeadbeeffeedcafe n_samples = 10 old_prng = random.Random(seed) expected = [old_prng.random() for _ in range(n_samples)] new_prng = numpy.random.Generat
or( numpy_utils.numpy_bit_generator( random.Random(seed))) actual = [new_prng.random() for _ in range(n_samples)] self.assertEqual(expected, actual)
sg_helper.make_inbound("inbound") yield self.worker_helper.dispatch_inbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_set_default_endpoint_handler(self): conn, consumer = yield self.mk_consumer(connector_name='foo') consumer.unpause() msgs = [] conn._set_default_endpoint_handler('inbound', msgs.append) msg = self.msg_helper.make_inbound("inbound") yield self.worker_helper.dispatch_inbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_publish_message_with_endpoint(self): conn = yield self.mk_connector(connector_name='foo') yield conn._setup_publisher('outbound') msg = self.msg_helper.make_outbound("outbound") yield conn._publish_message('outbound', msg, 'dummy_endpoint') msgs = self.worker_helper.get_dispatched_outbound('foo') self.assertEqual(msgs, [msg]) class TestReceiveInboundConnector(BaseConnectorTestCase): connector_class = ReceiveInboundConnector @inlineCallbacks def test_setup(self): conn = yield self.mk_connector(connector_name='foo') yield conn.setup() conn.unpause() with LogCatcher() as lc: msg = self.msg_helper.make_inbound("inbound") yield self.worker_helper.dispatch_inbound(msg, 'foo') [msg_log] = lc.messages() self.assertTrue(msg_log.startswith("No inbound handler for 'foo'")) with LogCatcher() as lc: event = self.msg_helper.make_ack() yield self.worker_helper.dispatch_event(event, 'foo') [event_log] = lc.messages() self.assertTrue(event_log.startswith("No event handler for 'foo'")) msg = self.msg_helper.make_outbound("outbound") yield conn.publish_outbound(msg) msgs = self.worker_helper.get_dispatched_outbound('foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_default_inbound_handler(self): conn = yield self.mk_connector(connector_name='foo', setup=True) with LogCatcher() as lc: conn.default_inbound_handler( self.msg_helper.make_inbound("inbound")) [log] = lc.messages() self.assertTrue(log.startswith("No inbound handler for 'foo'")) @inlineCallbacks def test_default_event_handler(self): 
conn = yield self.mk_connector(connector_name='foo', setup=True) with LogCatcher() as lc: conn.default_event_handler(self.msg_helper.make_ack()) [log] = lc.messages() self.assertTrue(log.startswith("No event handler for 'foo'")) @inlineCallbacks def test_set_inbound_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_inbound_handler(msgs.append) msg = self.msg_helper.make_inbound("inbound") yield self.worker_helper.dispatch_inbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_set_default_inbound_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_default_inbound_handler(msgs
.append) msg = self.msg_helper.make_inbound("inbound") yield self.worker_helper.dispatch_inbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_set_event_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause()
conn.set_event_handler(msgs.append) msg = self.msg_helper.make_ack() yield self.worker_helper.dispatch_event(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_set_default_event_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_default_event_handler(msgs.append) msg = self.msg_helper.make_ack() yield self.worker_helper.dispatch_event(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_publish_outbound(self): conn = yield self.mk_connector(connector_name='foo', setup=True) msg = self.msg_helper.make_outbound("outbound") yield conn.publish_outbound(msg) msgs = self.worker_helper.get_dispatched_outbound('foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_inbound_handler_ignore_message(self): def im_handler(msg): raise IgnoreMessage() conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_default_inbound_handler(im_handler) msg = self.msg_helper.make_inbound("inbound") with LogCatcher() as lc: yield self.worker_helper.dispatch_inbound(msg, 'foo') [log] = lc.messages() self.assertTrue(log.startswith( "Ignoring msg due to IgnoreMessage(): <Message")) class TestReceiveOutboundConnector(BaseConnectorTestCase): connector_class = ReceiveOutboundConnector @inlineCallbacks def test_setup(self): conn = yield self.mk_connector(connector_name='foo') yield conn.setup() conn.unpause() with LogCatcher() as lc: msg = self.msg_helper.make_outbound("outbound") yield self.worker_helper.dispatch_outbound(msg, 'foo') [log] = lc.messages() self.assertTrue(log.startswith("No outbound handler for 'foo'")) msg = self.msg_helper.make_inbound("inbound") yield conn.publish_inbound(msg) msgs = self.worker_helper.get_dispatched_inbound('foo') self.assertEqual(msgs, [msg]) msg = self.msg_helper.make_ack() yield conn.publish_event(msg) msgs = self.worker_helper.get_dispatched_events('foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def 
test_default_outbound_handler(self): conn = yield self.mk_connector(connector_name='foo', setup=True) with LogCatcher() as lc: conn.default_outbound_handler( self.msg_helper.make_outbound("outbound")) [log] = lc.messages() self.assertTrue(log.startswith("No outbound handler for 'foo'")) @inlineCallbacks def test_set_outbound_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_outbound_handler(msgs.append) msg = self.msg_helper.make_outbound("outbound") yield self.worker_helper.dispatch_outbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_set_default_outbound_handler(self): msgs = [] conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_default_outbound_handler(msgs.append) msg = self.msg_helper.make_outbound("outbound") yield self.worker_helper.dispatch_outbound(msg, 'foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_publish_inbound(self): conn = yield self.mk_connector(connector_name='foo', setup=True) msg = self.msg_helper.make_inbound("inbound") yield conn.publish_inbound(msg) msgs = self.worker_helper.get_dispatched_inbound('foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_publish_event(self): conn = yield self.mk_connector(connector_name='foo', setup=True) msg = self.msg_helper.make_ack() yield conn.publish_event(msg) msgs = self.worker_helper.get_dispatched_events('foo') self.assertEqual(msgs, [msg]) @inlineCallbacks def test_outbound_handler_nack_message(self): def im_handler(msg): raise IgnoreMessage() conn = yield self.mk_connector(connector_name='foo', setup=True) conn.unpause() conn.set_default_outbound_handler(im_handler) msg = self.msg_helper.make_inbound("inbound") with LogCatch
from django.apps import AppConfig


class InvestmentsConfig(AppConfig):
    """App configuration for the investments app.

    Its sole job beyond naming the app is to wire up the signal handlers
    when Django finishes loading.
    """

    name = 'charcoallog.investments'

    def ready(self):
        """Import the signals module so its @receiver handlers register."""
        # This import exists purely for its side effect (signal handler
        # registration) -- do not let linters or refactors remove it.
        import charcoallog.investments.signals  # noqa: F401
c_backlinks', 'action': 'store_const', 'const': 'entry', 'default': 'entry'}), ('Link from section headers to the top of the TOC.', ['--toc-top-backlinks'], {'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}), ('Disable backlinks to the table of contents.', ['--no-toc-backlinks'], {'dest': 'toc_backlinks', 'action': 'store_false'}), ('Link from footnotes/citations to references. (default)', ['--footnote-backlinks'], {'action': 'store_true', 'default': 1, 'validator': validate_boolean}), ('Disable backlinks from footnotes and citations.', ['--no-footnote-backlinks'], {'dest': 'footnote_backlinks', 'action': 'store_false'}), ('Enable section numbering by Docutils. (default)', ['--section-numbering'], {'action': 'store_true', 'dest': 'sectnum_xform', 'default': 1, 'validator': validate_boolean}), ('Disable section numbering by Docutils.', ['--no-section-numbering'], {'action': 'store_false', 'dest': 'sectnum_xform'}), ('Remove comment elements from the document tree.', ['--strip-comments'], {'action': 'store_true', 'validator': validate_boolean}), ('Leave comment elements in the document tree. (default)', ['--leave-comments'], {'action': 'store_false', 'dest': 'strip_comments'}), ('Remove all elements with classes="<class>" from the document tree. ' 'Warning: potentially dangerous; use with caution. ' '(Multiple-use option.)', ['--strip-elements-with-class'], {'action': 'append', 'dest': 'strip_elements_with_classes', 'metavar': '<class>', 'validator': validate_strip_class}), ('Remove all classes="<class>" attributes from elements in the ' 'document tree. Warning: potentially dangerous; use with caution. ' '(Multiple-use option.)', ['--strip-class'], {'action': 'append', 'dest': 'strip_classes', 'metavar': '<class>', 'validator': validate_strip_class}),
('Report system messages at or higher than <level>: "info" or "1", ' '"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"', ['--report', '-r'], {'choices': threshold_choices, 'default': 2, 'dest': 'report_level', 'metavar': '<level>', 'validator': validate_threshold}), ('Report all system messages. (Same as "--report=1".)', ['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}), ('Report no system messages. (Same as "--report=5".)', ['--quiet', '-q'], {'action': 'store_const', 'const': 5, 'dest': 'report_level'}), ('Halt execution at system messages at or above <level>. ' 'Levels as in --report. Default: 4 (severe).', ['--halt'], {'choices': threshold_choices, 'dest': 'halt_level', 'default': 4, 'metavar': '<level>', 'validator': validate_threshold}), ('Halt at the slightest problem. Same as "--halt=info".', ['--strict'], {'action': 'store_const', 'const': 1, 'dest': 'halt_level'}), ('Enable a non-zero exit status for non-halting system messages at ' 'or above <level>. Default: 5 (disabled).', ['--exit-status'], {'choices': threshold_choices, 'dest': 'exit_status_level', 'default': 5, 'metavar': '<level>', 'validator': validate_threshold}), ('Enable debug-level system messages and diagnostics.', ['--debug'], {'action': 'store_true', 'validator': validate_boolean}), ('Disable debug output. (default)', ['--no-debug'], {'action': 'store_false', 'dest': 'debug'}), ('Send the output of system messages to <file>.', ['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}), ('Enable Python tracebacks when Docutils is halted.', ['--traceback'], {'action': 'store_true', 'default': None, 'validator': validate_boolean}), ('Disable Python tracebacks. (default)', ['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}), ('Specify the encoding and optionally the ' 'error handler of input text. Default: <locale-dependent>:strict.', ['--input-encoding', '-i'], {'metavar': '<name[:handler]>', 'validator': validate_encoding_and_error_handler}), ('Specify the error handler for undecodable characters. ' 'Choices: "strict" (default), "ignore", and "replace".', ['--input-encoding-error-handler'], {'default': 'strict', 'validator': validate_encoding_error_handler}), ('Specify the text encoding and optionally the error handler for ' 'output. 
Default: UTF-8:strict.', ['--output-encoding', '-o'], {'metavar': '<name[:handler]>', 'default': 'utf-8', 'validator': validate_encoding_and_error_handler}), ('Specify error handler for unencodable output characters; ' '"strict" (default), "ignore", "replace", ' '"xmlcharrefreplace", "backslashreplace".', ['--output-encoding-error-handler'], {'default': 'strict', 'validator': validate_encoding_error_handler}), ('Specify text encoding and error handler for error output. ' 'Default: %s:%s.' % (default_error_encoding, default_error_encoding_error_handler), ['--error-encoding', '-e'], {'metavar': '<name[:handler]>', 'default': default_error_encoding, 'validator': validate_encoding_and_error_handler}), ('Specify the error handler for unencodable characters in ' 'error output. Default: %s.' % default_error_encoding_error_handler, ['--error-encoding-error-handler'], {'default': default_error_encoding_error_handler, 'validator': validate_encoding_error_handler}), ('Specify the language (as 2-letter code). 
Default: en.', ['--language', '-l'], {'dest': 'language_code', 'default': 'en', 'metavar': '<name>'}), ('Write output file dependencies to <file>.', ['--record-dependencies'], {'metavar': '<file>', 'validator': validate_dependency_file, 'default': None}), # default set in Values class ('Read configuration settings from <file>, if it exists.', ['--config'], {'metavar': '<file>', 'type': 'string', 'action': 'callback', 'callback': read_config_file}), ("Show this program's version number and exit.", ['--version', '-V'], {'action': 'version'}), ('Show this help message and exit.', ['--help', '-h'], {'action': 'help'}), # Typically not useful for non-programmatical use: (SUPPRESS_HELP, ['--id-prefix'], {'default': ''}), (SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}), # Hidden options, for development use only: (SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}), (SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}), (SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}), (SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}), (SUPPRESS_HELP, ['--expose-internal-attribute'], {'action': 'append', 'dest': 'expose_internals', 'validator': validate_colon_separated_string_list}), (SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}), )) """Runtime settings and command-line options common to all Docutils front ends. Setting specs specific to individual Docutils components are also used (see `populate_from
import os
from flask import Flask, render_template, request
from PIL import Image
import sys
import pyocr
import pyocr.builders
import re
import json

__author__ = 'K_K_N'

app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))

# Matches the Indonesian ID-card (e-KTP) number: the label "NIK" followed by
# an optional colon and up to 20 digits; the digits are captured in group 1.
# BUGFIX: the original pattern r'[?:nik\s*:\s*](\d{1,20})\s*' used a character
# class `[...]`, which matches ONE character from the set {?, :, n, i, k, ...}
# rather than the literal label "nik".
_NIK_RE = re.compile(r'nik\s*:?\s*(\d{1,20})', re.I)


def ocr(image_file):
    """Run OCR on ``image_file`` and return a JSON string ``{"ektp": <NIK>}``.

    Exits the process when no OCR backend is installed.  When no NIK can be
    found in the recognized text, returns ``{"error": ...}`` instead of
    crashing (the original called ``.group()`` on a possibly-None match).
    """
    tools = pyocr.get_available_tools()
    if len(tools) == 0:
        print("No OCR tool found")
        sys.exit(1)
    # The tools are returned in the recommended order of usage
    tool = tools[0]

    langs = tool.get_available_languages()
    # BUGFIX: the original unconditionally used langs[1], which raises
    # IndexError when only a single language pack is installed.
    lang = langs[1] if len(langs) > 1 else langs[0]

    txt = tool.image_to_string(
        Image.open(image_file),
        lang=lang,
        builder=pyocr.builders.TextBuilder()
    )

    data = {}
    match = _NIK_RE.search(txt)
    if match:
        data['ektp'] = match.group(1)
    else:
        data['error'] = 'NIK not found'
    return json.dumps(data)


@app.route("/")
def index():
    """Serve the upload form."""
    return render_template("upload.html")


@app.route("/upload", methods=['POST'])
def upload():
    """Save each uploaded file under ./images and OCR the last one saved."""
    target = os.path.join(APP_ROOT, 'images/')
    print(target)

    if not os.path.isdir(target):
        os.mkdir(target)

    for file in request.files.getlist("file"):
        print(file)
        filename = file.filename
        destination = "/".join([target, filename])
        print(destination)
        file.save(destination)

    # NOTE(review): only the last uploaded file is OCR'd and saved files are
    # never deleted; a request with no files raises NameError here — confirm
    # this matches the intended single-file upload form.
    return ocr(destination)


if __name__ == "__main__":
    app.run(port=4555, debug=True)
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""A test of invocation-side code unconnected to an RPC server.""" import unittest from grpc._adapter import _intermediary_low from grpc._links import invocation from grpc.framework.interfaces.links import links from tests.unit.framework.common import test_constants from tests.unit.framework.interfaces.links import test_cases from tests.unit.framework.interfaces.links import test_utilities _NULL_BEHAVIOR = lambda unused_argument: None class LonelyInvocationLinkTest(unittest.TestCase): def testUpAndDown(self): channel = _intermediary_low.Channel('nonexistent:54321', None) invocation_link = inv
ocation.invocation_link( channel, 'nonexistent', None, {}, {}) invocation_link.start() invocation_link.stop() def _test_lonely_invocation_with_termination(self, termination): test_operation_id = object() test_group = 'test packag
e.Test Service' test_method = 'test method' invocation_link_mate = test_utilities.RecordingLink() channel = _intermediary_low.Channel('nonexistent:54321', None) invocation_link = invocation.invocation_link( channel, 'nonexistent', None, {}, {}) invocation_link.join_link(invocation_link_mate) invocation_link.start() ticket = links.Ticket( test_operation_id, 0, test_group, test_method, links.Ticket.Subscription.FULL, test_constants.SHORT_TIMEOUT, 1, None, None, None, None, None, termination, None) invocation_link.accept_ticket(ticket) invocation_link_mate.block_until_tickets_satisfy(test_cases.terminated) invocation_link.stop() self.assertIsNot( invocation_link_mate.tickets()[-1].termination, links.Ticket.Termination.COMPLETION) def testLonelyInvocationLinkWithCommencementTicket(self): self._test_lonely_invocation_with_termination(None) def testLonelyInvocationLinkWithEntireTicket(self): self._test_lonely_invocation_with_termination( links.Ticket.Termination.COMPLETION) if __name__ == '__main__': unittest.main()
#!/usr/bin/env python
"""Train the 'mnist_simple' model on the MNIST training split."""

import numpy as np
import torch as th
from torchvision import datasets, transforms

from nnexp import learn


def _main():
    # Download (if needed) the MNIST training set, exposed as tensors.
    train_set = datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.ToTensor(),
    )
    learn('mnist_simple', train_set)


if __name__ == '__main__':
    _main()
#!/usr/bin/env python ######################################### # Installation module for arachni ######################################### # AUTHOR OF MODULE NAME AUTHOR="Nathan Underwood (sai nate)" # DESCRIPTION OF THE MODULE DESCR
IPTION="Website / webapp vulnerability scanner." # INSTALLATION TYPE # OPTIONS GIT, SVN, FILE, DOWNLOAD I
NSTALL_TYPE="GIT" #LOCATION OF THE FILE OR GIT / SVN REPOSITORY REPOSITORY_LOCATION="https://github.com/Arachni/arachni.git" # WHERE DO YOU WANT TO INSTALL IT INSTALL_LOCATION="arachni" # DEPENDS FOR DEBIAN INSTALLS DEBIAN="" #COMMANDS TO RUN AFTER AFTER_COMMANDS=""
import os

# Package version; keep in sync with setup.py.
__version__ = 'v0.0.7'  # update also in setup.py

# Absolute path of the repository root (the parent of this package's folder).
root_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))

# Package metadata descriptor (mirrors setup.py fields).
info = {
    "name": "NiLabels",
    "version": __version__,
    "description": "",
    "repository": {
        "type": "git",
        "url": ""
    },
    "author": "Sebastiano Ferraris",
    "dependencies": {
        # requirements.txt automatically generated using pipreqs
        "python requirements": "{0}/requirements.txt".format(root_dir)
    }
}

# Glossary strings used in the documentation.
# BUGFIX: typos corrected in the text below ("chose" -> "chosen",
# "genreral" -> "general").
definition_template = """
A template is the average, computed with a chosen protocol, of a series of
images acquisition of the same anatomy, or in general of different objects
that share common features.
"""

definition_atlas = """
An atlas is the segmentation of the template, obtained averaging with a
chosen protocol, the series of segmentations corresponding to the series of
images acquisition that generates the template.
"""

definition_label = """
A segmentation assigns each region a label, and labels are represented as
subset of voxel with the same positive integer value.
"""

# Naming conventions used across the code base.
nomenclature_conventions = """
pfi_xxx = path to file xxx, \npfo_xxx = path to folder xxx, \nin_xxx = input data structure xxx, \nout_xxx = output data structure xxx, \nz_ : prefix to temporary files and folders, \nfin_ : file name.
"""
from requests import post
import io
import base64


class ZivService(object):
    """Client that pushes cycle files to a ZIV concentrator (CNC) service."""

    def __init__(self, cnc_url, user=None, password=None, sync=True):
        # Base URL of the concentrator service.
        self.cnc_url = cnc_url
        self.sync = sync
        # HTTP basic-auth credentials, used only when both are supplied.
        if user and password:
            self.auth = (user, password)
        else:
            self.auth = None

    def send_cycle(self, filename, cycle_filedata):
        """Send a cycle file to the concentrator service

        Keyword arguments:
        filename -- the name of our file (doesn't matter)
        cycle_filedata -- the file to send, encoded as a base64 string
        """
        payload = base64.b64decode(cycle_filedata)
        # Build "<cnc_url>/cct/cycles/", adding the slash only if missing.
        base = self.cnc_url
        if base[-1] != '/':
            base += '/'
        url = base + 'cct/cycles/'
        upload = {'file': (filename, payload)}
        if self.auth:
            return post(url, files=upload, auth=self.auth)
        return post(url, files=upload)
from setuptools import setup

# Minimal packaging manifest for the 'ipy' package.
setup(
    # Distribution name on the package index.
    name='ipy',
    # Python packages shipped with the distribution.
    packages=['ipy'],
    # Bundle non-Python files declared as package data / MANIFEST.in.
    include_package_data=True,
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'flask'
    ],
)
#!/usr/bin/env python

from setuptools import setup, Extension

# Packaging manifest for the C extension that wraps libmemcached.
setup(
    name = "python-libmemcached",
    version = "0.17.0",
    description="python memcached client wrapped on libmemcached",
    maintainer="subdragon",
    maintainer_email="subdragon@gmail.com",
    # Pyrex translates cmemcached.pyx into C at build time.
    requires = ['pyrex'],
    # This assumes that libmemcache is installed with base /usr/local
    # The generated module links against the system libmemcached library.
    ext_modules=[
        Extension('cmemcached', ['cmemcached.pyx'],
                  libraries=['memcached'],
                  )],
    # Module executed by `setup.py test`.
    test_suite="cmemcached_test",
)
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------


# The 'sysconfig' module requires Makefile and pyconfig.h files from
# Python installation. 'sysconfig' parses these files to get some
# information from them.
# TODO Verify that bundling Makefile and pyconfig.h is still required for Python 3.

import sysconfig
import os

from PyInstaller.utils.hooks import relpath_to_config_or_make


def _makefile_path():
    """Locate Python's build Makefile; the API name varies by version."""
    if hasattr(sysconfig, 'get_makefile_filename'):
        return sysconfig.get_makefile_filename()
    # sysconfig.get_makefile_filename is missing in Python < 2.7.9
    return sysconfig._get_makefile_filename()


_CONFIG_H = sysconfig.get_config_h_filename()
_MAKEFILE = _makefile_path()

# Bundle pyconfig.h alongside the frozen application.
datas = [(_CONFIG_H, relpath_to_config_or_make(_CONFIG_H))]

# The Makefile does not exist on all platforms, eg. on Windows
if os.path.exists(_MAKEFILE):
    datas.append((_MAKEFILE, relpath_to_config_or_make(_MAKEFILE)))
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import Level1Design


def test_Level1Design_inputs():
    # Expected trait metadata for every input of the Level1Design interface.
    input_map = dict(bases=dict(mandatory=True,
    ),
    contrasts=dict(),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    interscan_interval=dict(mandatory=True,
    ),
    model_serial_correlations=dict(mandatory=True,
    ),
    orthogonalization=dict(),
    session_info=dict(mandatory=True,
    ),
    )
    inputs = Level1Design.input_spec()

    # Every declared metadata key/value pair must match the actual trait.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


def test_Level1Design_outputs():
    # Expected trait metadata for every output of the Level1Design interface.
    output_map = dict(ev_files=dict(),
    fsf_files=dict(),
    )
    outputs = Level1Design.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
#!/usr/bin/env python
"""Import word data into the taboo database.

Reads a JSON object from stdin of the form
``{"word": ["prohibited1", "prohibited2", ...], ...}`` and inserts each
word plus its ranked prohibited words.
"""

import sys, json, psycopg2, argparse

parser = argparse.ArgumentParser(description='Imports word data into the taboo database.')
parser.add_argument('--verified', dest='verified', action='store_true',
                    help='include if these words are verified as good quality')
parser.add_argument('--source', dest='source',
                    help='include to set the source of these imported words')
args = parser.parse_args()

# NOTE(review): connection string is hard-coded for the prod database.
CONN_STR = 'dbname=prod user=prod'

# Read the whole of stdin at once (simpler than joining readlines()).
data = json.loads(sys.stdin.read())

conn = psycopg2.connect(CONN_STR)
conn.autocommit = True
cur = conn.cursor()

count = 0
for word in data:
    try:
        # Parameterized query: psycopg2 escapes the values safely.
        cur.execute(
            "INSERT INTO words (word, skipped, correct, status, source) "
            "VALUES(%s, %s, %s, %s, %s) RETURNING wid",
            (word, 0, 0, 'approved' if args.verified else 'unverified', args.source))
        wordid = cur.fetchone()[0]
        # Rank prohibited words by their 1-based position in the input list.
        for rank, prohibited in enumerate(data[word], start=1):
            cur.execute(
                "INSERT INTO prohibited_words (wid, word, rank) VALUES(%s, %s, %s)",
                (wordid, prohibited, rank))
        count += 1
    except Exception as e:
        # Best-effort import: report the failure and continue with the rest.
        print(e)

cur.close()
conn.close()

print('Inserted ' + str(count) + ' words')
"""Ask the user to pick a number via a psidialogs choice dialog."""
import psidialogs

selection = psidialogs.choice(["1", "2", "3"], "Choose a number!")
# choice() yields None when the dialog is cancelled; print nothing then.
if selection is not None:
    print(selection)
# -*- coding: utf-8 -*-

# Copyright (c) 2010 Jérémie DECOCK (http://www.jdhp.org)

import numpy as np
from pyarm import fig

class MuscleModel:
    """Fake muscle model: maps a raw command vector to joint torques.

    A command with at least 4 components is interpreted as antagonist
    muscle pairs (shoulder +/-, elbow +/-); shorter commands are taken
    directly as the torque vector.
    """

    # CONSTANTS ###############################################################

    name = 'Fake'

    ###########################################################################

    def __init__(self):
        # Init datas to plot
        fig.subfig('command',
                   title='Command',
                   xlabel='time (s)',
                   ylabel='Command',
                   ylim=[-0.1, 1.1])

    def compute_torque(self, angles, velocities, command):
        """Compute the torque.

        `angles` and `velocities` are unused by this fake model; they are
        kept for interface compatibility with the other muscle models.

        Returns a numpy array: shape (2,) for paired commands, otherwise
        the command itself converted to an array.
        """
        if len(command) >= 4:
            # Net torque of each antagonist pair: (agonist - antagonist).
            # BUGFIX: the original condition was `len(command) > 2`, which
            # let a 3-element command reach `command[3]` -> IndexError.
            torque = np.zeros(2)
            torque[0] = (command[0] - command[1])
            torque[1] = (command[2] - command[3])
            fig.append('command', command[0:4])
        else:
            torque = np.array(command)
            fig.append('command', command[0:2])

        return torque
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Converts an Assemblée nationale XML debate transcript ("compte rendu de
# séance") into one JSON object per intervention, printed on stdout.

import sys
import bs4
import json
import re


def xml2json(s):
    """Parse the XML transcript `s` and print each intervention as JSON.

    Uses the module-level `timestamp` counter (advanced by
    `printintervention`) to give every intervention a stable ordering key.
    """
    global timestamp
    timestamp = 0
    # Replace non-breaking spaces with plain spaces before parsing.
    s = s.replace(u'\xa0', u' ')
    soup = bs4.BeautifulSoup(s, features="lxml")
    # Template dict ("vierge" = blank) copied for each intervention.
    intervention_vierge = {"intervenant": "", "contexte": ""}
    intervention_vierge["source"] = "https://www.assemblee-nationale.fr/dyn/15/comptes-rendus/seance/"+soup.uid.string
    m = soup.metadonnees
    # <dateSeance> packs date and time as YYYYMMDDHHMM...; slice it apart.
    dateseance = str(m.dateseance.string)
    intervention_vierge["date"] = "%04d-%02d-%02d" % (int(dateseance[0:4]), int(dateseance[4:6]), int(dateseance[6:8]))
    intervention_vierge["heure"] = "%02d:%02d" % (int(dateseance[8:10]), int(dateseance[10:12]))
    intervention_vierge["session"] = str(m.session.string)[-9:].replace('-', '')
    contextes = ['']
    numeros_lois = None
    # Remembers the last known role ("fonction") of each speaker.
    intervenant2fonction = {}
    last_titre = ''
    for p in soup.find_all(['paragraphe', 'point']):
        intervention = intervention_vierge.copy()
        # Handle titles/contexts and law numbers
        # (was: Gestion des titres/contextes et numéros de loi)
        if p.name == "point" and p.texte and p.texte.get_text() and int(p['nivpoint']) < 4:
            # Truncate the context stack to this point's nesting level.
            contextes = contextes[:int(p['nivpoint']) -1 ]
            if not contextes:
                contextes = []
            contextes.append(p.texte.get_text().replace('\n', ''))
            # ' (n[[o]] NNN)' in @valeur carries the bill number(s).
            if p['valeur'] and p['valeur'][0:9] == ' (n[[o]] ':
                numeros_lois = p['valeur'][9:-1].replace(' ', '')
        if len(contextes) > 1:
            intervention["contexte"] = contextes[0] + " > " + contextes[-1]
        elif len(contextes) == 1:
            intervention["contexte"] = contextes[0]
        if p.name == "point":
            # A section heading becomes its own pseudo-intervention,
            # emitted only once per distinct title.
            intervention['intervention'] = "<p>"+contextes[-1]+"</p>"
            if (last_titre != contextes[-1]):
                printintervention(intervention)
                last_titre = contextes[-1]
            continue
        # Handle speaker interventions
        # (was: Gestion des interventions)
        if numeros_lois:
            intervention['numeros_loi'] = numeros_lois
        intervention["source"] += "#"+p['id_syceron']
        if len(p.orateurs):
            intervention["intervenant"] = p.orateurs.orateur.nom.get_text()
            if p['id_mandat'] and p['id_mandat'] != "-1":
                # Known MP: link to the official profile and key by actor id.
                intervention["intervenant_url"] = "http://www2.assemblee-nationale.fr/deputes/fiche/OMC_"+p['id_acteur']
                intervention["intervenant"] = p['id_acteur']
            if p.orateurs.orateur.qualite and p.orateurs.orateur.qualite.string:
                intervention['fonction'] = p.orateurs.orateur.qualite.get_text()
            # NOTE(review): if <qualite> is absent, 'fonction' was never set
            # and the next line raises KeyError — confirm inputs always
            # carry a qualite for new speakers.
            if not intervenant2fonction.get(intervention["intervenant"]) and intervention['fonction']:
                intervenant2fonction[intervention["intervenant"]] = intervention['fonction']
        elif intervention["intervenant"] == "Mme la présidente":
            intervention['fonction'] = "présidente"
            intervention["intervenant"] = '';
        elif intervention["intervenant"] == "M le président":
            intervention['fonction'] = "président"
            intervention["intervenant"] = '';
        else:
            # Fall back to the last role recorded for this speaker.
            intervention['fonction'] = intervenant2fonction.get(intervention["intervenant"], "")
        # Normalize the paragraph markup into simple <p>/<i> HTML.
        texte = "<p>"
        isdidascalie = False
        texte_didascalie = ""
        t_string = str(p.texte)
        t_string = t_string.replace('>\n', '> ')
        t_string = re.sub(r' ?<\/?texte> ?', '', t_string)
        t_string = t_string.replace('<italique>', '<i>')
        t_string = t_string.replace('</italique>', '</i>')
        # 'n<exposant>o</exposant>' renders "n°" (bill-number notation).
        t_string = t_string.replace('n<exposant>o</exposant>', 'n°')
        t_string = t_string.replace('n<exposant>os</exposant>', 'n°')
        t_string = t_string.replace('</i> <i>', ' ')
        t_string = t_string.replace('<br/>', '</p><p>')
        texte += t_string
        texte += "</p>"
        i = 0;
        # Split out stage directions ("didascalies"): italicized
        # parenthesized runs become separate anonymous interventions.
        for i in re.split(' ?(<i>\([^<]*\)</i> ?)', texte):
            if i[0] == ' ':
                i = i[1:]
            if i[-1] == ' ':
                i = i[:-1]
            if (i[0:3] != '<p>'):
                i = '<p>' + i
            if (i[-4:] != '</p>'):
                i = i + '</p>'
            if i.find('<p><i>') == 0:
                # NOTE(review): this aliases the shared template dict (no
                # .copy()), so mutations leak into later interventions —
                # confirm intended.
                didasc = intervention_vierge
                didasc["intervention"] = i
                didasc["contexte"] = intervention["contexte"]
                printintervention(didasc)
            else:
                intervention["intervention"] = i
                printintervention(intervention)


def printintervention(i):
    """Emit intervention dict `i` as JSON, one line per speaker.

    Skips empty paragraphs.  Advances the shared `timestamp` by 10 per
    (non-empty) intervention; joint speakers ("A et B") each get a line.
    """
    global timestamp
    if i['intervention'] == '<p></p>' or i['intervention'] == '<p> </p>':
        return
    intervenants = i['intervenant'].split(' et ')
    timestamp += 10
    for intervenant in intervenants:
        i['timestamp'] = str(timestamp)
        i['intervenant'] = intervenant
        print(json.dumps(i))


# Entry point: the XML file to convert is the single CLI argument.
content_file = sys.argv[1]
with open(content_file, encoding='utf-8') as f:
    xml2json(f.read())
self.norm) for v in self.variables] def reverse_normalize(self, variables, with_offset=True): # ensures numpy array out = np.empty_like(variables) for i, v in enumerate(variables): out[i] = self.variables[i].reverse_normalize(v, self.norm, with_offset=with_offset) return out def __getitem__(self, key): return self.variables[key] @staticmethod def get_hash(data): return sha256(data.view(np.uint8)).hexdigest() def add_variable(self, variable): if self.variables.count(variable.name) != 0: raise ValueError(f"Multiple variables with same name {variable.name}") self.variables.append(variable) @property def names(self): return [v.name for v in self.variables] @property def values(self): return np.array([v.value for v in self.variables], np.float64) def update(self, variables): """ Update internal variables for the values """ for var, v in zip(self.variables, variables): var.update(v) def dict_values(self): """ Get all vaules in a dictionary table """ return {v.name: v.value for v in self.variables} # Define a dispatcher for converting Minimize data to some specific data # BaseMinimize().to.skopt() will convert to an skopt.OptimizationResult structure to = ClassDispatcher("to", obj_getattr=lambda obj, key: (_ for _ in ()).throw( AttributeError((f"{obj}.to does not implement '{key}' " f"dispatcher, are you using it incorrectly?")) ) ) def __enter__(self): """ Open the file and fill with stuff """ _log.debug(f"__enter__ {self.__class__.__name__}") # check if the file exists if self.out.is_file(): # read in previous data # this will be "[variables, runs]" data, header = tableSile(self.out).read_data(ret_header=True) else: data = np.array([]) # check if the file exists if self.out.is_file() and data.size > 0: nvars = data.shape[0] - 1 if nvars != len(self): raise ValueError(f"Found old file {self.out} which contains previous data for another number of parameters, please delete or move file") # now parse header *header, _ = header[1:].split() idx = [] for name in self.names: # 
find index in header for i, head in enumerate(header): if head == name: idx.append(i) break if nvars != len(idx): print(header) print(self.names) print(idx) raise ValueError(f"Found old file {self.out} which contains previous data with some variables being renamed, please correct header or move file") # add functional value, no pivot idx.append(len(self)) # re-arrange data (in case user swapped order of variables) data = np.ascontiguousarray(data[idx].T) x, y = data[:, :-1], data[:, -1] # We populate with hashes without the functional # That would mean we can't compare hashes between input arguments # only make the first index a list (x.tolist() makes everything a list) self.data.x = [xi for xi in x] self.data.y = [yi for yi in y] self.data.hash = list(map(self.get_hash, self.data.x)) # Re-open file (overwriting it) # First output a few things in this file comment = f"Created by sisl '{self.__class__.__name__}'." header = self.names + ["metric"] if len(self.data.x) == 0: self._fh = tableSile(self.out, 'w').__enter__() self._fh.write_data(comment=comment, header=header) else: comment += f" The first {len(self.data)} lines contains prior content." data = np.column_stack((self.data.x, self.data.y)) self._fh = tableSile(self.out, 'w').__enter__() self._fh.write_data(data.T, comment=comment, header=header, fmt='20.17e') self._fh.flush() return self def __exit__(self, *args, **kwargs): """ Exit routine """ self._fh.__exit__(*args, **kwargs) # clean-up del self._fh def __len__(self): return len(self.variables) @abstractmethod def __call__(self, variables, *args): """ Actual running code that takes `variables` conforming to the order of initial setup. It will return the functional of the minimize method Parameters ---------- variables : array-like variables to be minimized according to the metric `self.metric` """ def _minimize_func(self, norm_variables, *args): """ Minimization function passed to the minimization method
This is a wrapper which does 3 things: 1. Convert input values from normalized to regular values 2. Update internal variables with the value currently being runned. 3. Check if the values have already been calc
ulated, if so return the metric directly from the stored table. 4. Else, calculate the metric using the ``self.__call__`` 5. Append values to the data and hash it. Parameters ---------- norm_variables : array_like normed variables to be minimized *args : arguments passed directly to the ``self.__call__`` method """ _log.debug(f"{self.__class__.__name__}._minimize_func") # Update internal set of variables variables = self.reverse_normalize(norm_variables) self.update(variables) # First get the hash of the current variables current_hash = self.get_hash(variables) try: idx = self.data.hash.index(current_hash) # immediately return functional value that is hashed _log.info(f"{self.__class__.__name__}._minimize_func, using prior hashed calculation {idx}") return self.data.y[idx] except ValueError: # in case the hash is not found pass # Else we have to call minimize metric = np.array(self(variables, *args)) # add the data to the output file and hash it self._fh.write_data(variables.reshape(-1, 1), metric.reshape(-1, 1), fmt='20.17e') self._fh.flush() self.data.x.append(variables) self.data.y.append(metric) self.data.hash.append(current_hash) return metric @abstractmethod def run(self, *args, **kwargs): """ Run the minimize model """ class LocalMinimize(BaseMinimize): def run(self, *args, **kwargs): # Run minimization (always with normalized values) norm_v0 = self.normalize(self.values) bounds = self.normalize_bounds() with self: opt = minimize(self._minimize_func, x0=norm_v0, args=args, bounds=bounds, **kwargs) return _convert_optimize_result(self, opt) class DualAnnealingMinimize(BaseMinimize): def run(self, *args, **kwargs): # Run minimization (always with normalized values) norm_v0 = self.normalize(self.values) bounds = self.normalize_bounds() with self: opt = dual_annealing(self._minimize_func, x0=norm_v0, args=args, bounds=bounds, **kwargs) return _convert_optimize_result(self, opt) class MinimizeToDispatcher(AbstractDispatch): """ Base dispatcher from class passing 
from Minimize class """ @staticmethod def _ensure_object(obj): if isinstance(obj, type): raise ValueError(f"Dispatcher on {obj} must not be called on the class.") class MinimizeToskoptDispatcher(MinimizeToDispatcher): def dispat
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from testinfra.modules.base import InstanceModule


class Iptables(InstanceModule):
    """Test iptables rule exists"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # support for -w argument (since 1.6.0)
        # https://git.netfilter.org/iptables/commit/?id=aaa4ace72b
        # centos 6 has no support
        # centos 7 has 1.4 patched
        # None means "not yet probed"; resolved lazily on first run.
        self._has_w_argument = None

    def _iptables_command(self, version):
        """Return the base command for the given IP version (4 or 6).

        Adds ``-w 90`` (wait up to 90s for the xtables lock) unless probing
        showed the local iptables does not support that flag.
        """
        if version == 4:
            iptables = "iptables"
        elif version == 6:
            iptables = "ip6tables"
        else:
            raise RuntimeError("Invalid version: %s" % version)
        if self._has_w_argument is False:
            return iptables
        return "{} -w 90".format(iptables)

    def _run_iptables(self, version, cmd, *args):
        """Run an iptables command, probing for ``-w`` support on first use."""
        ipt_cmd = "{} {}".format(self._iptables_command(version), cmd)
        if self._has_w_argument is None:
            # Exit code 2 signals an unknown flag: retry without -w.
            result = self.run_expect([0, 2], ipt_cmd, *args)
            if result.rc == 2:
                self._has_w_argument = False
                return self._run_iptables(version, cmd, *args)
            self._has_w_argument = True
            return result.stdout.rstrip('\r\n')
        return self.check_output(ipt_cmd, *args)

    def rules(self, table='filter', chain=None, version=4):
        """Returns list of iptables rules

        Based on output of `iptables -t TABLE -S CHAIN` command

        optionally takes the following arguments:
            - table: defaults to `filter`
            - chain: defaults to all chains
            - version: default 4 (iptables), optionally 6 (ip6tables)

        >>> host.iptables.rules()
        [
            '-P INPUT ACCEPT',
            '-P FORWARD ACCEPT',
            '-P OUTPUT ACCEPT',
            '-A INPUT -i lo -j ACCEPT',
            '-A INPUT -j REJECT',
            '-A FORWARD -j REJECT'
        ]

        >>> host.iptables.rules("nat", "INPUT")
        ['-P PREROUTING ACCEPT']
        """
        cmd, args = "-t %s -S", [table]
        if chain:
            cmd += " %s"
            args += [chain]
        # Normalize tabs to spaces so callers can match with plain strings.
        return [
            line.replace("\t", " ")
            for line in self._run_iptables(version, cmd, *args).splitlines()
        ]
he effect of uploading to an existing blob depends on the "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. See the `object versioning <https://cloud.google.com/storage/docs/object-versioning>`_ and `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ API documents for details. :type filename: string :param filename: The path to the file. :type content_type: string or ``NoneType`` :param content_type: Optional type of content being uploaded. :type encryption_key: str or bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ content_type = content_type or self._properties.get('contentType') if content_type is None: content_type, _ = mimetypes.guess_type(filename) with open(filename, 'rb') as file_obj: self.upload_from_file(file_obj, content_type=content_type, encryption_key=encryption_key, client=client) def upload_from_string(self, data, content_type='text/plain', encryption_key=None, client=None): """Upload contents of this blob from the provided string. .. note:: The effect of uploading to an existing blob depends on the "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. See the `object versioning <https://cloud.google.com/storage/docs/object-versioning>`_ and `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ API documents for details. :type data: bytes or text :param data: The data to store in this blob. If the value is text, it will be encoded as UTF-8. :type content_type: string :param content_type: Optional type of content being uploaded. Defaults to ``'text/plain'``. 
:type encryption_key: str or bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ if isinstance(data, six.text_type): data = data.encode('utf-8') string_buffer = BytesIO() string_buffer.write(data) self.upload_from_file(file_obj=string_buffer, rewind=True, size=len(data), content_type=content_type, encryption_key=encryption_key, client=client) def make_public(self, client=None): """Make this blob public giving all users read access. :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ self.acl.all().grant_read() self.acl.save(client=client) cache_control = _scalar_property('cacheControl') """HTTP 'Cache-Control' header for this object. See: https://tools.ietf.org/html/rfc7234#section-5.2 and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ content_disposition = _scalar_property('contentDisposition') """HTTP 'Content-Disposition' header for this object. See: https://tools.ietf.org/html/rfc6266 and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ content_encoding = _scalar_property('contentEncoding') """HTTP 'Content-Encoding' header for this object. See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ content_language = _scalar_property('contentLanguage') """HTTP 'Content-Language' header for this object. 
See: http://tools.ietf.org/html/bcp47 and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ content_type = _scalar_property('contentType') """HTTP 'Content-Type' header for this object. See: https://tools.ietf.org/html/rfc2616#section-14.17 and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ crc32c = _scalar_property('crc32c') """CRC32C checksum for this object. See: http://tools.ietf.org/html/rfc4960#appendix-B and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, returns ``None``. :rtype: string or ``NoneType`` """ @property def component_count(self): """Number of underlying components that make up this object. See: https://cloud.google.com/storage/docs/json_api/v1/objects :rtype: integer or ``NoneType`` :returns: The component count (in case of a composed object) or ``None`` if the property is not set locally. This property will not be set on objects not created via ``compose``. """ component_count = self._properties.get('componentCount') if component_count is not None: return int(component_count) @property def etag(self):
"""Retrieve the ETag for the object. See: http://tools.ietf.org/html/rfc2616#section-3.11 and https://cloud.google.com/storage/docs/json_api/v1/objects :rtype: string or ``NoneType`` :returns: The blob etag or ``None`` if the property is not set locally. """ return self._properties.get('eta
g') @property def generation(self): """Retrieve the generation for the object. See: https://cloud.google.com/storage/docs/json_api/v1/objects :rtype: integer or ``NoneType`` :returns: The generation of the blob or ``None`` if the property is not set locally. """ generation = self._properties.get('generation') if generation is not None: return int(generation) @generation.setter def generation(self, value): """Set the generation for this blob. See: https://cloud.google.com/storage/docs/json_api/v1/objects :type value: integer or ``NoneType`` :param value: the generation value for this blob. Setting this value is useful when trying to retrieve specific versions of a blob. """ self._patch_property('generation', value) @property def id(self): """Retrieve the ID for the object. See: https://cloud.google.com/storage/docs/json_api/v1/objects :rtype: string or ``NoneType`` :returns: The ID of the blob or ``None`` if the property is not set locally. """ return self._properties.get('id') md5_hash = _scalar_property('md5Hash') """MD5 hash for this object. See: http://tools.ietf.org/html/rfc4960#appendix-B and https://cloud.google.com/storage/docs/json_api/v1/objects If the property is not set locally, r
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, mario.lassnig@cern.ch, 2016-2017
# - Daniel Drizhuk, d.drizhuk@gmail.com, 2017

import argparse
import importlib
import logging
import sys
import threading

from pilot.util.constants import SUCCESS, FAILURE, ERRNO_NOJOBS
from pilot.util.https import https_setup
from pilot.util.information import set_location

VERSION = '2017-04-04.001'


def main():
    """Set up the pilot environment and run the selected workflow.

    Relies on the module-level ``args`` namespace created in the
    ``__main__`` block below.

    :returns: the trace object returned by the workflow's ``run()``,
        or ``False`` if the location could not be set up.
    """
    logger = logging.getLogger(__name__)
    logger.info('pilot startup - version %s', VERSION)

    # Shared event that all pilot threads poll to shut down gracefully.
    args.graceful_stop = threading.Event()

    https_setup(args, VERSION)

    if not set_location(args):
        return False

    logger.info('workflow: %s', args.workflow)
    # BUG FIX: the original used __import__(name, globals(), locals(),
    # [args.workflow], -1); level=-1 is Python-2-only and raises on
    # Python 3.  importlib.import_module works on both and is clearer.
    workflow = importlib.import_module('pilot.workflow.%s' % args.workflow)
    return workflow.run(args)


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d',
                            dest='debug',
                            action='store_true',
                            default=False,
                            help='enable debug logging messages')
    # the choices must match in name the python module in pilot/workflow/
    arg_parser.add_argument('-w',
                            dest='workflow',
                            default='generic',
                            choices=['generic', 'generic_hpc',
                                     'production', 'production_hpc',
                                     'analysis', 'analysis_hpc',
                                     'eventservice', 'eventservice_hpc'],
                            help='pilot workflow (default: generic)')
    # graciously stop pilot process after hard limit
    arg_parser.add_argument('-l',
                            dest='lifetime',
                            default=10,
                            type=int,
                            help='pilot lifetime seconds (default: 10)')
    # set the appropriate site and queue
    arg_parser.add_argument('-q',
                            dest='queue',
                            required=True,
                            help='MANDATORY: queue name (e.g., AGLT2_TEST-condor')
    # graciously stop pilot process after hard limit
    arg_parser.add_argument('-j',
                            dest='job_label',
                            default='mtest',
                            help='job prod/source label (default: mtest)')
    # SSL certificates
    arg_parser.add_argument('--cacert',
                            dest='cacert',
                            default=None,
                            help='CA certificate to use with HTTPS calls to server, commonly X509 proxy',
                            metavar='path/to/your/certificate')
    arg_parser.add_argument('--capath',
                            dest='capath',
                            default=None,
                            help='CA certificates path',
                            metavar='path/to/certificates/')
    args = arg_parser.parse_args()

    # CLEANUP: the two format strings were previously duplicated inline
    # in both branches; select them once, then configure file + console
    # logging with the same level and format.
    console = logging.StreamHandler(sys.stdout)
    if args.debug:
        log_level = logging.DEBUG
        log_format = '%(asctime)s | %(levelname)-8s | %(threadName)-10s | %(name)-32s | %(funcName)-32s | %(message)s'
    else:
        log_level = logging.INFO
        log_format = '%(asctime)s | %(levelname)-8s | %(message)s'
    logging.basicConfig(filename='pilotlog.txt', level=log_level, format=log_format)
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(console)

    trace = main()

    logging.shutdown()

    if not trace:
        logging.getLogger(__name__).critical('pilot startup did not succeed -- aborting')
        sys.exit(FAILURE)
    elif trace.pilot['nr_jobs'] > 0:
        sys.exit(SUCCESS)
    else:
        sys.exit(ERRNO_NOJOBS)
"""Colle
ction of helpers
for online deployment."""
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsresample import BRAINSResample


def test_BRAINSResample_inputs():
    """Verify that every input trait of BRAINSResample carries the
    expected metadata (command-line argstr, separators, hashing flags).
    """
    # Expected metadata per input trait; keys mirror the CLI flags the
    # interface emits.
    input_map = dict(
        args=dict(argstr='%s', ),
        defaultValue=dict(argstr='--defaultValue %f', ),
        deformationVolume=dict(argstr='--deformationVolume %s', ),
        environ=dict(nohash=True, usedefault=True, ),
        gridSpacing=dict(argstr='--gridSpacing %s', sep=',', ),
        ignore_exception=dict(nohash=True, usedefault=True, ),
        inputVolume=dict(argstr='--inputVolume %s', ),
        interpolationMode=dict(argstr='--interpolationMode %s', ),
        # boolean flag: argstr deliberately has no format placeholder
        inverseTransform=dict(argstr='--inverseTransform ', ),
        numberOfThreads=dict(argstr='--numberOfThreads %d', ),
        outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ),
        pixelType=dict(argstr='--pixelType %s', ),
        referenceVolume=dict(argstr='--referenceVolume %s', ),
        terminal_output=dict(deprecated='1.0.0', nohash=True, ),
        warpTransform=dict(argstr='--warpTransform %s', ),
    )
    inputs = BRAINSResample.input_spec()

    # Each declared metadata key/value must match the live trait exactly.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


def test_BRAINSResample_outputs():
    """Verify the metadata of BRAINSResample's single output trait."""
    output_map = dict(outputVolume=dict(), )
    outputs = BRAINSResample.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
astores": [component_generator( "datastores", source_provider, provider, get_data(source_provider, "datastores", "nfs").type, get_data(provider, "datastores", "nfs").type)], "networks": [ component_generator("vlans", source_provider, provider, get_data(source_provider, "vlans", "VM Network"), get_data(provider, "vlans", "ovirtmgmt")) ], } return infra_mapping_data @pytest.fixture(scope="function") def mapping_data_multiple_vm_obj_single_datastore(request, appliance, source_provider, provider): # this fixture will take list of N VM templates via request and call get_vm for each cluster = provider.data.get("clusters", [False])[0] if not cluster: pytest.skip("No data for cluster available on provider.") infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Single Datastore migration of VM from {ds_type1} to {ds_type2},".format( ds_type1=request.param[0], ds_type2=request.param[1] ), "networks": [ component_generator("vlans", source_provider, provider, "VM Network", "ovirtmgmt") ], }, ) vm_list = [] for template_name in request.param[2]: vm_list.append(get_vm(request, appliance, source_provider, template_name)) return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=vm_list) @pytest.fixture(scope="function") def mapping_data_single_datastore(request, source_provider, provider): infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Single Datastore migration of VM from {ds_type1} to {ds_type2},".format( ds_type1=request.param[0], ds_type2=request.param[1] ), "datastores": [ component_generator( "datastores", source_provider, provider, request.param[0], request.param[1] ) ], }, ) return infra_mapping_data @pytest.fixture(scope="function") def mapping_data_single_network(request, source_provider, provider): infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( 
infra_mapping_data, { "description": "Single Network migration of VM from {vlan1} to {vlan2},".format( vlan1=request.param[0], vlan2=request.param[1] ), "networks": [ component_generator( "vlans", source_provider, provider, request.param[0], request.param[1] ) ], }, ) return infra_mapping_data @pytest.fixture(scope="function") def edited_mapping_data(request, source_provider, provider): infra_mapping_data = infra_mapping_default_data(source_provider, provider) edited_form_data = { "description": "my edited description", "clusters": {}, "datastores": {}, "networks": [ component_generator( "vlans", source_provider, provider, request.param[1][0], request.param[1][1] ) ], } return infra_mapping_data, edited_form_data @pytest.fixture(scope="function") def mapping_data_dual_vm_obj_dual_datastore(request, appliance, source_provider, provider): vmware_nw = source_provider.data.get("vlans", [None])[0] rhvm_nw = provider.data.get("vlans", [None])[0] cluster = provider.data.get("clusters", [False])[0] if not vmware_nw or not rhvm_nw or not cluster: pytest.skip("No data for source or target network in providers.") infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Dual DS migration of VM from {dss1} to {dst1},& from {dss2} to {dst2}". 
format(dss1=request.param[0][0], dst1=request.param[0][1], dss2=request.param[1][0], dst2=request.param[1][1]), "datastores": [ component_generator( "datastores", source_provider, provider, request.param[0][0], request.param[0][1], ), component_generator( "datastores", source_provider, provider, request.param[1][0], request.param[1][1], ), ], "networks": [ component_generator( "vlans", source_provider, provider, source_provider.data.get("vlans")[0], provider.data.get("vlans")[0], ) ], }, ) # creating 2 VMs on two different datastores and returning its object list vm_obj1 = get_vm(request, appliance, source_provider, request.param[0][2], request.param[0][0]) vm_obj2 = get_vm(request, appliance, source_provider, request.param[1][2], request.param[1][0]) return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj1, vm_obj2]) @pytest.fixture(scope="function") def mapping_data_vm_obj_dual_nics(request, appliance, source_provider, provider): vmware_nw = source_provider.data.get("vlans", [None])[0] rhvm_nw = provider.data.get("vlans", [None])[0] cluster = provider.data.get("clusters", [False])[0] if not vmware_nw or not rhvm_nw or not cluster: pytest.skip("No data for source or target network in providers.") infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Dual DS migration of VM from {dss1} to {dst1},& from {dss2} to {dst2}". format(dss1=request.param[0][0], dst1=request.param[0][1], dss2=request.param[1][0], dst2=request.param[1][1]), "networks": [ component_generator( "vlans", source_provider, provider, request.param[0][0], request.param[0][1] ), component_generator( "vlans", source_provider, provider, request.param[1][0], requ
est.param[1][1] ), ], }, ) vm_obj = get_vm(request, appliance, source_provider, request.param[2]) return FormDataVmObj(in
fra_mapping_data=infra_mapping_data, vm_list=[vm_obj]) @pytest.fixture(scope="function") def mapping_data_vm_obj_single_datastore(request, appliance, source_provider, provider): """Return Infra Mapping form data and vm object""" infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Single DS migration of VM from {ds_type1} to {ds_type2},".format( ds_type1=request.param[0], ds_type2=request.param[1] ), "datastores": [ component_generator( "datastores", source_provider, provider, request.param[0], request.param[1] ) ], }, ) vm_obj = get_vm(request, appliance, source_provider, request.param[2], request.param[0]) return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj]) @pytest.fixture(scope="function") def mapping_data_vm_obj_single_network(request, appliance, source_provider, provider): infra_mapping_data = infra_mapping_default_data(source_provider, provider) recursive_update( infra_mapping_data, { "description": "Single Network migration of VM from {vlan1} to {vlan2},".format( vlan1=request.param[0], vlan2=request.param[1] ), "networks": [
# Compiles module_dialogs.dialogs into the engine's conversation.txt /
# dialog_states.txt export format.
import process_common as pc
import process_operations as po

import module_dialogs
import module_info

from header_dialogs import *

# Parallel to module_dialogs.dialogs: resolved state index for each
# entry's start/end state.  Filled by compile_dialog_states().
start_states = []
end_states = []


def compile_dialog_states(processor, dialog_file):
    """Assign an integer index to every dialog state and export the
    state list to dialog_states.txt.

    Populates the module-level start_states / end_states lists (one
    entry per dialog) and reports errors for start states that no
    dialog ever ends in, and for end states no dialog ever starts from.
    """
    global start_states
    global end_states

    # Engine-defined states that always exist; they count as "used"
    # (initial usage 1) even if no dialog ends in them.
    unique_state_list = ["start", "party_encounter", "prisoner_liberated", "enemy_defeated",
                         "party_relieved", "event_triggered", "close_window", "trade",
                         "exchange_members", "trade_prisoners", "buy_mercenaries", "view_char",
                         "training", "member_chat", "prisoner_chat"]
    unique_state_usages = [1 for i in unique_state_list]
    unique_states = dict((k, i) for i, k in enumerate(unique_state_list))
    last_index = len(unique_state_list)

    # First pass: register every end state, assigning new indices on the fly.
    for entry in module_dialogs.dialogs:
        end_state = entry[5]
        index = unique_states.setdefault(end_state, last_index)
        if index == last_index:
            last_index += 1
            unique_state_list.append(end_state)
            unique_state_usages.append(0)
        end_states.append(index)

    # Second pass: every start state must already exist as some end state.
    for entry in module_dialogs.dialogs:
        start_state = entry[2]
        try:
            index = unique_states[start_state]
            unique_state_usages[index] += 1
            start_states.append(index)
        except KeyError:
            pc.ERROR("starting dialog state '%s' has no matching ending state" % start_state)

    # An end state with zero usages is unreachable as a start state.
    for state, usages in zip(unique_state_list, unique_state_usages):
        if not usages:
            pc.ERROR("ending dialog state '%s' is not used" % state)

    # NOTE(review): binary mode + str join suggests Python-2-era code
    # (CRLF line endings written verbatim) — confirm before porting.
    with open(module_info.export_path("dialog_states.txt"), "wb") as state_file:
        state_file.write("".join("%s\r\n" % e for e in unique_state_list))


# Maps a generated dialog identifier to the list of distinct texts seen
# under that identifier (used to disambiguate duplicates).
dialog_names = {}


def get_dialog_name(start_state, end_state, text):
    """Build the unique "dlga_<start>:<end>" identifier for one dialog.

    If the same start:end pair was already used with a different text,
    the name stays unsuffixed for new texts; a repeated identical text
    reuses its first occurrence's ".<n>" suffixed name.
    """
    global dialog_names
    name = "dlga_%s:%s" % (pc.convert_to_identifier(start_state), pc.convert_to_identifier(end_state))
    text_list = dialog_names.setdefault(name, [])
    for i, existing_text in enumerate(text_list):
        if text == existing_text:
            # Duplicate text: suffix with its 1-based position.
            name = "%s.%d" % (name, i + 1)
            break
    else:
        text_list.append(text)
    return name


def process_entry(processor, txt_file, entry, index):
    """Serialize one dialog entry to its text-file line.

    Layout: name, speaker word (flags | troop/party-template id, with an
    optional "other" speaker packed into the high bits), start-state
    index, condition block, text, end-state index, consequence block,
    voice-over token.
    """
    name = get_dialog_name(entry[start_state_pos], entry[end_state_pos], entry[text_pos])
    trp_pt = entry[speaker_pos]
    flags = entry[flags_pos]
    speaker = 0
    if flags & other:
        # (speaker, other_speaker) tuple: pack the second troop id into
        # the high bits and clear the flag.
        speaker = processor.process_id(trp_pt[1], "trp") << other_bits
        flags ^= other
        trp_pt = trp_pt[0]
    if flags & party_tpl:
        speaker |= processor.process_id(trp_pt, "pt")
    else:
        speaker |= processor.process_id(trp_pt, "trp")
    speaker |= flags
    output_list = ["%s %d %d " % (name, speaker, start_states[index])]
    output_list.extend(processor.process_block(entry[conditions_pos], "%s conditions" % name))
    output_list.append("%s " % pc.replace_spaces(entry[text_pos]) if entry[text_pos] else "NO_TEXT ")
    output_list.append(" %d " % end_states[index])
    output_list.extend(processor.process_block(entry[consequences_pos], "%s consequences" % name))
    # Voice-over column is optional in the source tuples.
    output_list.append("%s " % entry[voice_pos] if len(entry) > voice_pos else "NO_VOICEOVER ")
    output_list.append("\r\n")
    txt_file.write("".join(output_list))


export = po.make_export(data=module_dialogs.dialogs, data_name="dialogs", file_name="conversation",
                        header_format="dialogsfile version 2\r\n%d\r\n", process_entry=process_entry,
                        process_list=compile_dialog_states)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import apache_beam as beam
import logging

from typing import Dict, Any, List

from uploaders.google_ads.customer_match.abstract_uploader import GoogleAdsCustomerMatchAbstractUploaderDoFn
from uploaders import utils
from models.execution import DestinationType, AccountConfig


class GoogleAdsCustomerMatchContactInfoUploaderDoFn(GoogleAdsCustomerMatchAbstractUploaderDoFn):
    """Customer Match uploader keyed on contact information.

    Specializes the abstract customer-match uploader for CRM-based
    user lists whose upload key type is CONTACT_INFO.
    """

    def get_list_definition(self, account_config: AccountConfig,
                            destination_metadata: List[str]) -> Dict[str, Any]:
        """Build the user-list definition payload.

        The list name comes from the first destination metadata entry;
        account_config is accepted for interface compatibility.
        """
        # Other upload key types would be CRM_ID or MOBILE_ADVERTISING_ID;
        # this uploader is fixed to CONTACT_INFO.
        crm_based_list = {
            'upload_key_type': 'CONTACT_INFO',
            'data_source_type': 'FIRST_PARTY',
        }
        return {
            'membership_status': 'OPEN',
            'name': destination_metadata[0],
            'description': 'List created automatically by Megalista',
            'membership_life_span': 10000,
            'crm_based_user_list': crm_based_list,
        }

    def get_row_keys(self) -> List[str]:
        """Row fields consumed by this uploader."""
        return ['hashed_email', 'address_info', 'hashed_phone_number']

    def get_action_type(self) -> DestinationType:
        """Destination type handled by this DoFn."""
        return DestinationType.ADS_CUSTOMER_MATCH_CONTACT_INFO_UPLOAD
import unittest
import urllib
import logging

from google.appengine.ext import testbed
from google.appengine.api import urlfetch

from conference import ConferenceApi
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms

from protorpc.remote import protojson


def init_stubs(tb):
    """Activate the App Engine service stubs needed by the tests on *tb*.

    The mail stub is intentionally left out; the task-queue stub reads
    its queue.yaml from tests/resources.

    :param tb: an activated ``testbed.Testbed`` instance.
    :returns: the same testbed, for chaining.
    """
    tb.init_urlfetch_stub()
    tb.init_app_identity_stub()
    tb.init_blobstore_stub()
    tb.init_capability_stub()
    tb.init_channel_stub()
    tb.init_datastore_v3_stub()
    tb.init_files_stub()
    tb.init_memcache_stub()
    tb.init_taskqueue_stub(root_path='tests/resources')
    tb.init_user_stub()
    tb.init_xmpp_stub()
    return tb


class AppEngineAPITest(unittest.TestCase):
    """Exercises the conference API through a live dev-server URL."""

    def setUp(self):
        logging.getLogger().setLevel(logging.DEBUG)
        tb = testbed.Testbed()
        # setup_env is required before activate() for version-aware stubs.
        tb.setup_env(current_version_id='testbed.version')
        tb.activate()
        self.testbed = init_stubs(tb)

    def testUrlfetch(self):
        """POST a protojson-encoded ConferenceForm to the local endpoint
        and expect HTTP 200.  Requires the dev server on localhost:9000.
        """
        url = 'http://localhost:9000/_ah/api/conference/v1/conference'
        form_fields = ConferenceForm(name='steven')
        form_data = protojson.encode_message(form_fields)
        response = urlfetch.fetch(url=url,
                                  payload=form_data,
                                  method=urlfetch.POST,
                                  headers={'Content-Type': 'application/json'})
        print(response.content)
        # FIX: assertEquals is a deprecated alias of assertEqual; the
        # commented-out urllib form-encoding experiment and the
        # print(dir(response)) debug line were dead code and are removed.
        self.assertEqual(200, response.status_code)
import os
import unittest

from vsg.rules import architecture
from vsg import vhdlFile
from vsg.tests import utils

sTestDir = os.path.dirname(__file__)

lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_016_test_input.vhd'))

# Expected fixed outputs for both rule styles; each list starts with a
# leading empty entry to mirror 1-based line numbering.
lExpected_require_blank = ['']
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_require_blank.vhd'), lExpected_require_blank)

lExpected_no_blank = ['']
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_no_blank.vhd'), lExpected_no_blank)


class test_architecture_rule(unittest.TestCase):
    """Checks architecture rule 016 in both its blank-line styles."""

    def setUp(self):
        self.oFile = vhdlFile.vhdlFile(lFile)
        # The input file must have parsed without errors.
        self.assertIsNone(eError)

    def test_rule_016_require_blank(self):
        oRule = architecture.rule_016()
        self.assertTrue(oRule)
        self.assertEqual('architecture', oRule.name)
        self.assertEqual('016', oRule.identifier)

        oRule.analyze(self.oFile)
        lViolationLines = utils.extract_violation_lines_from_violation_object(oRule.violations)
        self.assertEqual([7, 12, 17], lViolationLines)

    def test_fix_rule_016_require_blank(self):
        oRule = architecture.rule_016()

        oRule.fix(self.oFile)
        self.assertEqual(lExpected_require_blank, self.oFile.get_lines())

        # A fixed file must be free of violations.
        oRule.analyze(self.oFile)
        self.assertEqual([], oRule.violations)

    def test_rule_016_no_blank(self):
        oRule = architecture.rule_016()
        oRule.style = 'no_blank_line'

        oRule.analyze(self.oFile)
        lViolationLines = utils.extract_violation_lines_from_violation_object(oRule.violations)
        self.assertEqual([23], lViolationLines)

    def test_fix_rule_016_no_blank(self):
        oRule = architecture.rule_016()
        oRule.style = 'no_blank_line'

        oRule.fix(self.oFile)
        self.assertEqual(lExpected_no_blank, self.oFile.get_lines())

        # A fixed file must be free of violations.
        oRule.analyze(self.oFile)
        self.assertEqual([], oRule.violations)
): """ Decode the KoradSerial status byte. It appears that the firmware is a little wonky here. SOURCE: Taken from http://www.eevblog.com/forum/testgear/korad-ka3005p-io-commands/ Contents 8 bits in the following format Bit Item Description 0 CH1 0=CC mode, 1=CV mode 1 CH2 0=CC mode, 1=CV mode 2, 3 Tracking 00=Independent, 01=Tracking series,11=Tracking parallel 4 Beep 0=Off, 1=On 5 Lock 0=Lock, 1=Unlock 6 Output 0=Off, 1=On 7 N/A N/A """ def __init__(self, status): """ Initialize object with a KoradSerial status character. :param status: Status value :type status: int """ super(Status, self).__init__() self.raw = status self.channel1 = ChannelMode(status & 1) self.channel2 = ChannelMode((status >> 1) & 1) self.tracking = Tracking((status >> 2) & 3) self.beep = OnOffState((status >> 4) & 1) self.lock = OnOffState((status >> 5) & 1) self.output = OnOffState((status >> 6) & 1) def __repr__(self): return "{0}".format(self.raw) def __str__(self): return "Channel 1: {0}, Channel 2: {1}, Tracking: {2}, Beep: {3}, Lock: {4}, Output: {5}".format( self.channel1.name, self.channel2.name, self.tracking.name, self.beep.name, self.lock.name, self.output.name, ) def __unicode__(self): return self.__str__() def float_or_none(value): try: return float(value) except (TypeError, ValueError): return None class KoradSerial(object): """ Wrapper for communicating with a programmable KoradSerial KA3xxxxP power supply as a serial interface. """ class Channel(object): """ Wrap a channel. """ def __init__(self, serial_, channel_number): """ :type serial_: KoradSerial.Serial :type channel_number: int """ super(KoradSerial.Channel, self).__init__() self.__serial = serial_ self.number = channel_number @property def current(self): result = self.__serial.send_receive("ISET{0}?".format(self.number), fixed_length=6) # There's a bug that return a 6th character of previous output. 
# This has to be read and discarded otherwise it will be prepended to the next output return float_or_none(result[:5]) @current.setter def current(self, value): self.__serial.send("ISET{0}:{1:05.3f}".format(self.number, value)) @property def voltage(self): return float_or_none(self.__serial.send_receive("VSET{0}?".format(self.number), fixed_length=5)) @voltage.setter def voltage(self, value): self.__serial.send("VSET{0}:{1:05.2f}".format(self.number, value)) @property def output_current(self): """ Retrieve this channel's current current output. :return: Amperes :rtype: float or None """ result = self.__serial.send_receive("IOUT{0}?".format(self.number), fixed_length=5) return float_or_none(result) @property def output_voltage(self): """ Retrieve this channel's current current voltage. :return: Volts :rtype: float or None """ result = self.__serial.send_receive("VOUT{0}?".format(self.number), fixed_length=5) return float_or_none(result) class Memory(object): """ Wrap a memory setting. """ def __init__(self, serial_, memory_number): super(KoradSerial.Memory, self).__init__() self.__serial = serial_ self.number = memory_number def recall(self): """ Recall this memory's settings. """ self.__serial.send("RCL{0}".format(self.number)) def save(self): """ Save the current voltage and current to this memory. """ self.__serial.send("SAV{0}".format(self.number)) class OnOffButton(object): """ Wrap an off/off button. """ def __init__(self, serial_, on_command, off_command): super(KoradSerial.OnOffButton, self).__init__() self.__serial = serial_ self._on = on_command self._off = off_command def on(self): self.__serial.send(self._on) def off(self): self.__serial.send(self._off) class Serial(object): """ Serial operations. There are some quirky things in communication. They go here. """ def __init__(self, port, debug=False): super(KoradSerial.Serial, self).__init__() self.debug = debug self.port = serial.Serial(por
t, 9600, timeout=1) def read_character(self): c = self.port.read(1).decode('ascii') if self.debug: if len(c) > 0: print("read: {0} = '{1}'".format(ord(c), c)) else: print("read: timeout") return c def read_string(self, fixed_length=None): """ Read a string. It appears that the KoradSerial PSU returns zero-
terminated strings. :return: str """ result = [] c = self.read_character() while len(c) > 0 and ord(c) != 0: result.append(c) if fixed_length is not None and len(result) == fixed_length: break c = self.read_character() return ''.join(result) def send(self, text): if self.debug: print("_send: ", text) sleep(0.1) self.port.write(text.encode('ascii')) def send_receive(self, text, fixed_length=None): self.send(text) return self.read_string(fixed_length) def __init__(self, port, debug=False): super(KoradSerial, self).__init__() self.__serial = KoradSerial.Serial(port, debug) # Channels: adjust voltage and current, discover current output voltage. self.channels = [KoradSerial.Channel(self.__serial, i) for i in range(1, 3)] # Memory recall/save buttons 1 through 5 self.memories = [KoradSerial.Memory(self.__serial, i) for i in range(1, 6)] # Second column buttons self.beep = KoradSerial.OnOffButton(self.__serial, "BEEP1", "BEEP0") self.output = KoradSerial.OnOffButton(self.__serial, "OUT1", "OUT0") self.over_current_protection = KoradSerial.OnOffButton(self.__serial, "OCP1", "OCP0") self.over_voltage_protection = KoradSerial.OnOffButton(self.__serial, "OVP1", "OVP0") def __enter__(self): """ See documentation for Python's ``with`` command. """ return self def __exit__(self, type, value, traceback): """ See documentation for Python's ``with`` command. """ self.close() return False # ################################################################################ # Serial operations # ################################################################################ @property def is_open(self): """ Report whether the serial port is open. 
:rtype: bool """ return self.__serial.port.isOpen() def close(self): """ Close the serial port """ self.__serial.port.close() def open(self): """ Open the serial port """ self.__serial.port.open() # ################################################################################ # Power supply operations # ################################################################################ @property def model(self): """ Report the power supply model information. :rtype: str """ return self.__serial.send_rec
#!/usr/bin/env pyth
on import os, sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conext.settings") from django.core.management import execute_from_command_line
import conext.startup as startup startup.run() execute_from_command_line(sys.argv) pass
#!/usr/bin/python3
# -*- coding: utf-8 -*-

# health.py file is part of slpkg.

# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.

# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg

# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import os

from slpkg.messages import Msg
from slpkg.__metadata__ import MetaData as _meta_

from slpkg.pkg.find import find_package


class PackageHealth:
    """Health check installed packages

    Walks every package manifest under /var/log/packages/ and verifies
    that each recorded file still exists on disk, then prints a summary
    "health" percentage.
    """

    def __init__(self, mode):
        # mode: when falsy, every checked line is echoed (verbose).
        self.mode = mode
        self.meta = _meta_
        self.green = _meta_.color["GREEN"]
        self.red = _meta_.color["RED"]
        self.yellow = _meta_.color["YELLOW"]
        self.endc = _meta_.color["ENDC"]
        self.msg = Msg()
        self.pkg_path = _meta_.pkg_path
        self.installed = []
        # cn: count of manifest entries whose file is missing on disk.
        self.cn = 0

    def packages(self):
        """Get all installed packages from /var/log/packages/ path
        """
        self.installed = find_package("", self.pkg_path)

    def check(self, line, pkg):
        """Verify one manifest line: count and report the file if it is
        recorded but absent from the filesystem.
        """
        line = line.replace("\n", "")
        try:
            # Skip directories, .new config copies and packaging
            # metadata entries — none of these are real installed files.
            if (not line.endswith("/") and
                    not line.endswith(".new") and
                    not line.startswith("dev/") and
                    not line.startswith("install/") and
                    "/incoming/" not in line):
                if not os.path.isfile(r"/" + line):
                    self.cn += 1
                    print(f"Not installed: {self.red}/{line}{self.endc} --> {pkg}")
                elif not self.mode:
                    print(line)
        except IOError:
            print()
            raise SystemExit()

    def test(self):
        """Get started test each package and read file list
        """
        self.packages()
        self.cf = 0  # total manifest lines examined
        for pkg in self.installed:
            if os.path.isfile(f"{self.meta.pkg_path}{pkg}"):
                self.lf = 0  # lines seen in this package's manifest
                with open(self.pkg_path + pkg, "r") as fopen:
                    for line in fopen:
                        if "\0" in line:
                            print(f"Null: {line}")
                            break
                        self.cf += 1    # count all files
                        self.lf += 1    # count each package files
                        # The first ~19 lines are the manifest header,
                        # not file entries, so skip them.
                        if self.lf > 19:
                            self.check(line, pkg)
        self.results()

    def results(self):
        """Print results
        """
        print()
        # BUG FIX: guard against ZeroDivisionError when nothing was
        # counted at all.
        total = self.cf + self.cn
        per = int(round((float(self.cf) / total) * 100)) if total else 0
        # BUG FIX: the original chain (`per > 90` / `per < 90 and
        # per > 60` / `per < 60`) left `color` unbound when per was
        # exactly 90 or 60, crashing with UnboundLocalError.
        if per >= 90:
            color = self.green
        elif per >= 60:
            color = self.yellow
        else:
            color = self.red
        health = f"{color}{str(per)}%{self.endc}"
        self.msg.template(78)
        print(f"| Total files{' ' * 7}Not installed{' ' * 40}Health")
        self.msg.template(78)
        print(f"| {self.cf}{' ' * (18-len(str(self.cf)))}{self.cn}"
              f"{' ' * (55-len(str(self.cn)))}{health:>4}")
        self.msg.template(78)
# -*- coding: utf-8 -*- """Module to daemonize the current process on Unix.""" # # (C) Pywikibot team, 2007-2015 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __version__ = '$Id$' import codecs import os import sys is_daemon = False def daemonize(close_fd=True, chdir=True, write_pid=False, redirect_std=None): """ Daemonize the current process. Only works on POSIX compatible operating systems. The process will fork to the background and return control to terminal. @param close_fd: Close the standard streams and replace them by /dev/null @type close_fd: bool @param chdir: Change the current working directory to / @type chdir: bool @param write_pid: Write the pid to sys.argv[0] + '.pid' @type write_pid: bool @param redirect_std: Filename to redirect stdout and stdin to @type redirect_std: str """ # Fork away if not os.fork(): # Become session leader os.setsid() # Fork again to prevent the process from acquiring a # controlling terminal pid = os.fork() if not pid: global is_daemon is_daemon = True if close_fd: os.close(0) os.close(1) os.close(2) os.open('/dev/null', os.O_RDWR) if redirect_std: os.open(redirect_std, os.O_WRONLY | os.O_APPEND | os.O_CREAT) else:
os.dup2(0, 1) os.dup2(1, 2) if chdir: os.chdir('/') return else:
# Write out the pid path = os.path.basename(sys.argv[0]) + '.pid' with codecs.open(path, 'w', 'utf-8') as f: f.write(str(pid)) os._exit(0) else: # Exit to return control to the terminal # os._exit to prevent the cleanup to run os._exit(0)
fr
om .tobii_pro_wrapper import *
radm.URL_LOGIN, auth=(user.name, user.pwd))
        assert r.status_code == 200
        utoken = r.text

        # test cases: every (status filter, page, per_page) combination is
        # checked against a locally computed reference paging of the fixture
        for status, page, per_page in [
                (None, None, None),
                ('pending', None, None),
                ('accepted', None, None),
                ('rejected', None, None),
                ('preauthorized', None, None),
                (None, 1, 10),
                (None, 3, 10),
                (None, 2, 5),
                ('accepted', 1, 4),
                ('accepted', 2, 4),
                ('accepted', 5, 2),
                ('pending', 2, 2)]:
            # only send the query params that are set for this case
            qs_params = {}
            if status is not None:
                qs_params['status'] = status
            if page is not None:
                qs_params['page'] = page
            if per_page is not None:
                qs_params['per_page'] = per_page
            r = da.with_auth(utoken).call('GET',
                                          deviceauth_v2.URL_DEVICES,
                                          qs_params=qs_params)
            assert r.status_code == 200
            api_devs = r.json()
            # reference result: same filter + paging applied to the fixture
            ref_devs = filter_and_page_devs(devs_authsets,
                                            page=page,
                                            per_page=per_page,
                                            status=status)
            self._compare_devs(ref_devs, api_devs)

    def do_test_get_device(self, devs_authsets, user):
        """GET a single device: existing ids return the full device
        (compared against the fixture), unknown ids return 404."""
        da = ApiClient(deviceauth_v2.URL_MGMT)
        ua = ApiClient(useradm.URL_MGMT)

        # log in user
        r = ua.call('POST', useradm.URL_LOGIN, auth=(user.name, user.pwd))
        assert r.status_code == 200
        utoken = r.text

        # existing devices
        for dev in devs_authsets:
            r = da.with_auth(utoken).call('GET',
                                          deviceauth_v2.URL_DEVICE,
                                          path_params={'id': dev.id})
            assert r.status_code == 200
            api_dev = r.json()
            self._compare_dev(dev, api_dev)

        # non-existent devices
        for id in ['foo', 'bar']:
            r = da.with_auth(utoken).call('GET',
                                          deviceauth_v2.URL_DEVICE,
                                          path_params={'id': id})
            assert r.status_code == 404

    def do_test_delete_device_ok(self, devs_authsets, user, tenant_token=''):
        """Decommission devices and verify the side effects.

        A pending device is simply deleted; an accepted device is first
        logged in, then deleted, after which its device token must be
        rejected by the deployments service (401) and the device gone (404).
        """
        devapim = ApiClient(deviceauth_v2.URL_MGMT)
        devapid = ApiClient(deviceauth_v1.URL_DEVICES)
        userapi = ApiClient(useradm.URL_MGMT)
        depapi = ApiClient(deployments.URL_DEVICES)

        # log in user
        r = userapi.call('POST', useradm.URL_LOGIN, auth=(user.name, user.pwd))
        assert r.status_code == 200
        utoken = r.text

        # decommission a pending device
        dev_pending = filter_and_page_devs(devs_authsets, status='pending')[0]
        r = devapim.with_auth(utoken).call('DELETE',
                                           deviceauth_v2.URL_DEVICE,
                                           path_params={'id': dev_pending.id})
        assert r.status_code == 204

        # verify the device is gone
        r = devapim.with_auth(utoken).call('GET',
                                           deviceauth_v2.URL_DEVICE,
                                           path_params={'id': dev_pending.id})
        assert r.status_code == 404

        # log in an accepted device (its first auth set holds the keys)
        dev_acc = filter_and_page_devs(devs_authsets, status='accepted')[0]
        body, sighdr = deviceauth_v1.auth_req(dev_acc.id_data,
                                              dev_acc.authsets[0].pubkey,
                                              dev_acc.authsets[0].privkey,
                                              tenant_token)
        r = devapid.call('POST', deviceauth_v1.URL_AUTH_REQS, body, headers=sighdr)
        assert r.status_code == 200
        dtoken = r.text

        # decommission the accepted device
        r = devapim.with_auth(utoken).call('DELETE',
                                           deviceauth_v2.URL_DEVICE,
                                           path_params={'id': dev_acc.id})
        assert r.status_code == 204

        # verify the device lost access
        r = depapi.with_auth(dtoken).call('GET',
                                          deployments.URL_NEXT,
                                          qs_params={'device_type': 'foo', 'artifact_name': 'bar'})
        assert r.status_code == 401

        # verify the device is gone
        r = devapim.with_auth(utoken).call('GET',
                                           deviceauth_v2.URL_DEVICE,
                                           path_params={'id': dev_acc.id})
        assert r.status_code == 404

    def do_test_delete_device_not_found(self, devs_authsets, user):
        """DELETE on an unknown device id returns 404 and leaves the
        device list untouched."""
        ua = ApiClient(useradm.URL_MGMT)
        da = ApiClient(deviceauth_v2.URL_MGMT)

        # log in user
        r = ua.call('POST', useradm.URL_LOGIN, auth=(user.name, user.pwd))
        assert r.status_code == 200
        utoken = r.text

        # try delete
        r = da.with_auth(utoken).call('DELETE',
                                      deviceauth_v2.URL_DEVICE,
                                      path_params={'id': 'foo'})
        assert r.status_code == 404

        # check device list unmodified
        r = da.with_auth(utoken).call('GET', deviceauth_v2.URL_DEVICES)
        assert r.status_code == 200
        api_devs = r.json()
        self._compare_devs(devs_authsets, api_devs)

    def do_test_device_count(self, devs_authsets, user):
        """Device count endpoint: per-status counts match the fixture;
        an unknown status value is a 400."""
        ua = ApiClient(useradm.URL_MGMT)
        da = ApiClient(deviceauth_v2.URL_MGMT)

        # log in user
        r = ua.call('POST', useradm.URL_LOGIN, auth=(user.name, user.pwd))
        assert r.status_code == 200
        utoken = r.text

        # test cases: successful counts
        for status in [None,
                       'pending',
                       'accepted',
                       'rejected',
                       'preauthorized']:
            qs_params={}
            if status is not None:
                qs_params={'status': status}
            r = da.with_auth(utoken).call('GET',
                                          deviceauth_v2.URL_DEVICES_COUNT,
                                          qs_params=qs_params)
            assert r.status_code == 200
            count = r.json()
            # reference: unpaged filter over the fixture
            ref_devs = filter_and_page_devs(devs_authsets, status=status)
            ref_count = len(ref_devs)
            assert ref_count == count['count']

        # fail: bad request
        r = da.with_auth(utoken).call('GET',
                                      deviceauth_v2.URL_DEVICES_COUNT,
                                      qs_params={'status': 'foo'})
        assert r.status_code == 400

    def _compare_devs(self, devs, api_devs):
        """Pairwise-compare an API device list against reference devices."""
        assert len(api_devs) == len(devs)
        for i in range(len(api_devs)):
            self._compare_dev(devs[i], api_devs[i])

    def _compare_dev(self, dev, api_dev):
        """Compare one API device against its reference fixture device."""
        assert api_dev['id'] == dev.id
        assert api_dev['identity_data'] == dev.id_data
        assert api_dev['status'] == dev.status
        assert len(api_dev['auth_sets']) == len(dev.authsets)
        # GOTCHA: don't rely on indexing, authsets can get reshuffled
        # depending on actual contents (we don't order them, so it's up to mongo);
        # match auth sets by public key instead
        for api_aset in api_dev['auth_sets']:
            aset = [a for a in dev.authsets
                    if util.crypto.rsa_compare_keys(a.pubkey, api_aset['pubkey'])]
            assert len(aset) == 1
            aset = aset[0]
            compare_aset(aset, api_aset)

    def _filter_and_page_devs(self, devs, page=None, per_page=None, status=None):
        """Apply status filter and paging defaults (page 1, 20 per page)
        to a device list.

        NOTE(review): this method is truncated at the chunk boundary —
        the final slice/return is outside this view.
        """
        if status is not None:
            devs = [d for d in devs if d.status==status]

        if page is None:
            page = 1

        if per_page is None:
            per_page = 20

        lo = (page-1)*per_page
        hi = lo + per_page
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
"""Regenerate OpenStack Swift ring files in /etc/swift.

Removes stale builder/ring artifacts, then creates, populates and
rebalances object/container/account rings via swift-ring-builder for
every node listed in the project's ``nodes`` module.
"""
import os
import shutil
import subprocess

from nodes import storage_nodes as ips


def generate_rings():
    """Rebuild all Swift rings from scratch under /etc/swift.

    Side effects only: mutates PATH, changes the working directory,
    deletes old ring artifacts and shells out to swift-ring-builder.
    """
    print(os.environ["PATH"])
    # Make sure the private python install is found first on PATH, so the
    # swift-ring-builder from that install is the one invoked below.
    os.environ["PATH"] = '/home/mjwtom/install/python/bin' + ":" + os.environ["PATH"]
    print(os.environ["PATH"])

    dev = 'sdb1'
    etc_swift = '/etc/swift'
    if not os.path.exists(etc_swift):
        os.makedirs(etc_swift)

    # Drop ring-builder backups from a previous run (best-effort, matching
    # the old `rm -rf` behaviour, but without shelling out).
    backups = etc_swift + '/backups'
    if os.path.exists(backups):
        shutil.rmtree(backups, ignore_errors=True)

    print('current work path:%s' % os.getcwd())
    os.chdir(etc_swift)
    print('change work path to:%s' % os.getcwd())

    # Remove stale .builder/.gz artifacts so the rings are rebuilt cleanly.
    for name in os.listdir(etc_swift):
        path = etc_swift + '/' + name
        if os.path.isdir(path):
            continue
        _, extension = os.path.splitext(name)
        if extension in ('.builder', '.gz'):
            try:
                os.remove(path)
            except OSError as e:
                # best-effort cleanup: report and continue
                print(e)

    # create/add/rebalance each ring: part power 10, 3 replicas,
    # 1 hour min part move time
    for builder, port in [('object.builder', 6000),
                          ('object-1.builder', 6000),
                          ('object-2.builder', 6000),
                          ('container.builder', 6001),
                          ('account.builder', 6002)]:
        subprocess.call(['swift-ring-builder', builder, 'create', '10', '3', '1'])
        # one region/zone per node, weight 1 each
        for i, ip in enumerate(ips, start=1):
            subprocess.call(['swift-ring-builder', builder, 'add',
                             'r%dz%d-%s:%d/%s' % (i, i, ip, port, dev), '1'])
        subprocess.call(['swift-ring-builder', builder, 'rebalance'])


if __name__ == '__main__':
    generate_rings()
x) self._Az = np.empty_like(self._Ax) if 'hMTF' in self.compute: self._hMTF = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32). reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx))) self.dt = dt self.t_l_last = -1. self.t_h_last = -1. self.t = t0 @property def t(self): return self._t @t.setter def t(self, value): self._t = np.float32(value) # Update low/high times t_l = np.float32(np.floor(self._t / self.dt) * self.dt) t_h = t_l + self.dt if (t_l != self.t_l_last) or (t_h != self.t_h_last): # Only update t_h if 'going up' if t_l == self.t_h_last: if 'D' in self.compute: self._Dx[0] = self._Dx[1] self._Dy[0] = self._Dy[1] self._Dz[0] = self._Dz[1] if 'Diff' in self.compute: self._Diffx[0] = self._Diffx[1] self._Diffy[0] = self._Diffy[1] if 'Diff2' in self.compute: self._Diffxx[0] = self._Diffxx[1] self._Diffyy[0] = self._Diffyy[1] self._Diffxy[0] = self._Diffxy[1] if 'V' in self.compute: self._Vx[0] = self._Vx[1] self._Vy[0] = self._Vy[1] self._Vz[0] = self._Vz[1] if 'A' in self.compute: self._Ax[0] = self._Ax[1] self._Ay[0] = self._Ay[1] self._Az[0] = self._Az[1] if 'hMTF' in self.compute: self._hMTF[0] = self._hMTF[1] t_update = np.array([[1, t_h]]) else: t_update = np.array([[0, t_l], [1, t_h]]) # Initialize surface properties for t_i in t_update: if self.rank == self.root: self.surface.t = t_i[1] if 'D' in self.compute: Dx_f = (self.surface.Dx, (self.counts, self.displ), MPI.FLOAT) Dy_f = (self.surface.Dy, (self.counts, self.displ), MPI.FLOAT) Dz_f = (self.surface.Dz, (self.counts, self.displ), MPI.FLOAT) if 'Diff' in self.compute: Diffx_f = (self.surface.Diffx, (self.counts, self.displ), MPI.FLOAT) Diffy_f = (self.surface.Diffy, (self.counts, self.displ), MPI.FLOAT) if 'Diff2' in self.compute: Diffxx_f = (self.surface.Diffxx, (self.counts, self.displ), MPI.FLOAT) Diffyy_f = (self.surface.Diffyy, (self.counts, self.displ), MPI.FLOAT) Diffxy_f = (self.surface.Diffxy, (self.counts, self.displ), MPI.FLOAT) if 'V' in self.compute: Vx_f = 
(self.surface.Vx, (self.counts, self.displ), MPI.FLOAT) Vy_f = (self.surface.Vy, (self.counts, self.displ), MPI.FLOAT) Vz_f = (self.surface.Vz, (self.counts, self.displ), MPI.FLOAT) if 'A' in self.compute: Ax_f = (self.surface.Ax, (self.counts, self.displ), MPI.FLOAT) Ay_f = (self.surface.Ay, (self.counts, self.displ), MPI.FLOAT) Az_f = (self.surface.Az, (self.counts, self.displ), MPI.FLOAT) if 'hMTF' in self.compute: hMTF_f = (self.surface.hMTF, (self.counts, self.displ), MPI.FLOAT) else: if 'D' in self.compute: Dx_f = None
Dy_f = None Dz_f = None if 'Diff' in self.compute: Diffx_f = None Diffy_f = None if 'Diff2' in self.compute: Diffxx_f = None Diffyy_f = None Diffxy_f = None if 'V' in self.compute:
Vx_f = None Vy_f = None Vz_f = None if 'A' in self.compute: Ax_f = None Ay_f = None Az_f = None if 'hMTF' in self.compute: hMTF_f = None if 'D' in self.compute: self.comm.Scatterv( Dx_f, (self._Dx[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Dy_f, (self._Dy[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Dz_f, (self._Dz[int(t_i[0])], MPI.FLOAT), root=self.root) if 'Diff' in self.compute: self.comm.Scatterv( Diffx_f, (self._Diffx[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Diffy_f, (self._Diffy[int(t_i[0])], MPI.FLOAT), root=self.root) if 'Diff2' in self.compute: self.comm.Scatterv( Diffxx_f, (self._Diffxx[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Diffyy_f, (self._Diffyy[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Diffxy_f, (self._Diffxy[int(t_i[0])], MPI.FLOAT), root=self.root) if 'V' in self.compute: self.comm.Scatterv( Vx_f, (self._Vx[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Vy_f, (self._Vy[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Vz_f, (self._Vz[int(t_i[0])], MPI.FLOAT), root=self.root) if 'A' in self.compute: self.comm.Scatterv( Ax_f, (self._Ax[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Ay_f, (self._Ay[int(t_i[0])], MPI.FLOAT), root=self.root) self.comm.Scatterv( Az_f, (self._Az[int(t_i[0])], MPI.FLOAT), root=self.root) if 'hMTF' in self.compute: self.comm.Scatterv( hMTF_f, (self._hMTF[int(t_i[0])], MPI.FLOAT), root=self.root) self.t_l_last = t_l self.t_h_last = t_h # Apply linear interpolation w_h = np.float32((self._t - t_l) / self.dt) w_l = np.float32(1. 
- w_h) if 'D' in self.compute: self.Dx = w_l * self._Dx[0] + w_h * self._Dx[1] self.Dy = w_l * self._Dy[0] + w_h * self._Dy[1] self.Dz = w_l * self._Dz[0] + w_h * self._Dz[1] if 'Diff' in self.compute: self.Diffx = w_l * self._Diffx[0] + w_h * self._Diffx[1] self.Diffy = w_l * self._Diffy[0] + w_h * self._Diffy[1] if 'Diff2' in self.compute: self.Diffxx = w_l * self._Diffxx[0] + w_h * self._Diffxx[1] self.Diffyy = w_l * self._Diffyy[0] + w_h * self._Diffyy[1] self.Diffxy = w_l * self._Diffxy[0] + w_h * self._Diffxy[1] if 'V' in self.compute: self.Vx = w_l * self._Vx[0] + w_h * self._Vx[1] self.Vy = w_l * self._Vy[0] + w_h * self._Vy[1] self.Vz = w_l * self._Vz[0] + w_h * self._Vz[1] if 'A' in sel
from unittest import TestCase

from settings import settings

from office365.outlookservices.outlook_client import OutlookClient
from office365.runtime.auth.authentication_context import AuthenticationContext


class OutlookClientTestCase(TestCase):
    """Base test case that provides an authenticated Outlook client.

    Acquires a token once per test class (resource owner password grant,
    using credentials from ``settings``) and exposes the resulting
    ``OutlookClient`` as ``cls.client`` for subclasses.
    """

    @classmethod
    def setUpClass(cls):
        # Due to Outlook REST API v1.0 BasicAuth Deprecation
        # (refer https://developer.microsoft.com/en-us/office/blogs/outlook-rest-api-v1-0-basicauth-deprecation/)
        # NetworkCredentialContext class should be no longer utilized
        # ctx_auth = NetworkCredentialContext(username=settings['user_credentials']['username'],
        #                                     password=settings['user_credentials']['password'])
        ctx_auth = AuthenticationContext(url=settings['tenant'])
        # password grant: exchanges client + user credentials for a token
        ctx_auth.acquire_token_password_grant(client_credentials=settings['client_credentials'],
                                              user_credentials=settings['user_credentials'])
        cls.client = OutlookClient(ctx_auth)
from django.db import models
from django.utils import timezone


class ReceiveAddress(models.Model):
    """A receiving address that can be handed out once and then retired."""

    address = models.CharField(max_length=128, blank=True)
    available = models.BooleanField(default=True)

    @classmethod
    def newAddress(cls, address):
        """Build a fresh, available address record (not yet saved)."""
        instance = cls()
        instance.address = address
        instance.available = True
        return instance

    def use(self):
        """Mark this address as consumed and persist the change."""
        self.available = False
        self.save()


class MoneySent(models.Model):
    """An outgoing transfer, tracked through a simple status lifecycle:
    CREATED -> SENT -> CONFIRMED_IPN -> CONFIRMED_TRN."""

    from_address = models.CharField(max_length=128)
    to_address = models.CharField(max_length=128)
    value = models.DecimalField(max_digits=16, decimal_places=8)
    transaction_hash = models.CharField(max_length=128, null=True)
    status = models.CharField(max_length=30)
    creationDate = models.DateTimeField()
    lastChangeDate = models.DateTimeField()

    # lifecycle states
    CREATED = 'CREATED'
    SENT = 'SENT'
    CONFIRMED_IPN = 'CONFIRMED_IPN'
    CONFIRMED_TRN = 'CONFIRMED_TRN'

    @classmethod
    def newMoneySent(cls, from_address, to_address, value):
        """Build a new transfer in CREATED state (not yet saved)."""
        transfer = cls()
        transfer.from_address = from_address
        transfer.to_address = to_address
        transfer.value = value
        transfer.status = cls.CREATED
        now = timezone.now()
        transfer.creationDate = now
        transfer.lastChangeDate = now
        return transfer

    def touch(self):
        """Refresh the last-change timestamp (does not save)."""
        self.lastChangeDate = timezone.now()

    def sent(self, transaction_hash):
        """Record the broadcast transaction hash and move to SENT."""
        self.status = self.SENT
        self.transaction_hash = transaction_hash
        self.touch()
        self.save()

    def confirm_ipn(self):
        """Move to CONFIRMED_IPN, but only from CREATED or SENT —
        a transaction-level confirmation must not be downgraded."""
        if self.status in (self.CREATED, self.SENT):
            self.status = self.CONFIRMED_IPN
            self.touch()
            self.save()

    def confirm_trn(self):
        """Unconditionally mark as confirmed on-chain (final state)."""
        self.status = self.CONFIRMED_TRN
        self.touch()
        self.save()
pos = [randint(-c_w, w - c_w), randint(-c_h, h - c_h)]
                vel = [vx * random0(), vy * random0()]
                cs.append((pos, vel, s))
        # NOTE(review): the enclosing `if` of this elif is outside this
        # chunk; indentation of the fragment above is inferred.
        elif cp is not None:
            self.current_cp = cp
        data = conf.LEVELS[ID]
        # background
        self.bgs = data.get('bgs', conf.DEFAULT_BGS)
        # player: spawn at the current checkpoint (centred on it), else at
        # the level's start position
        if self.current_cp >= 0:
            p = list(data['checkpoints'][self.current_cp][:2])
            s_p, s_c = conf.PLAYER_SIZE, conf.CHECKPOINT_SIZE
            for i in (0, 1):
                p[i] += float(s_c[i] - s_p[i]) / 2
        else:
            p = data['player_pos']
        self.player = Player(self, p)
        # window: centred on the player, sized from HALF_WINDOW_SIZE
        x, y = Rect(self.to_screen(self.player.rect)).center
        w, h = conf.HALF_WINDOW_SIZE
        self.window = Rect(x - w, y - h, 2 * w, 2 * h)
        self.old_window = self.window.copy()
        # checkpoints
        s = conf.CHECKPOINT_SIZE
        self.checkpoints = [Rect(p + s) for p in data.get('checkpoints', [])]
        # goal
        self.goal = Rect(data['goal'] + conf.GOAL_SIZE)
        self.goal_img = self.goal.move(conf.GOAL_OFFSET)
        self.goal_img.size = self.imgs['goal'].get_size()
        # stars (already-collected ones are flagged via conf.STARS)
        self.stars = [Star(self, p, [ID, i] in conf.STARS)
                      for i, p in enumerate(data.get('stars', []))]
        if self.star_channel is not None and not all(s.got for s in self.stars):
            self.star_channel.unpause()
        # rects
        self.all_rects = [Rect(r) for r in data.get('rects', [])]
        self.all_vrects = [Rect(r) for r in data.get('vrects', [])]
        self.arects = [Rect(r) for r in data.get('arects', [])]
        self.update_rects()

    def skip (self, evt):
        """Event handler: restart early during the death animation, or
        (debug only) teleport the player to the window centre on click."""
        if self.dying and self.dying_counter < conf.DIE_SKIP_THRESHOLD and \
           not (evt.type == pg.KEYDOWN and evt.key in conf.KEYS_BACK) and \
           not self.winning:
            self.init()
        elif conf.DEBUG and evt.type == pg.MOUSEBUTTONDOWN:
            r = self.player.rect
            c = self.window.center
            print 'moving to', c
            for i in (0, 1):
                r[i] = c[i] - (r[i + 2] / 2)
            self.player.old_rect = r

    def pause (self, *args):
        """Pause sounds and switch to the pause UI backend."""
        if self.move_channel is not None:
            self.move_channel.pause()
        if self.star_channel is not None:
            self.star_channel.pause()
        self.game.start_backend(ui.Paused, self)
        self.paused = True

    def reset (self, *args):
        """Restart the level, unless the win sequence is running."""
        if not self.winning:
            self.init()

    def jump (self, key, mode, mods):
        # mode == 0 on the initial press; held repeats pass mode != 0
        self.player.jump(mode == 0)

    def move (self, key, mode, mods, i):
        # i is the movement direction index
        self.player.move(i)

    def update_window (self):
        """Recompute the up-to-8 screen rects that lie OUTSIDE the window
        (the 3x3 grid around it, minus the centre cell)."""
        w = self.window
        wp0 = w.topleft
        wp1 = w.bottomright
        s = conf.RES
        self.inverse_win = rs = []
        for px in (0, 1, 2):
            for py in (0, 1, 2):
                if px == py == 1:
                    # centre cell is the window itself
                    continue
                r = [0, 0, 0, 0]
                for i, p in enumerate((px, py)):
                    if p == 0:
                        r[i + 2] = wp0[i]
                    if p == 1:
                        r[i] = wp0[i]
                        r[i + 2] = wp1[i] - wp0[i]
                    elif p == 2:
                        r[i] = wp1[i]
                        r[i + 2] = s[i] - wp1[i]
                # keep only cells with positive area
                if r[2] > 0 and r[3] > 0:
                    rs.append(Rect(r))

    def get_clip (self, r1, r2, err = 0):
        """Return the overlap rect of r1 and r2 if both its dimensions
        exceed err, else None (implicit)."""
        x01, y01, w, h = r1
        x11, y11 = x01 + w, y01 + h
        x02, y02, w, h = r2
        x12, y12 = x02 + w, y02 + h
        x0, y0 = max(x01, x02), max(y01, y02)
        x1, y1 = min(x11, x12), min(y11, y12)
        w, h = x1 - x0, y1 - y0
        if w > err and h > err:
            return (x0, y0, w, h)

    def update_rects (self):
        """Clip the level's solid rects to the window (rects) and to the
        inverse-window cells (vrects)."""
        self.update_window()
        # rects: visible inside the window
        self.rects = rects = []
        self.draw_rects = draw = []
        w = self.window
        for r in self.all_rects:
            c = w.clip(r)
            if c:
                rects.append(c)
                draw.append(r)
        # vrects: solid only outside the window
        self.vrects = rects = []
        ws = self.inverse_win
        for r in self.all_vrects:
            for w in ws:
                c = w.clip(r)
                if c:
                    rects.append(c)

    def handle_collisions (self):
        """Resolve player/rect overlaps by pushing the player out along
        the axis of least penetration; kill the player if still stuck."""
        get_clip = self.get_clip
        p = self.player.rect
        p0 = list(p)
        for r in self.rects + self.vrects + self.arects:
            if get_clip(r, p):
                r_x0, r_y0, w, h = r
                r_x1, r_y1 = r_x0 + w, r_y0 + h
                p_x0, p_y0, w, h = p
                p_x1, p_y1 = p_x0 + w, p_y0 + h
                # smallest overlap picks the push-out direction (0-3)
                x, dirn = min((p_x1 - r_x0, 0), (p_y1 - r_y0, 1),
                              (r_x1 - p_x0, 2), (r_y1 - p_y0, 3))
                axis = dirn % 2
                p[axis] += (1 if dirn >= 2 else -1) * x
                self.player.impact(axis, 0)
                if axis == 1:
                    self.vert_dirn = dirn
        # screen left/right
        if p[0] < 0:
            p[0] = 0
            self.player.impact(0, 0)
        elif p[0] + p[2] > conf.RES[0]:
            p[0] = conf.RES[0] - p[2]
            self.player.impact(0, 0)
        # die if still colliding (crushed): overlap beyond the error margin
        axes = set()
        e = conf.ERR
        colliding = [r for r in self.rects + self.vrects + self.arects
                     if get_clip(r, p, e)]
        if colliding:
            for r in colliding:
                r_x0, r_y0, w, h = r
                r_x1, r_y1 = r_x0 + w, r_y0 + h
                p_x0, p_y0, w, h = p
                p_x1, p_y1 = p_x0 + w, p_y0 + h
                x, dirn = min((p_x1 - r_x0, 0), (p_y1 - r_y0, 1),
                              (r_x1 - p_x0, 2), (r_y1 - p_y0, 3))
                axes.add(dirn % 2)
            # particle direction depends on which axes are crushing
            if len(axes) == 2:
                dirn = .5
            else:
                dirn = .95 if axes.pop() == 0 else .1
            self.die(dirn)

    def die (self, dirn = .5):
        """Start the death sequence: particles, sound, countdown."""
        self.first_dying = True
        self.dying = True
        self.dying_counter = conf.DIE_TIME
        # particles
        pos = list(Rect(self.to_screen(self.player.rect)).center)
        self.add_ptcls('die', pos, dirn)
        # sound
        if self.move_channel is not None:
            self.move_channel.pause()
        self.game.play_snd('die')

    def next_level (self, save = True, progress = True):
        """Advance to the next level, or to level select when finished.

        save: persist the progression; progress: actually switch screens.
        """
        if progress:
            if self.move_channel is not None:
                self.move_channel.pause()
            if self.star_channel is not None:
                self.star_channel.pause()
        i = self.ID
        if not conf.COMPLETED and i + 1 in conf.EXISTS:
            # there's a next level
            if save:
                conf.CURRENT_LEVEL = i + 1
            if progress:
                self.init(i + 1)
        else:
            if save:
                conf.COMPLETED = True
            if progress:
                self.game.switch_backend(ui.LevelSelect)

    def win (self):
        """Handle reaching the goal: record completion, then fade out and
        move on (idempotent while winning)."""
        if self.winning:
            return
        self.winning = True
        # save progression now, but defer the actual switch to the fade
        self.next_level(progress = False)
        if self.ID not in conf.COMPLETED_LEVELS:
            conf.COMPLETED_LEVELS.append(self.ID)
            conf.dump()
        self.start_fading(lambda: self.next_level(False))

    def update (self):
        """Per-frame update.

        NOTE(review): truncated at the chunk boundary — the window-move
        logic continues past this view.
        """
        # fade counter
        if self.fading:
            self.fade_counter -= 1
            if self.fade_counter == 0:
                self.fading = False
                del self.fade_sfc
                self.fade_cb()
        # move player
        if not self.dying:
            pl = self.player
            pl.update()
        # get amount to move window by (mouse delta from the centre)
        w = self.window
        self.old_window = w.copy()
        x0, y0 = self.centre
        if self.paused or self.first:
            dx = dy = 0
            self.first = False
        else:
            x, y = pg.mouse.get_pos()
            dx, dy = x - x0, y - y0
        # don't move too far outside the screen
        w_moved = w.move(dx, dy).clamp(self.window_bds)
        dx, dy = w_mo