commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
ad0608847e8eef659f57d580c3816af653275339 | Fix paddle.init bug | putcn/Paddle,chengduoZH/Paddle,tensor-tang/Paddle,lcy-seso/Paddle,Canpio/Paddle,Canpio/Paddle,lcy-seso/Paddle,lispc/Paddle,reyoung/Paddle,Canpio/Paddle,chengduoZH/Paddle,luotao1/Paddle,tensor-tang/Paddle,tensor-tang/Paddle,hedaoyuan/Paddle,luotao1/Paddle,reyoung/Paddle,QiJune/Paddle,lispc/Paddle,baidu/Paddle,baidu/Paddle,pkuyym/Paddle,pkuyym/Paddle,jacquesqiao/Paddle,Canpio/Paddle,jacquesqiao/Paddle,PaddlePaddle/Paddle,lispc/Paddle,lcy-seso/Paddle,hedaoyuan/Paddle,pengli09/Paddle,yu239/Paddle,putcn/Paddle,Canpio/Paddle,pkuyym/Paddle,lispc/Paddle,putcn/Paddle,PaddlePaddle/Paddle,QiJune/Paddle,hedaoyuan/Paddle,PaddlePaddle/Paddle,pkuyym/Paddle,pengli09/Paddle,QiJune/Paddle,hedaoyuan/Paddle,pkuyym/Paddle,QiJune/Paddle,QiJune/Paddle,baidu/Paddle,chengduoZH/Paddle,hedaoyuan/Paddle,baidu/Paddle,reyoung/Paddle,hedaoyuan/Paddle,yu239/Paddle,luotao1/Paddle,lcy-seso/Paddle,baidu/Paddle,jacquesqiao/Paddle,luotao1/Paddle,putcn/Paddle,QiJune/Paddle,luotao1/Paddle,lispc/Paddle,luotao1/Paddle,jacquesqiao/Paddle,Canpio/Paddle,lcy-seso/Paddle,pengli09/Paddle,pengli09/Paddle,pengli09/Paddle,reyoung/Paddle,PaddlePaddle/Paddle,reyoung/Paddle,pengli09/Paddle,reyoung/Paddle,lispc/Paddle,PaddlePaddle/Paddle,hedaoyuan/Paddle,Canpio/Paddle,PaddlePaddle/Paddle,lispc/Paddle,chengduoZH/Paddle,yu239/Paddle,jacquesqiao/Paddle,tensor-tang/Paddle,yu239/Paddle,yu239/Paddle,Canpio/Paddle,yu239/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,pengli09/Paddle,jacquesqiao/Paddle,chengduoZH/Paddle,lispc/Paddle,tensor-tang/Paddle,hedaoyuan/Paddle,pengli09/Paddle,yu239/Paddle,putcn/Paddle,pkuyym/Paddle,yu239/Paddle,lcy-seso/Paddle,putcn/Paddle | python/paddle/v2/__init__.py | python/paddle/v2/__init__.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optimizer
import layer
import activation
import parameters
import trainer
import event
import data_type
import topology
import data_feeder
import networks
import evaluator
from . import dataset
from . import reader
from . import plot
import attr
import pooling
import inference
import networks
import py_paddle.swig_paddle as api
import minibatch
import plot
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
'topology', 'networks', 'infer', 'plot', 'evaluator'
]
def init(**kwargs):
args = []
args_dict = {}
# NOTE: append arguments if they are in ENV
for ek, ev in os.environ.iteritems():
if ek.startswith("PADDLE_INIT_"):
args_dict[ek.replace("PADDLE_INIT_", "").lower()] = str(ev)
args_dict.update(kwargs)
# NOTE: overwrite arguments from ENV if it is in kwargs
for key in args_dict.keys():
args.append('--%s=%s' % (key, str(args_dict[key])))
api.initPaddle(*args)
infer = inference.infer
batch = minibatch.batch
| # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optimizer
import layer
import activation
import parameters
import trainer
import event
import data_type
import topology
import data_feeder
import networks
import evaluator
from . import dataset
from . import reader
from . import plot
import attr
import pooling
import inference
import networks
import py_paddle.swig_paddle as api
import minibatch
import plot
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
'topology', 'networks', 'infer', 'plot', 'evaluator'
]
def init(**kwargs):
args = []
args_dict = {}
# NOTE: append arguments if they are in ENV
for ek, ev in os.environ.iteritems():
if ek.startswith("PADDLE_INIT_"):
args_dict[ek.replace("PADDLE_INIT_", "").lower()] = str(ev)
args_dict.update(kwargs)
# NOTE: overwrite arguments from ENV if it is in kwargs
for key in args_dict.keys():
args.append('--%s=%s' % (key, str(kwargs[key])))
api.initPaddle(*args)
infer = inference.infer
batch = minibatch.batch
| apache-2.0 | Python |
cf54719f3a383f836bb9e4671d8e36ca8ebc0baa | fix the error | nanshihui/PocCollect,nanshihui/PocCollect | cms/phpcms/__init__.py | cms/phpcms/__init__.py | KEYWORDS = ['phpcms', ]
def rules(head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
if 'phpcms' in context or 'phpcms' in head:
return True
else:
return False | KEYWORDS = ['huaficms', ]
def rules(head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
if 'huaficms' in context or 'huaficms' in head:
return True
else:
return False | mit | Python |
ea0a31ce5f273c193b23548064efb04a85af9ce6 | Update hook-visvis.py | UmSenhorQualquer/pythonVideoAnnotator | build_settings/win/hooks/hook-visvis.py | build_settings/win/hooks/hook-visvis.py | from PyInstaller.utils.hooks import collect_submodules, collect_data_files
from visvis.freezeHelp import getIncludes
import visvis
import visvis.core
import visvis.processing
modules = getIncludes('qt4')
hiddenimports = tuple([collect_submodules(module) for module in modules]+[collect_submodules('visvis.core')+collect_submodules('visvis.processing')])
datas = collect_data_files('visvis', include_py_files=True)
| from PyInstaller.utils.hooks import collect_submodules, collect_data_files
import visvis
import visvis.core
import visvis.processing
hiddenimports = (collect_submodules('visvis.core') +
collect_submodules('visvis.processing'))
datas = collect_data_files('visvis', include_py_files=True)
| mit | Python |
032df1fe8649a622611e80e5e1efacdba4d7cafe | reorganize imports | diyclassics/cltk,mbevila/cltk,coderbhupendra/cltk,TylerKirby/cltk,kylepjohnson/cltk,D-K-E/cltk,cltk/cltk,LBenzahia/cltk,TylerKirby/cltk,LBenzahia/cltk | cltk/tests/test_ner.py | cltk/tests/test_ner.py | """Test cltk.ner."""
from cltk.ner import ner
from cltk.stem.latin.j_v import JVReplacer
import os
import unittest
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
def test_check_latest_latin(self):
"""Test _check_latest_data()"""
ner._check_latest_data('latin')
names_path = os.path.expanduser('~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_make_ner_str_list_latin(self):
"""Test make_ner()."""
jv_replacer = JVReplacer()
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.make_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_make_ner_list_list_latin(self):
"""Test make_ner()."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.make_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus',), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
if __name__ == '__main__':
unittest.main()
| """Test cltk.ner."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.ner import ner
from cltk.stem.latin.j_v import JVReplacer
import os
import unittest
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
def test_check_latest_latin(self):
"""Test _check_latest_data()"""
ner._check_latest_data('latin')
names_path = os.path.expanduser('~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_make_ner_str_list_latin(self):
"""Test make_ner()."""
jv_replacer = JVReplacer()
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.make_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_make_ner_list_list_latin(self):
"""Test make_ner()."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.make_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus',), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
if __name__ == '__main__':
unittest.main()
| mit | Python |
91eeee8abbf516f00ce891076433c9222247bf67 | Add display names to historical calendars. | jwg4/qual,jwg4/calexicon | qual/calendars/historical.py | qual/calendars/historical.py | from datetime import date
from base import Calendar
from date import InvalidDate
from main import JulianCalendar
class JulianToGregorianCalendar(Calendar):
def date(self, year, month, day):
gregorian_date = date(year, month, day)
if gregorian_date < self.first_gregorian_day:
julian_date = JulianCalendar().date(year, month, day)
if not julian_date < self.first_gregorian_day:
raise InvalidDate("This is a 'missing day' when the calendars changed.")
self.bless(julian_date)
return julian_date
return self.from_date(gregorian_date)
def bless(self, date):
date.calendar = self.__class__
class EnglishHistoricalCalendar(JulianToGregorianCalendar):
display_name = "English Historical Calendar"
first_gregorian_day = date(1752, 9, 14)
class SpanishHistoricalCalendar(JulianToGregorianCalendar):
display_name = "Spanish Historical Calendar"
first_gregorian_day = date(1582, 10, 15)
class FrenchHistoricalCalendar(JulianToGregorianCalendar):
display_name = "French Historical Calendar"
first_gregorian_day = date(1582, 12, 20)
| from datetime import date
from base import Calendar
from date import InvalidDate
from main import JulianCalendar
class JulianToGregorianCalendar(Calendar):
def date(self, year, month, day):
gregorian_date = date(year, month, day)
if gregorian_date < self.first_gregorian_day:
julian_date = JulianCalendar().date(year, month, day)
if not julian_date < self.first_gregorian_day:
raise InvalidDate("This is a 'missing day' when the calendars changed.")
self.bless(julian_date)
return julian_date
return self.from_date(gregorian_date)
def bless(self, date):
date.calendar = self.__class__
class EnglishHistoricalCalendar(JulianToGregorianCalendar):
first_gregorian_day = date(1752, 9, 14)
class SpanishHistoricalCalendar(JulianToGregorianCalendar):
first_gregorian_day = date(1582, 10, 15)
class FrenchHistoricalCalendar(JulianToGregorianCalendar):
first_gregorian_day = date(1582, 12, 20)
| apache-2.0 | Python |
8ca30840b5477239625c93f4799f266f0a971e3d | Add query to json output in json generation test | uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco | raco/datalog/datalog_test.py | raco/datalog/datalog_test.py | import unittest
import json
import raco.fakedb
from raco import RACompiler
from raco.language import MyriaAlgebra
from myrialang import compile_to_json
class DatalogTestCase(unittest.TestCase):
def setUp(self):
self.db = raco.fakedb.FakeDatabase()
def execute_query(self, query):
'''Run a test query against the fake database'''
#print query
dlog = RACompiler()
dlog.fromDatalog(query)
#print dlog.logicalplan
dlog.optimize(target=MyriaAlgebra,
eliminate_common_subexpressions=False)
#print dlog.physicalplan
# test whether we can generate json without errors
json_string = json.dumps(compile_to_json(
query, dlog.logicalplan, dlog.physicalplan))
assert json_string
op = dlog.physicalplan[0][1]
output_op = raco.algebra.StoreTemp('__OUTPUT__', op)
self.db.evaluate(output_op)
return self.db.get_temp_table('__OUTPUT__')
def run_test(self, query, expected):
'''Execute a test query with an expected output'''
actual = self.execute_query(query)
self.assertEquals(actual, expected)
| import unittest
import json
import raco.fakedb
from raco import RACompiler
from raco.language import MyriaAlgebra
from myrialang import compile_to_json
class DatalogTestCase(unittest.TestCase):
def setUp(self):
self.db = raco.fakedb.FakeDatabase()
def execute_query(self, query):
'''Run a test query against the fake database'''
#print query
dlog = RACompiler()
dlog.fromDatalog(query)
#print dlog.logicalplan
dlog.optimize(target=MyriaAlgebra,
eliminate_common_subexpressions=False)
#print dlog.physicalplan
# test whether we can generate json without errors
json_string = json.dumps(compile_to_json(
"", dlog.logicalplan, dlog.physicalplan))
assert json_string
op = dlog.physicalplan[0][1]
output_op = raco.algebra.StoreTemp('__OUTPUT__', op)
self.db.evaluate(output_op)
return self.db.get_temp_table('__OUTPUT__')
def run_test(self, query, expected):
'''Execute a test query with an expected output'''
actual = self.execute_query(query)
self.assertEquals(actual, expected)
| bsd-3-clause | Python |
84adbc29ec97491b881f6636f6d74b94032cdeb7 | modify version code | DeanThompson/pyelong | pyelong/__init__.py | pyelong/__init__.py | # -*- coding: utf-8 -*-
from client import Client
from request import Request
from response import Response
__version__ = '0.3.6'
__all__ = ('Client', 'Request', 'Response')
| # -*- coding: utf-8 -*-
from client import Client
from request import Request
from response import Response
__version__ = '0.3.5'
__all__ = ('Client', 'Request', 'Response')
| mit | Python |
f443a8d254f58b0640d68a52185f83967a7ae7e5 | add trigram | danche354/Sequence-Labeling | tools/prepare.py | tools/prepare.py |
import numpy as np
import hashing
import conf
embedding_dict = conf.senna_dict
emb_vocab = conf.senna_vocab
auto_encoder_dict = conf.auto_encoder_dict
auto_vocab = conf.auto_vocab
step_length = conf.step_length
feature_length = conf.feature_length
ALL_IOB = conf.ALL_IOB_encode
NP_IOB = conf.NP_IOB_encode
POS = conf.POS_encode
def prepare_chunk_encoder(batch):
hashing = hashing.sen2matrix(batch)
return hashing
def prepare_chunk(batch, trigram=False, chunk_type='NP', step_length=step_length, feature_length=feature_length):
if chunk_type=='ALL':
IOB = ALL_IOB
else:
IOB = NP_IOB
embedding_index = []
auto_encoder_index = []
pos = []
label = []
sentence_length = []
sentences = []
for sentence in batch:
# sentence and sentence pos
sequence = list(sentence[0])
sequence_pos = list(sentence[1])
# for trigram
if trigram:
# add start and end mark
sequence.insert(0, '#')
sequence.append('#')
sequence_pos.insert(0, '#')
sequence_pos.append('#')
_embedding_index = [embedding_dict.get(each.strip().lower(), emb_vocab+1) for each in sequence]
_auto_encoder_index = [auto_encoder_dict.get(each.strip().lower(), auto_vocab+1) for each in sequence]
_sentence.append(sentence[0])
_pos = [POS[each] for each in sequence_pos]
_label = [IOB[each] for each in sentence[2]]
length = len(label)
_label.extend([0]*(step_length-length))
_embedding_index.extend([0]*(step_length-length))
_auto_encoder_index.extend([0]*(step_length-length))
embedding_index.append(_embedding_index)
auto_encoder_index.append(_auto_encoder_index)
pos.append(_pos)
label.append(_label)
# record the sentence length for calculate accuracy
sentence_length.append(length)
return np.array(embedding_index), np.array(auto_encoder_index), np.array(pos), np.array(label), np.array(sentence_length), sentences
|
import numpy as np
import hashing
import conf
embedding_dict = conf.senna_dict
emb_vocab = conf.senna_vocab
auto_encoder_dict = conf.auto_encoder_dict
auto_vocab = conf.auto_vocab
step_length = conf.step_length
feature_length = conf.feature_length
ALL_IOB = conf.ALL_IOB_encode
NP_IOB = conf.NP_IOB_encode
POS = conf.POS_encode
def prepare_chunk_encoder(batch):
hashing = hashing.sen2matrix(batch)
return hashing
def prepare_chunk(batch, trigram=False, chunk_type='NP', step_length=step_length, feature_length=feature_length):
if chunk_type=='ALL':
IOB = ALL_IOB
else:
IOB = NP_IOB
embedding_index = []
auto_encoder_index = []
pos = []
label = []
sentence_length = []
sentences = []
for sentence in batch:
# sentence and sentence pos
sequence = list(sentence[0])
sequence_pos = list(sentence[1])
if trigram:
# add start and end mark
sequence.insert(0, '#')
sequence.append('#')
sequence_pos.insert(0, '#')
sequence_pos.append('#')
_embedding_index = [embedding_dict.get(each.strip().lower(), emb_vocab+1) for each in sequence]
_auto_encoder_index = [auto_encoder_dict.get(each.strip().lower(), auto_vocab+1) for each in sequence]
_sentence.append(sentence[0])
_pos = [POS[each] for each in sequence_pos]
_label = [IOB[each] for each in sentence[2]]
length = len(label)
_label.extend([0]*(step_length-length))
_embedding_index.extend([0]*(step_length-length))
_auto_encoder_index.extend([0]*(step_length-length))
embedding_index.append(_embedding_index)
auto_encoder_index.append(_auto_encoder_index)
pos.append(_pos)
label.append(_label)
# record the sentence length for calculate accuracy
sentence_length.append(length)
return np.array(embedding_index), np.array(auto_encoder_index), np.array(pos), np.array(label), np.array(sentence_length), sentences
| mit | Python |
99feb65afd3ab18791932b50aa11f76c8f45d5d8 | use text() in XPath query | eka/pygrabbit | pygrabbit/parser.py | pygrabbit/parser.py | import requests
from lxml import html
from ._compat import urljoin
from ._helpers import cached_attribute
class PyGrabbit:
headers = {'User-agent': 'PyGrabbit 0.1'}
def __init__(self, url):
self.url = url
self._tree = None
self._content = requests.get(url, headers=self.headers).text
self._tree = html.fromstring(self._content)
def _image_absolute_uri(self, image_path):
return urljoin(self.url, image_path)
def select(self, *queries):
for query in queries:
node = self._tree.xpath(query)
if node:
return node
return []
@cached_attribute
def title(self):
text = self.select(
'//meta[@property="og:title"]/@content',
'//meta[@name="twitter:title"]/@content',
'//title/text()',
)
if text:
return text[0].strip()
@cached_attribute
def description(self):
text = self.select(
'//meta[@property="og:description"]/@content',
'//meta[@name="description"]/@content',
)
if text:
return text[0].strip()
@cached_attribute
def images(self):
nodes = self.select(
'//meta[@property="og:image"]/@content',
'//meta[@name="twitter:image:src"]/@content',
'//img[@id="main-image" or @id="prodImage"]/@src',
'//img[not(ancestor::*[contains(@id, "sidebar") or contains(@id, "comment") or contains(@id, "footer") or contains(@id, "header")]) and ancestor::*[contains(@id, "content")]]/@src',
'//img[not(ancestor::*[contains(@id, "sidebar") or contains(@id, "comment") or contains(@id, "footer") or contains(@id, "header")])]/@src',
'//img/@src',
)
return [self._image_absolute_uri(k) for k in nodes]
| import requests
from lxml import html
from ._compat import urljoin
from ._helpers import cached_attribute
class PyGrabbit:
headers = {'User-agent': 'PyGrabbit 0.1'}
def __init__(self, url):
self.url = url
self._tree = None
self._content = requests.get(url, headers=self.headers).text
self._tree = html.fromstring(self._content)
def _image_absolute_uri(self, image_path):
return urljoin(self.url, image_path)
def select(self, *queries):
for query in queries:
node = self._tree.xpath(query)
if node:
return node
return []
@cached_attribute
def title(self):
text = self.select(
'//meta[@property="og:title"]/@content',
'//meta[@name="twitter:title"]/@content',
)
if text:
return text[0].strip()
res = self.select('//title')
if res:
res = res[0].text.strip()
return res
@cached_attribute
def description(self):
text = self.select(
'//meta[@property="og:description"]/@content',
'//meta[@name="description"]/@content',
)
if text:
return text[0].strip()
@cached_attribute
def images(self):
nodes = self.select(
'//meta[@property="og:image"]/@content',
'//meta[@name="twitter:image:src"]/@content',
'//img[@id="main-image" or @id="prodImage"]/@src',
'//img[not(ancestor::*[contains(@id, "sidebar") or contains(@id, "comment") or contains(@id, "footer") or contains(@id, "header")]) and ancestor::*[contains(@id, "content")]]/@src',
'//img[not(ancestor::*[contains(@id, "sidebar") or contains(@id, "comment") or contains(@id, "footer") or contains(@id, "header")])]/@src',
'//img/@src',
)
return [self._image_absolute_uri(k) for k in nodes]
| mit | Python |
b8f33283014854bfa2d92896e2061bf17de00836 | Revert "Try not importing basal_area_increment to init" | tesera/pygypsy,tesera/pygypsy | pygypsy/__init__.py | pygypsy/__init__.py | """pygypsy
Based on Hueng et all (2009)
Huang, S., Meng, S. X., & Yang, Y. (2009). A growth and yield projection system
(GYPSY) for natural and post-harvest stands in Alberta. Forestry Division,
Alberta Sustainable Resource Development, 25.
Important Acronyms:
aw = white aspen
sb = black spruce
sw = white spruce
pl = logdepole pine
bhage = Breast Height Age
tage = Total age
si_<xx> = estimated site index for species <xx>
y2bh = years until breast height age can be measured
"""
import os
import matplotlib
from ._version import get_versions
import pygypsy.basal_area_increment
__version__ = get_versions()['version']
del get_versions
# Force matplotlib to not use any Xwindows backend so that headless docker works
matplotlib.use('Agg')
__all__ = ['basal_area_increment']
| """pygypsy
Based on Hueng et all (2009)
Huang, S., Meng, S. X., & Yang, Y. (2009). A growth and yield projection system
(GYPSY) for natural and post-harvest stands in Alberta. Forestry Division,
Alberta Sustainable Resource Development, 25.
Important Acronyms:
aw = white aspen
sb = black spruce
sw = white spruce
pl = logdepole pine
bhage = Breast Height Age
tage = Total age
si_<xx> = estimated site index for species <xx>
y2bh = years until breast height age can be measured
"""
import os
import matplotlib
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Force matplotlib to not use any Xwindows backend so that headless docker works
matplotlib.use('Agg')
| mit | Python |
954e4882a496433162313110ac7f6bbe2b2eaaba | update version number | Synss/pyhard2 | pyhard2/__init__.py | pyhard2/__init__.py | __version__ = u"1.0.0 beta"
| __version__ = u"0.9.8c alpha"
| mit | Python |
740a06df7e92ff53c9b5a66c06f97476193b8799 | add an example for the marker usage | ianupright/micropsi2,ianupright/micropsi2,printedheart/micropsi2,printedheart/micropsi2,ianupright/micropsi2,printedheart/micropsi2 | micropsi_core/tests/test_node.py | micropsi_core/tests/test_node.py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Tests for node, nodefunction and the like
"""
from micropsi_core.nodenet.node import Nodetype
from micropsi_core.nodenet.nodefunctions import register, concept
from micropsi_core import runtime as micropsi
import pytest
@pytest.mark.engine("theano_engine")
def test_nodetype_function_definition_overwrites_default_function_name_theano(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
nodetype = nodenet.get_standard_nodetype_definitions()['Register'].copy()
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction == register
nodetype['nodefunction_definition'] = 'return 17'
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction != register
assert foo.nodefunction(nodenet, None) == 17
@pytest.mark.engine("dict_engine")
def test_nodetype_function_definition_overwrites_default_function_name(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
nodetype = nodenet.get_standard_nodetype_definitions()['Concept'].copy()
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction == concept
nodetype['nodefunction_definition'] = 'return 17'
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction != concept
assert foo.nodefunction(nodenet, None) == 17
| #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Tests for node, nodefunction and the like
"""
from micropsi_core.nodenet.node import Nodetype
from micropsi_core.nodenet.nodefunctions import concept
from micropsi_core import runtime as micropsi
def test_nodetype_function_definition_overwrites_default_function_name(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
nodetype = nodenet.get_standard_nodetype_definitions()['Concept'].copy()
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction == concept
nodetype['nodefunction_definition'] = 'return 17'
foo = Nodetype(nodenet=nodenet, **nodetype)
assert foo.nodefunction != concept
assert foo.nodefunction(nodenet, None) == 17
| mit | Python |
3a61b480285c5a7d0f99ae91fbd278679444c0c3 | add some comments | thefab/tornadis,thefab/tornadis | tornadis/pool.py | tornadis/pool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
import tornado.gen
import toro
import functools
from collections import deque
from tornadis.client import Client
from tornadis.utils import ContextManagerFuture
# FIXME: error handling
class ClientPool(object):
"""High level object to deal with a pool of redis clients
Attributes:
client_kwargs (dict): Client constructor arguments
__sem (toro.Semaphore): Semaphore object to deal with pool limits
(only when max_size != -1)
__pool (deque): double-ended queue which contains pooled client objets
"""
def __init__(self, max_size=-1, **client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit")
(dict): Client constructor arguments
client_kwargs (dict): Client constructor arguments
"""
self.max_size = max_size
self.client_kwargs = client_kwargs
self.__pool = deque()
if self.max_size != -1:
self.__sem = toro.Semaphore(self.max_size)
else:
self.__sem = None
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result.
"""
if self.__sem is not None:
yield self.__sem.acquire()
try:
client = self.__pool.popleft()
except IndexError:
client = self._make_client()
yield client.connect()
raise tornado.gen.Return(client)
def connected_client(self):
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object
"""
self.__pool.append(client)
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
client.disconnect()
except IndexError:
break
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
import tornado.gen
import toro
import functools
from collections import deque
from tornadis.client import Client
from tornadis.utils import ContextManagerFuture
# FIXME: error handling
class ClientPool(object):
max_size = None
client_kwargs = None
read_callback = None
__sem = None
__pool = None
def __init__(self, max_size=-1, **kwargs):
self.max_size = max_size
self.client_kwargs = kwargs
self.__pool = deque()
if self.max_size != -1:
self.__sem = toro.Semaphore(self.max_size)
@tornado.gen.coroutine
def get_connected_client(self):
if self.__sem is not None:
yield self.__sem.acquire()
try:
client = self.__pool.popleft()
except IndexError:
client = self._make_client()
yield client.connect()
raise tornado.gen.Return(client)
def connected_client(self):
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
self.__pool.append(client)
if self.__sem is not None:
self.__sem.release()
def destroy(self):
while True:
try:
client = self.__pool.popleft()
client.disconnect()
except IndexError:
break
def _make_client(self):
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
| mit | Python |
51814e57103e75deaec11af81d85d13ed80ec594 | Bump version number to 0.2 | mila-udem/fuel,aalmah/fuel,capybaralet/fuel,dmitriy-serdyuk/fuel,capybaralet/fuel,aalmah/fuel,dribnet/fuel,dribnet/fuel,dmitriy-serdyuk/fuel,udibr/fuel,vdumoulin/fuel,udibr/fuel,mila-udem/fuel,vdumoulin/fuel | fuel/version.py | fuel/version.py | version = '0.2.0'
| version = '0.1.1'
| mit | Python |
72ce164a461987f7b9d35ac9a2b3a36386b7f8c9 | Add possibility of passing priority for adding an observer | berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop | ui/Interactor.py | ui/Interactor.py | """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction, priority=None):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
if priority is not None:
callback = obj.AddObserver(eventName, callbackFunction, priority)
else:
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
| """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
| mit | Python |
5fc75cda8c56145ee803943f018420620db186db | add history command | bjarneo/Pytify,bjarneo/Pytify,jaruserickson/spotiplay | pytify/commander.py | pytify/commander.py | from __future__ import absolute_import, unicode_literals
class Commander():
def __init__(self, Pytifylib):
self.pytify = Pytifylib
def parse(self, command):
if command[0] != '/':
return ''
command = command.replace('/', '')
return command
def commands(self):
return {
'help': 'list all commands',
'current': 'print current song (currently linux only)',
'next': 'play next song',
'prev': 'play previous song',
'pp': 'play or pause song',
'stop': 'stop',
'history': 'last five search results'
}
def validate(self, command):
for key in self.commands():
if command != key:
continue
return True
return False
def help(self):
print('\nCommands:')
for key, val in self.commands().items():
print(' {0:20} {1:75}'.format(key, val))
print('\n')
def run(self, command):
command = self.parse(command)
if not command:
return False
if not self.validate(command):
self.help()
if command == 'help':
self.help()
elif command == 'current':
print(self.pytify.get_current_playing())
elif command == 'next':
self.pytify.next()
elif command == 'prev':
self.pytify.prev()
elif command == 'pp':
self.pytify.play_pause()
elif command == 'stop':
self.pytify.stop()
elif command == 'history':
self.pytify.print_history()
return True
| from __future__ import absolute_import, unicode_literals
class Commander():
def __init__(self, Pytifylib):
self.pytify = Pytifylib
def parse(self, command):
if command[0] != '/':
return ''
command = command.replace('/', '')
return command
def commands(self):
return {
'help': 'list all commands',
'current': 'print current song (currently linux only)',
'next': 'play next song',
'prev': 'play previous song',
'pp': 'play or pause song',
'stop': 'stop'
}
def validate(self, command):
for key in self.commands():
if command != key:
continue
return True
return False
def help(self):
print('\nCommands:')
for key, val in self.commands().items():
print(' {0:20} {1:75}'.format(key, val))
print('\n')
def run(self, command):
command = self.parse(command)
if not command:
return False
if not self.validate(command):
self.help()
if command == 'help':
self.help()
elif command == 'current':
print(self.pytify.get_current_playing())
elif command == 'next':
self.pytify.next()
elif command == 'prev':
self.pytify.prev()
elif command == 'pp':
self.pytify.play_pause()
elif command == 'stop':
self.pytify.stop()
return True
| mit | Python |
2370c601f087e0d342287c400654a9ceab22c504 | Add function to send email from a template | renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar | radar/radar/mail.py | radar/radar/mail.py | import smtplib
from email.mime.text import MIMEText, MIMEMultipart
from flask import current_app
COMMA_SPACE = ', '
def send_email(to_addresses, subject, message_plain, message_html=None, from_address=None):
if from_address is None:
from_address = current_app.config.get('FROM_ADDRESS', 'bot@radar.nhs.uk')
send_emails = current_app.config.get('SEND_EMAILS', not current_app.debug)
m = MIMEMultipart('alternative')
m['Subject'] = subject
m['From'] = from_address
m['To'] = COMMA_SPACE.join(to_addresses)
m_plain = MIMEText(message_plain, 'plain')
m.attach(m_plain)
if message_html is not None:
m_html = MIMEText(message_html, 'html')
m.attach(m_html)
if send_emails:
smtp_host = current_app.config.get('SMTP_HOST', 'localhost')
smtp_port = current_app.config.get('SMTP_PORT', 25)
s = smtplib.SMTP(smtp_host, smtp_port)
s.sendmail(from_address, to_addresses, m)
s.quit()
else:
print m.to_string()
def send_email_from_template(to_addresses, subject, template_name, context, from_address=None):
template_path_plain = 'email/%s.txt' % template_name
template_path_html = 'email/%s.html' % template_name
message_plain = render_template(template_path_plain, **context)
message_html = render_template(template_path_html, **context)
send_email(to_addresses, subject, message_plain, message_html, from_address)
| import smtplib
from email.mime.text import MIMEText, MIMEMultipart
from flask import current_app
COMMA_SPACE = ', '
def send_email(to_addresses, subject, message_plain, message_html=None, from_address=None):
if from_address is None:
from_address = current_app.config.get('FROM_ADDRESS', 'bot@radar.nhs.uk')
send_emails = current_app.config.get('SEND_EMAILS', not current_app.debug)
m = MIMEMultipart('alternative')
m['Subject'] = subject
m['From'] = from_address
m['To'] = COMMA_SPACE.join(to_addresses)
m_plain = MIMEText(message_plain, 'plain')
m.attach(m_plain)
if message_html is not None:
m_html = MIMEText(message_html, 'html')
m.attach(m_html)
if send_emails:
smtp_host = current_app.config.get('SMTP_HOST', 'localhost')
smtp_port = current_app.config.get('SMTP_PORT', 25)
s = smtplib.SMTP(smtp_host, smtp_port)
s.sendmail(from_address, to_addresses, m)
s.quit()
else:
print m.to_string()
| agpl-3.0 | Python |
281cf28a3eb64442957ce76d5681c4fba9c66d23 | Update corpus docstring (no need to fit dictionary first). | escherba/glove-python,joshloyal/glove-python,ChristosChristofidis/glove-python,maciejkula/glove-python | glove/corpus.py | glove/corpus.py | # Cooccurrence matrix construction tools
# for fitting the GloVe model.
try:
# Python 2 compat
import cPickle as pickle
except ImportError:
import pickle
from .corpus_cython import construct_cooccurrence_matrix
class Corpus(object):
"""
Class for constructing a cooccurrence matrix
from a corpus.
"""
def __init__(self):
self.dictionary = None
self.matrix = None
def fit(self, corpus, window=10):
"""
Perform a pass through the corpus to construct
the cooccurrence matrix.
Parameters:
- iterable of lists of strings corpus
- int window: the length of the (symmetric)
context window used for cooccurrence.
"""
self.dictionary, self.matrix = construct_cooccurrence_matrix(corpus,
int(window))
def save(self, filename):
with open(filename, 'wb') as savefile:
pickle.dump((self.dictionary, self.matrix),
savefile,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, filename):
instance = cls()
with open(filename, 'rb') as savefile:
instance.dictionary, instance.matrix = pickle.load(savefile)
return instance
| # Cooccurrence matrix construction tools
# for fitting the GloVe model.
try:
# Python 2 compat
import cPickle as pickle
except ImportError:
import pickle
from .corpus_cython import construct_cooccurrence_matrix
class Corpus(object):
"""
Class for constructing a cooccurrence matrix
from a corpus.
"""
def __init__(self):
self.dictionary = None
self.matrix = None
def fit(self, corpus, window=10):
"""
Perform a pass through the corpus to construct
the cooccurrence matrix.
You must call fit_dictionary first.
Parameters:
- iterable of lists of strings corpus
- int window: the length of the (symmetric)
context window used for cooccurrence.
"""
self.dictionary, self.matrix = construct_cooccurrence_matrix(corpus,
int(window))
def save(self, filename):
with open(filename, 'wb') as savefile:
pickle.dump((self.dictionary, self.matrix),
savefile,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, filename):
instance = cls()
with open(filename, 'rb') as savefile:
instance.dictionary, instance.matrix = pickle.load(savefile)
return instance
| apache-2.0 | Python |
7919ee8374b3f3d15d45ee04f50c1de82f91b19d | Implement authentication and added resources | GooeeIOT/gooee-python-sdk | gooee/client.py | gooee/client.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from platform import platform
import requests
from .compat import json
from .decorators import resource
from .exceptions import (
IllegalHttpMethod,
)
from . import __version__
from .utils import (
format_path,
GOOEE_API_URL
)
class Gooee(object):
"""Gooee HTTP client class."""
allowed_methods = ['post', 'get', 'delete']
def __init__(self, api_base_url=GOOEE_API_URL):
self.api_base_url = api_base_url
self.auth_token = ''
def authenticate(self, username, password):
payload = {"username": username,
"password": password}
token = self.post('/auth/login', payload).get('token')
self.auth_token = 'JWT {token}'.format(token=token)
@property
def headers(self):
headers = {
"content-type": "application/json",
"Authorization": self.auth_token,
"User-Agent": "gooee-python-sdk {version} ({system})".format(
version=__version__,
system=platform(),
)
}
return headers
def api(self, method, path, data):
method = method.strip().lower()
if method not in self.allowed_methods:
msg = "The '{0}' method is not accepted by Gooee SDK.".format(method)
raise IllegalHttpMethod(msg)
method = getattr(self, method)
return method(path, data)
@resource
def get(self, path, data=None):
path = format_path(path, self.api_base_url)
if data is None:
data = {}
return requests.get(path, headers=self.headers, params=data or {})
@resource
def post(self, path, data=None):
path = format_path(path, self.api_base_url)
json_data = json.dumps(data or {})
return requests.post(path, headers=self.headers, data=json_data)
@resource
def put(self, path, data=None):
path = format_path(path, self.api_base_url)
json_data = json.dumps(data or {})
return requests.put(path, headers=self.headers, data=json_data)
@resource
def delete(self, path, data=None):
path = format_path(path, self.api_base_url)
return requests.delete(path, headers=self.headers, data=data or {})
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from platform import platform
import requests
from .compat import json
from .exceptions import (
IllegalHttpMethod,
)
from . import __version__
from .utils import (
format_path,
GOOEE_API_URL
)
class Gooee(object):
"""Gooee HTTP client class."""
allowed_methods = ['post', 'get', 'delete']
def __init__(self, oauth_token, api_base_url=GOOEE_API_URL):
self.oauth_token = oauth_token
self.api_base_url = api_base_url
@property
def headers(self):
headers = {
"content-type": "application/json",
"Authorization": "",
"User-Agent": "gooee-python-sdk {version} ({system})".format(
version=__version__,
system=platform(),
)
}
return headers
def api(self, method, path, data):
method = method.strip().lower()
if method not in self.allowed_methods:
msg = "The '{0}' method is not accepted by Gooee SDK.".format(method)
raise IllegalHttpMethod(msg)
method = getattr(self, method)
return method(path, data)
def get(self, path, data=None):
path = format_path(path, self.api_base_url)
if data is None:
data = {}
return requests.get(path, headers=self.headers, params=data or {})
def post(self, path, data=None):
path = format_path(path, self.api_base_url)
json_data = json.dumps(data or {})
return requests.post(path, headers=self.headers, data=json_data)
def put(self, path, data=None):
path = format_path(path, self.api_base_url)
json_data = json.dumps(data or {})
return requests.put(path, headers=self.headers, data=json_data)
def delete(self, path, data=None):
path = format_path(path, self.api_base_url)
return requests.delete(path, headers=self.headers, data=data or {})
| apache-2.0 | Python |
5ae3172898b65c3a63c85cecb4355ccf8cb34e54 | raise error if no file is provided | RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_nlu | rasa_core/config.py | rasa_core/config.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Optional, Text, Dict, Any, List
from rasa_core import utils
from rasa_core.policies import PolicyEnsemble
def load(config_file):
# type: (Optional[Text], Dict[Text, Any], int) -> List[Policy]
"""Load policy data stored in the specified file. fallback_args and
max_history are typically command line arguments. They take precedence
over the arguments specified in the config yaml.
"""
if config_file:
config_data = utils.read_yaml_file(config_file)
else:
raise ValueError("You have to provide a config file")
return PolicyEnsemble.from_dict(config_data)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Optional, Text, Dict, Any, List
from rasa_core import utils
from rasa_core.policies import PolicyEnsemble
def load(config_file):
# type: (Optional[Text], Dict[Text, Any], int) -> List[Policy]
"""Load policy data stored in the specified file. fallback_args and
max_history are typically command line arguments. They take precedence
over the arguments specified in the config yaml.
"""
config_data = utils.read_yaml_file(config_file)
return PolicyEnsemble.from_dict(config_data)
| apache-2.0 | Python |
d593d3d8f8487cdc933c088a83b632b4baaf8e8a | add fsType to config.file | LiquidSame/rbh-quota | rbh_quota/config.py | rbh_quota/config.py | #!/usr/bin/env python
import ConfigParser
import socket
from os.path import expanduser
Config = ConfigParser.ConfigParser()
Config.read(expanduser('~/.rbh-quota.ini'))
try:
db_host = Config.get('rbh-quota_api', 'db_host')
except:
db_host = ''
try:
db_user = Config.get('rbh-quota_api', 'db_user')
except:
db_user = ''
try:
db_pwd = Config.get('rbh-quota_api', 'db_pwd')
except:
db_pwd = ''
try:
db = Config.get('rbh-quota_api', 'db')
except:
db = ''
try:
fsType = Config.get('rbh-quota_api', 'fsType')
except:
fsType = ''
try:
alerts = Config.get('rbh-quota_api', 'alerts')
except:
alerts = False
try:
domain = Config.get('rbh-quota_api', 'domain')
except:
domain = ''
try:
server = Config.get('rbh-quota_api', 'smtp_server')
except:
server = ''
try:
sender = Config.get('rbh-quota_api', 'sender')
except:
sender = 'rbh-quota'
try:
copy = Config.get('rbh-quota_api', 'copy')
except:
copy = 'administrator'
try:
webHost = Config.get('rbh-quota_api', 'webHost')
except:
webHost = socket.gethostname()
| #!/usr/bin/env python
import ConfigParser
import socket
from os.path import expanduser
Config = ConfigParser.ConfigParser()
Config.read(expanduser('~/.rbh-quota.ini'))
try:
db_host = Config.get('rbh-quota_api', 'db_host')
except:
db_host = ''
try:
db_user = Config.get('rbh-quota_api', 'db_user')
except:
db_user = ''
try:
db_pwd = Config.get('rbh-quota_api', 'db_pwd')
except:
db_pwd = ''
try:
db = Config.get('rbh-quota_api', 'db')
except:
db = ''
try:
alerts = Config.get('rbh-quota_api', 'alerts')
except:
alerts = False
try:
domain = Config.get('rbh-quota_api', 'domain')
except:
domain = ''
try:
server = Config.get('rbh-quota_api', 'smtp_server')
except:
server = ''
try:
sender = Config.get('rbh-quota_api', 'sender')
except:
sender = 'rbh-quota'
try:
copy = Config.get('rbh-quota_api', 'copy')
except:
copy = 'administrator'
try:
webHost = Config.get('rbh-quota_api', 'webHost')
except:
webHost = socket.gethostname()
| mit | Python |
2b724f32ecd311ce526b433ec81399c7a8e8e202 | Update settings.py | Programmeerclub-WLG/Agenda-App | gui/settings.py | gui/settings.py |
"""
Dit is het bestand voor het aanpassen van instellingen scherm voor de Agenda-App
Het heeft een aantal functies en dezen staat beschreven in de drive.
<LICENSE>
<COPYRIGHT NOTICE>
<DEVELOPER>
<VERSION and DATE>
"""
| apache-2.0 | Python | |
4a4fcd362865bb8ae0d23f9232e9e2e4c3cef0a0 | Add error check for body | poliastro/poliastro | src/poliastro/twobody/mean_elements.py | src/poliastro/twobody/mean_elements.py | import erfa
from astropy import units as u
from astropy.coordinates.solar_system import PLAN94_BODY_NAME_TO_PLANET_INDEX
from poliastro.bodies import SOLAR_SYSTEM_BODIES
from ..constants import J2000
from ..frames import Planes
from .states import RVState
def get_mean_elements(body, epoch=J2000):
"""Get ecliptic mean elements of body.
Parameters
----------
body : ~poliastro.bodies.SolarSystemPlanet
Body.
epoch : astropy.time.Time
Epoch.
"""
# Internally, erfa.plan94 has a table of classical elements,
# which are then converted to position and velocity...
# So we are reverting the conversion in the last line
# This way at least we avoid copy pasting the values
try:
if body not in SOLAR_SYSTEM_BODIES:
raise ValueError(
f"The input body '{body}' is invalid. It must be an instance of `poliastro.bodies.SolarSystemPlanet`."
)
name = body.name.lower()
if name == "earth":
name = "earth-moon-barycenter"
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[name]
body_pv_helio = erfa.plan94(epoch.jd1, epoch.jd2, body_index)
r = body_pv_helio["p"] * u.au
v = body_pv_helio["v"] * u.au / u.day
except KeyError as e:
raise ValueError(f"No available mean elements for {body}") from e
return RVState(body.parent, r, v, plane=Planes.EARTH_ECLIPTIC).to_classical()
| import erfa
from astropy import units as u
from astropy.coordinates.solar_system import PLAN94_BODY_NAME_TO_PLANET_INDEX
from ..constants import J2000
from ..frames import Planes
from .states import RVState
from poliastro.bodies import Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune
SOLAR_SYSTEM_BODIES = [Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune]
def get_mean_elements(body, epoch=J2000):
"""Get ecliptic mean elements of body.
Parameters
----------
body : ~poliastro.bodies.SolarSystemPlanet
Body.
epoch : astropy.time.Time
Epoch.
"""
# Internally, erfa.plan94 has a table of classical elements,
# which are then converted to position and velocity...
# So we are reverting the conversion in the last line
# This way at least we avoid copy pasting the values
try:
if body not in SOLAR_SYSTEM_BODIES:
raise ValueError("The input body is invalid. Pass an instance of `poliastro.bodies.SolarSystemPlanet` instead.")
name = body.name.lower()
if name == "earth":
name = "earth-moon-barycenter"
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[name]
body_pv_helio = erfa.plan94(epoch.jd1, epoch.jd2, body_index)
r = body_pv_helio["p"] * u.au
v = body_pv_helio["v"] * u.au / u.day
except KeyError as e:
raise ValueError(f"No available mean elements for {body}") from e
return RVState(body.parent, r, v, plane=Planes.EARTH_ECLIPTIC).to_classical()
| mit | Python |
06d9c54dc06db2ea7d2315bd2705fc55d06587a5 | Add function numeric typed weekday to string typed weekday. | why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado | helper/korea.py | helper/korea.py | #
# dp for Tornado
# YoungYong Park (youngyongpark@gmail.com)
# 2014.10.23
#
from __future__ import absolute_import
from engine.helper import Helper as dpHelper
class KoreaHelper(dpHelper):
def readable_phone_number(self, number, separator='-'):
number = str(self.helper.numeric.extract_numbers(number))
l = len(number)
if l == 8:
return '%s%s%s' % (number[0:4], separator, number[4:8])
elif l == 9:
return '%s%s%s%s%s' % (number[0:2], separator, number[2:5], separator, number[5:9])
elif l == 10:
three = ("010", "011", "016", "017", "018", "019",
"053", "031", "032", "033", "041", "042",
"043", "051", "052", "054", "055", "061",
"062", "063", "064", "070", "060", "050")
if number[0:3] in three:
return '%s%s%s%s%s' % (number[0:3], separator, number[3:6], separator, number[6:10])
else:
return '%s%s%s%s%s' % (number[0:2], separator, number[2:6], separator, number[6:10])
elif l == 11:
return '%s%s%s%s%s' % (number[0:3], separator, number[3:7], separator, number[7:11])
else:
return number
def weekday(self, w, short=False, isoweekday=True):
weekdays = {
0: '월',
1: '화',
2: '수',
3: '목',
4: '금',
5: '토',
6: '일'
}
w = int(w)
if isoweekday:
w -= 1
return '%s요일' % weekdays[w] if not short else weekdays[w] | #
# dp for Tornado
# YoungYong Park (youngyongpark@gmail.com)
# 2014.10.23
#
from __future__ import absolute_import
from engine.helper import Helper as dpHelper
class KoreaHelper(dpHelper):
def readable_phone_number(self, number, separator='-'):
number = str(self.helper.numeric.extract_numbers(number))
l = len(number)
if l == 8:
return '%s%s%s' % (number[0:4], separator, number[4:8])
elif l == 9:
return '%s%s%s%s%s' % (number[0:2], separator, number[2:5], separator, number[5:9])
elif l == 10:
three = ("010", "011", "016", "017", "018", "019",
"053", "031", "032", "033", "041", "042",
"043", "051", "052", "054", "055", "061",
"062", "063", "064", "070", "060", "050")
if number[0:3] in three:
return '%s%s%s%s%s' % (number[0:3], separator, number[3:6], separator, number[6:10])
else:
return '%s%s%s%s%s' % (number[0:2], separator, number[2:6], separator, number[6:10])
elif l == 11:
return '%s%s%s%s%s' % (number[0:3], separator, number[3:7], separator, number[7:11])
else:
return number | mit | Python |
addb5337bff43888d500f2782778b26a1e976581 | Bump version number to 0.2.10 | kblin/ncbi-genome-download,kblin/ncbi-genome-download | ncbi_genome_download/__init__.py | ncbi_genome_download/__init__.py | """Download genome files from the NCBI"""
from .config import (
SUPPORTED_TAXONOMIC_GROUPS,
NgdConfig
)
from .core import (
args_download,
download,
argument_parser,
)
__version__ = '0.2.10'
__all__ = [
'download',
'args_download',
'SUPPORTED_TAXONOMIC_GROUPS',
'NgdConfig',
'argument_parser'
]
| """Download genome files from the NCBI"""
from .config import (
SUPPORTED_TAXONOMIC_GROUPS,
NgdConfig
)
from .core import (
args_download,
download,
argument_parser,
)
__version__ = '0.2.9'
__all__ = [
'download',
'args_download',
'SUPPORTED_TAXONOMIC_GROUPS',
'NgdConfig',
'argument_parser'
]
| apache-2.0 | Python |
e9bd4dc3285922ce1333b41b9eb2d97164b64257 | Update views to send proper responses | avinassh/nightreads,avinassh/nightreads | nightreads/user_manager/views.py | nightreads/user_manager/views.py | from django.views.generic import View
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from .forms import SubscribeForm, UnsubscribeForm, ConfirmEmailForm
from . import user_service
class SubscribeView(View):
form_class = SubscribeForm
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(SubscribeView, self).dispatch(request, *args, **kwargs)
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
tags = form.cleaned_data['tags']
user = user_service.get_or_create_user(email=email)
is_updated = user_service.update_user_tags(user=user, tags=tags)
if is_updated:
key = user_service.generate_key(user=user)
user_service.send_confirmation_email(
request=request, user=user, key=key)
return JsonResponse({'status': 'Email sent'})
return JsonResponse({'status': 'No tags updated'})
return JsonResponse({'errors': form.errors})
class UnsubscribeView(View):
form_class = UnsubscribeForm
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(UnsubscribeView, self).dispatch(request, *args, **kwargs)
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
user = user_service.get_user(email=email)
if not user:
return JsonResponse({'error': 'User Not Found'})
key = user_service.generate_key(user=user, for_subscription=False)
user_service.send_confirmation_email(
request=request, user=user, key=key, for_subscription=False)
return JsonResponse({'status': 'Email sent'})
return JsonResponse({'errors': form.errors})
class ConfirmEmailView(View):
form_class = ConfirmEmailForm
def get(self, request):
form = self.form_class(request.GET)
if form.is_valid():
is_subscribed = form.cleaned_data['subscribe']
user = form.cleaned_data['user']
user_service.update_subscription(user=user, status=is_subscribed)
if is_subscribed:
return JsonResponse({'status': 'Subscribed'})
else:
return JsonResponse({'status': 'Unsubscribed'})
return JsonResponse({'errors': form.errors})
| from django.views.generic import View
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from .forms import SubscribeForm, UnsubscribeForm, ConfirmEmailForm
from . import user_service
class SubscribeView(View):
form_class = SubscribeForm
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(SubscribeView, self).dispatch(request, *args, **kwargs)
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
tags = form.cleaned_data['tags']
user = user_service.get_or_create_user(email=email)
is_updated = user_service.update_user_tags(user=user, tags=tags)
if is_updated:
key = user_service.generate_key(user=user)
user_service.send_confirmation_email(
request=request, user=user, key=key)
return JsonResponse({'success': key})
return JsonResponse({'success': False})
class UnsubscribeView(View):
form_class = UnsubscribeForm
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(UnsubscribeView, self).dispatch(request, *args, **kwargs)
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
user = user_service.get_user(email=email)
if not user:
return JsonResponse({'success': False})
return JsonResponse({'success': True})
class ConfirmEmailView(View):
form_class = ConfirmEmailForm
def get(self, request):
form = self.form_class(request.GET)
if form.is_valid():
if form.cleaned_data['subscribe']:
return JsonResponse({'status': 'Subscribed'})
else:
return JsonResponse({'status': 'Unsubscribed'})
return JsonResponse({'success': form.errors})
| mit | Python |
9980c5defc34eb125ba5f2b39f0d9a67e6490269 | add regex conditional for n s e w | cds-amal/addressparser,cds-amal/addressparser,cds-amal/addressparser,cds-amal/addressparser | nyctext/neighborhoods/regexps.py | nyctext/neighborhoods/regexps.py | import re
from throughways import names as throughway_names
def make_neighorbood_regex(lHoods, city):
hoods = '|'.join(lHoods)
hoods = '(%s)' % hoods
# Don't match if neighborhood is followed
# by a thoroughfare name
names = throughway_names[1:-1] # remove parens
names = '(%s|%s|north|south|east|west)' % (names, city)
rex = '\\s(%s)(?![\s,]*(%s))' % (hoods, names)
return re.compile(rex, re.I)
| import re
from throughways import names as throughway_names
def make_neighorbood_regex(lHoods, city):
hoods = '|'.join(lHoods)
hoods = '(%s)' % hoods
# Don't match if neighborhood is followed
# by a thoroughfare name
names = throughway_names[1:-1] # remove parens
names = '(%s|%s)' % (names, city)
rex = '\\s(%s)(?![\s,]*(%s))' % (hoods, names)
return re.compile(rex, re.I)
| mit | Python |
31ae945c8d7d885dc019eaf7bb4e8c2dc4619dfc | Remove unneeded 'output_file' variable | mdpiper/dakota-experiments,mdpiper/dakota-experiments,mdpiper/dakota-experiments | experiments/delft3d-ps-1/run_delft3d.py | experiments/delft3d-ps-1/run_delft3d.py | #! /usr/bin/env python
# Brokers communication between Delft3D and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call, check_output
import time
def job_is_running(job_id):
'''
Returns True if the PBS job with the given id is running.
'''
return call(['qstat', job_id]) == 0
def main():
'''
Brokers communication between Delft3D, MATLAB and Dakota through files.
'''
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
initialize_dir = os.path.join(start_dir, 'initialize')
input_template = 'WLD.sed.template'
input_file = 'WLD.sed'
cells_file = 'nesting.txt'
analysis_program = 'total_sed_cal'
analysis_program_file = analysis_program + '.m'
analysis_results_file = analysis_program + '.out'
# Copy the contents of the initialize directory into the current
# run directory. (Don't use shutil.copytree because the
# destination directory exists.)
for f in os.listdir(initialize_dir):
shutil.copy(os.path.join(initialize_dir, f), os.getcwd())
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# substitute the parameters from Dakota into the input template,
# creating a new Delft3D input file.
shutil.copy(os.path.join(start_dir, input_template), os.getcwd())
call(['dprepro', sys.argv[1], input_template, input_file])
# Call Delft3D, using the updated input file. Note that `qsub`
# returns immediately with the PBS job id.
r = check_output(['qsub', 'run_delft3d_wave.sh'])
job_id = r.rstrip('\n')
# Poll the Delft3D job every 10 min. Proceed when it's finished.
while job_is_running(job_id):
time.sleep(600)
# Call a MATLAB script to read the Delft3D output, calculate the
# desired responses, and write the Dakota results file.
shutil.copy(os.path.join(start_dir, cells_file), os.getcwd())
shutil.copy(os.path.join(start_dir, analysis_program_file), os.getcwd())
print('Current directory: ' + os.getcwd())
matlab_cmd = '-r "' + analysis_program + '; exit"'
r = call(['matlab', '-nodisplay', '-nosplash', matlab_cmd])
print('MATLAB exit status code = ' + str(r))
shutil.move(analysis_results_file, sys.argv[2])
if __name__ == '__main__':
main()
| #! /usr/bin/env python
# Brokers communication between Delft3D and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call, check_output
import time
def job_is_running(job_id):
'''
Returns True if the PBS job with the given id is running.
'''
return call(['qstat', job_id]) == 0
def main():
'''
Brokers communication between Delft3D, MATLAB and Dakota through files.
'''
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
initialize_dir = os.path.join(start_dir, 'initialize')
input_template = 'WLD.sed.template'
input_file = 'WLD.sed'
output_file = 'trim-WLD.dat'
cells_file = 'nesting.txt'
analysis_program = 'total_sed_cal'
analysis_program_file = analysis_program + '.m'
analysis_results_file = analysis_program + '.out'
# Copy the contents of the initialize directory into the current
# run directory. (Don't use shutil.copytree because the
# destination directory exists.)
for f in os.listdir(initialize_dir):
shutil.copy(os.path.join(initialize_dir, f), os.getcwd())
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# substitute the parameters from Dakota into the input template,
# creating a new Delft3D input file.
shutil.copy(os.path.join(start_dir, input_template), os.getcwd())
call(['dprepro', sys.argv[1], input_template, input_file])
# Call Delft3D, using the updated input file. Note that `qsub`
# returns immediately with the PBS job id.
r = check_output(['qsub', 'run_delft3d_wave.sh'])
job_id = r.rstrip('\n')
# Poll the Delft3D job every 10 min. Proceed when it's finished.
while job_is_running(job_id):
time.sleep(600)
# Call a MATLAB script to read the Delft3D output, calculate the
# desired responses, and write the Dakota results file.
shutil.copy(os.path.join(start_dir, cells_file), os.getcwd())
shutil.copy(os.path.join(start_dir, analysis_program_file), os.getcwd())
print('Current directory: ' + os.getcwd())
matlab_call = '-r "' + analysis_program + '; exit"'
r = call(['matlab', '-nodisplay', '-nosplash', matlab_call])
print('MATLAB exit status code = ' + str(r))
shutil.move(analysis_results_file, sys.argv[2])
if __name__ == '__main__':
main()
| mit | Python |
e880aaa34fee44e1da1b9c3d7109c1fe500e9d04 | add version number | ojengwa/ibu,ojengwa/migrate | ibu/__init__.py | ibu/__init__.py | __version__ = '0.0.2'
docs_url = 'http://github.com'
| __version__ = '0.0.2'
| mit | Python |
9e413e02961975f36a134d601b3629c668a16fe9 | Use processes with celery and threads with multiprocessing | gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine | openquake/commands/with_tiles.py | openquake/commands/with_tiles.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import os
from openquake.baselib import sap, general, parallel
from openquake.hazardlib import valid
from openquake.commonlib import readinput
from openquake.commands import engine
@sap.Script
def with_tiles(num_tiles, job_ini):
"""
Run a calculation by splitting the sites into tiles.
WARNING: this is experimental and meant only for GEM users
"""
oq = readinput.get_oqparam(job_ini)
num_sites = len(readinput.get_mesh(oq))
task_args = [(job_ini, slc)
for slc in general.split_in_slices(num_sites, num_tiles)]
if os.environ.get('OQ_DISTRIBUTE') == 'celery':
Starmap = parallel.Processmap # celery plays only with processes
else: # multiprocessing plays only with threads
Starmap = parallel.Threadmap
Starmap(engine.run_tile, task_args).reduce()
with_tiles.arg('num_tiles', 'number of tiles to generate',
type=valid.positiveint)
with_tiles.arg('job_ini', 'calculation configuration file '
'(or files, comma-separated)')
| # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from openquake.baselib import sap, general, parallel
from openquake.hazardlib import valid
from openquake.commonlib import readinput
from openquake.commands import engine
@sap.Script
def with_tiles(num_tiles, job_ini):
"""
Run a calculation by splitting the sites into tiles.
WARNING: this is experimental and meant only for GEM users
"""
oq = readinput.get_oqparam(job_ini)
num_sites = len(readinput.get_mesh(oq))
task_args = [(job_ini, slc) for slc in general.split_in_slices(
num_sites, num_tiles)]
parallel.Threadmap(engine.run_tile, task_args).reduce()
with_tiles.arg('num_tiles', 'number of tiles to generate',
type=valid.positiveint)
with_tiles.arg('job_ini', 'calculation configuration file '
'(or files, comma-separated)')
| agpl-3.0 | Python |
1ca6c23f6fce8f052890547890ab326e796e1cd4 | update database info | ThinkmanWang/NotesServer,ThinkmanWang/NotesServer,ThinkmanWang/NotesServer | utils/dbutils.py | utils/dbutils.py | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'models'))
#print(sys.path)
from mysql_python import MysqlPython
from models.User import User
from models.Customer import Customer
import MySQLdb
from DBUtils.PooledDB import PooledDB
import hashlib
import time
g_dbPool = PooledDB(MySQLdb, 5, host='123.59.78.245', user='notes', passwd='welc0me', db='db_notes', port=3306, charset = "utf8", use_unicode = True);
| import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'models'))
#print(sys.path)
from mysql_python import MysqlPython
from models.User import User
from models.Customer import Customer
import MySQLdb
from DBUtils.PooledDB import PooledDB
import hashlib
import time
g_dbPool = PooledDB(MySQLdb, 5, host='thinkman-wang.com', user='thinkman', passwd='Ab123456', db='db_notes', port=3306, charset = "utf8", use_unicode = True);
| apache-2.0 | Python |
b2bab632d3eac82b910d65e37fc78d8a0a3c2209 | set up active scanning | hbock/bgasync | utils/scanner.py | utils/scanner.py | """
Test utility to create a Bluetooth LE scan table.
"""
import sys
import argparse
from bgasync import api
from bgasync.twisted.protocol import BluegigaProtocol
from twisted.internet.serialport import SerialPort
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import deferLater
from twisted.internet import reactor
from twisted.python import log
class ScannerProtocol(BluegigaProtocol):
def handle_event_gap_scan_response(self, event):
print("Received advertising event [sender={}]: {}".format(api.get_address_string(event.sender), event))
@inlineCallbacks
def stop_discovery(protocol):
print("Stopping discovery.")
response = yield protocol.send_command(api.command_gap_end_procedure())
if response.result != 0:
print("Error stopping discovery! {}".format(api.get_error_code_string(response.result)))
# End program
reactor.stop()
@inlineCallbacks
def command_response_cb(response, protocol, duration):
if response.result != 0:
print("Error setting scan parameters: {}".format(api.get_error_code_string(response.result)))
response = yield protocol.send_command(api.command_gap_discover(mode=api.gap_discover_mode.discover_observation.value))
if response.result != 0:
print("Error starting discovery! {}".format(api.get_error_code_string(response.result)))
# If we're in the wrong state, we may have simply exited
# while still discovering.
# Try to fix it by ending the GAP procedure and start over.
if response.result == api.ERR_BGAPI_DEVICE_IN_WRONG_STATE:
response = yield protocol.send_command(api.command_gap_end_procedure())
# Need a Deferred delay, but no action.
yield deferLater(reactor, 0.1, lambda: None)
response = yield protocol.send_command(api.command_gap_discover(mode=api.gap_discover_mode.discover_observation.value))
# Otherwise, we're unable to proceed.
else:
print("Terminating on error.")
reactor.stop()
else:
print("Discovery started.")
reactor.callLater(duration, stop_discovery, protocol)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("serial_port", help="Serial/COM port connected to the Bluegiga device.")
parser.add_argument("-d", "--duration", type=int, help="Specify duration of device discovery", default=60)
parser.add_argument("-v", "--verbose", action="store_true", help="Turn on verbose logging")
args = parser.parse_args()
print("Starting scanner (duration {} seconds)".format(args.duration))
protocol = ScannerProtocol()
port = SerialPort(protocol, args.serial_port, reactor, baudrate=256000)
d = protocol.send_command(api.command_gap_set_scan_parameters(scan_interval=0x4B, scan_window=0x32, active=1))
d.addCallback(command_response_cb, protocol, args.duration)
if args.verbose:
log.startLogging(sys.stderr, setStdout=False)
reactor.run()
if __name__ == "__main__":
main()
| """
Test utility to create a Bluetooth LE scan table.
"""
import sys
import argparse
from bgasync import api
from bgasync.twisted.protocol import BluegigaProtocol
from twisted.internet.serialport import SerialPort
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import deferLater
from twisted.internet import reactor
from twisted.python import log
class ScannerProtocol(BluegigaProtocol):
def handle_event_gap_scan_response(self, event):
print("Received advertising event [sender={}]: {}".format(api.get_address_string(event.sender), event))
@inlineCallbacks
def stop_discovery(protocol):
print("Stopping discovery.")
response = yield protocol.send_command(api.command_gap_end_procedure())
if response.result != 0:
print("Error stopping discovery! {}".format(api.get_error_code_string(response.result)))
# End program
reactor.stop()
@inlineCallbacks
def command_response_cb(response, protocol, duration):
if response.result != 0:
print("Error starting discovery! {}".format(api.get_error_code_string(response.result)))
# If we're in the wrong state, we may have simply exited
# while still discovering.
# Try to fix it by ending the GAP procedure and start over.
if response.result == api.ERR_BGAPI_DEVICE_IN_WRONG_STATE:
response = yield protocol.send_command(api.command_gap_end_procedure())
# Need a Deferred delay, but no action.
yield deferLater(reactor, 0.1, lambda: None)
response = yield protocol.send_command(api.command_gap_discover(mode=api.gap_discover_mode.discover_observation.value))
# Otherwise, we're unable to proceed.
else:
print("Terminating on error.")
reactor.stop()
else:
print("Discovery started.")
reactor.callLater(duration, stop_discovery, protocol)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("serial_port", help="Serial/COM port connected to the Bluegiga device.")
parser.add_argument("-d", "--duration", type=int, help="Specify duration of device discovery", default=60)
parser.add_argument("-v", "--verbose", action="store_true", help="Turn on verbose logging")
args = parser.parse_args()
print("Starting scanner (duration {} seconds)".format(args.duration))
protocol = ScannerProtocol()
port = SerialPort(protocol, args.serial_port, reactor, baudrate=256000)
d = protocol.send_command(api.command_gap_discover(mode=api.gap_discover_mode.discover_observation.value))
d.addCallback(command_response_cb, protocol, args.duration)
if args.verbose:
log.startLogging(sys.stderr, setStdout=False)
reactor.run()
if __name__ == "__main__":
main()
| bsd-2-clause | Python |
8a5b0c70164c7bc76d8825d77654f2804b9529e6 | Fix typo | lindareijnhoudt/resync,dans-er/resync,dans-er/resync,lindareijnhoudt/resync,resync/resync | resync/source_description.py | resync/source_description.py | """ResourceSync Description object
A ResourceSync Description enumerates the Capability Lists
offered by a Source. Since a Source has one Capability List
per set of resources that it distinguishes, the
ResourceSync Description will enumerate as many Capability
Lists as the Source has distinct sets of resources.
The ResourceSync Description can only be based on the
<urlset> format.
See: http://www.openarchives.org/rs/resourcesync#ResourceSyncDesc
May also contain metadata and links like other ResourceSync
documents.
"""
import collections
from resource import Resource
from resource_set import ResourceSet
from list_base_with_index import ListBaseWithIndex
class SourceDescription(ListBaseWithIndex):
"""Class representing the set of Capability Lists supported
Will admit only one resource with any given URI.
Storage is unordered but the iterator imposes a canonical order
which is currently alphabetical by URI.
"""
def __init__(self, resources=None, md=None, ln=None):
super(SourceDescription, self).__init__(resources=resources, md=md, ln=ln,
capability_name='description',
resources_class=ResourceSet)
self.md['from']=None #usually don't want a from date
def add(self, resource, replace=False):
"""Add a resource or an iterable collection of resources
Will throw a ValueError if the resource (ie. same uri) already
exists in the capability_list, unless replace=True.
"""
if isinstance(resource, collections.Iterable):
for r in resource:
self.resources.add(r,replace)
else:
self.resources.add(resource,replace)
def add_capability_list(self,capability_list=None):
"""Add a capability list
Adds either a CapabiltyList object specified in capability_list
or else creates a Resource with the URI given in capability_list
and adds that to the Source Description
"""
if (hasattr(capability_list,'uri')):
r = Resource( uri=capability_list.uri,
capability=capability_list.capability_name )
if (capability_list.describedby is not None):
r.link_add( rel='describedby', href=capability_list.describedby )
else:
r = Resource( uri=capability_list,
capability='capabilitylist')
self.add( r )
| """ResourceSync Description object
A ResourceSync Description enumerates the Capability Lists
offered by a Source. Since a Source has one Capability List
per set of resources that it distinguishes, the
ResourceSync Description will enumerate as many Capability
Lists as the Source has distinct sets of resources.
The ResourceSync Description can only be based on the
<urlset> format.
See: http://www.openarchives.org/rs/resourcesync#ResourceSyncDesc
May also contain metadata and links like other ResourceSync
document.
"""
import collections
from resource import Resource
from resource_set import ResourceSet
from list_base_with_index import ListBaseWithIndex
class SourceDescription(ListBaseWithIndex):
"""Class representing the set of Capability Lists supported
Will admit only one resource with any given URI.
Storage is unordered but the iterator imposes a canonical order
which is currently alphabetical by URI.
"""
def __init__(self, resources=None, md=None, ln=None):
super(SourceDescription, self).__init__(resources=resources, md=md, ln=ln,
capability_name='description',
resources_class=ResourceSet)
self.md['from']=None #usually don't want a from date
def add(self, resource, replace=False):
"""Add a resource or an iterable collection of resources
Will throw a ValueError if the resource (ie. same uri) already
exists in the capability_list, unless replace=True.
"""
if isinstance(resource, collections.Iterable):
for r in resource:
self.resources.add(r,replace)
else:
self.resources.add(resource,replace)
def add_capability_list(self,capability_list=None):
"""Add a capability list
Adds either a CapabiltyList object specified in capability_list
or else creates a Resource with the URI given in capability_list
and adds that to the Source Description
"""
if (hasattr(capability_list,'uri')):
r = Resource( uri=capability_list.uri,
capability=capability_list.capability_name )
if (capability_list.describedby is not None):
r.link_add( rel='describedby', href=capability_list.describedby )
else:
r = Resource( uri=capability_list,
capability='capabilitylist')
self.add( r )
| apache-2.0 | Python |
8d2ffbb17ce5dac6ae8abdf3c612e3348027090d | Add PUBLIC_IPS to consolidate socket logic | ipython/ipython,ipython/ipython | IPython/utils/localinterfaces.py | IPython/utils/localinterfaces.py | """Simple utility for building a list of local IPs using the socket module.
This module defines two constants:
LOCALHOST : The loopback interface, or the first interface that points to this
machine. It will *almost* always be '127.0.0.1'
LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine.
PUBLIC_IPS : A list of public IP addresses that point to this machine.
Use these to tell remote clients where to find you.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import socket
from .data import uniq_stable
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
LOCAL_IPS = []
try:
LOCAL_IPS = socket.gethostbyname_ex('localhost')[2]
except socket.gaierror:
pass
PUBLIC_IPS = []
try:
PUBLIC_IPS = socket.gethostbyname_ex(socket.gethostname())[2]
except socket.gaierror:
pass
else:
PUBLIC_IPS = uniq_stable(PUBLIC_IPS)
LOCAL_IPS.extend(PUBLIC_IPS)
# include all-interface aliases: 0.0.0.0 and ''
LOCAL_IPS.extend(['0.0.0.0', ''])
LOCAL_IPS = uniq_stable(LOCAL_IPS)
LOCALHOST = LOCAL_IPS[0]
| """Simple utility for building a list of local IPs using the socket module.
This module defines two constants:
LOCALHOST : The loopback interface, or the first interface that points to this
machine. It will *almost* always be '127.0.0.1'
LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import socket
from .data import uniq_stable
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
LOCAL_IPS = []
try:
LOCAL_IPS = socket.gethostbyname_ex('localhost')[2]
except socket.gaierror:
pass
try:
LOCAL_IPS.extend(socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
pass
# include all-interface aliases: 0.0.0.0 and ''
LOCAL_IPS.extend(['0.0.0.0', ''])
LOCAL_IPS = uniq_stable(LOCAL_IPS)
LOCALHOST = LOCAL_IPS[0]
| bsd-3-clause | Python |
0473ba4bb1592c480f5741839fbf1dad59dd0403 | Print stats for researcher and department in tests | wetneb/dissemin,Lysxia/dissemin,dissemin/dissemin,wetneb/dissemin,dissemin/dissemin,dissemin/dissemin,dissemin/dissemin,wetneb/dissemin,Lysxia/dissemin,Lysxia/dissemin,dissemin/dissemin,wetneb/dissemin,Lysxia/dissemin | statistics/tests.py | statistics/tests.py | # Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Tests statistics update and statistics consistency.
"""
from django.test import TestCase
from backend.tests import PrefilledTest
from backend.globals import get_ccf
from backend.crossref import CrossRefPaperSource
from backend.oai import OaiPaperSource
from papers.models import PaperWorld
class StatisticsTest(PrefilledTest):
@classmethod
def setUpClass(self):
super(StatisticsTest, self).setUpClass()
ccf = get_ccf()
crps = CrossRefPaperSource(ccf)
oai = OaiPaperSource(ccf)
crps.fetch(self.r2, incremental=True)
oai.fetch(self.r2, incremental=True)
def validStats(self, stats):
self.assertTrue(stats.check_values())
self.assertTrue(stats.num_tot > 1)
def printStats(self, stats):
print "OA: %d" % stats.num_oa
print "OK: %d" % stats.num_ok
print "COULDBE: %d" % stats.num_couldbe
print "TOT: %d" % stats.num_tot
def test_researcher(self):
self.printStats(self.r2.stats)
self.validStats(self.r2.stats)
def test_department(self):
self.d.update_stats()
self.printStats(self.d.stats)
self.validStats(self.d.stats)
def test_institution(self):
self.i.update_stats()
self.validStats(self.i.stats)
def test_paperworld(self):
pw = PaperWorld.get_solo()
pw.update_stats()
self.validStats(pw.stats)
# TODO check journal and publisher stats
# TODO check that (for instance) department stats add up to institution stats
| # Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Tests statistics update and statistics consistency.
"""
from django.test import TestCase
from backend.tests import PrefilledTest
from backend.globals import get_ccf
from backend.crossref import CrossRefPaperSource
from backend.oai import OaiPaperSource
from papers.models import PaperWorld
class StatisticsTest(PrefilledTest):
@classmethod
def setUpClass(self):
super(StatisticsTest, self).setUpClass()
ccf = get_ccf()
crps = CrossRefPaperSource(ccf)
oai = OaiPaperSource(ccf)
crps.fetch(self.r2, incremental=True)
oai.fetch(self.r3, incremental=True)
def validStats(self, stats):
self.assertTrue(stats.check_values())
self.assertTrue(stats.num_tot > 1)
def test_researcher(self):
self.validStats(self.r2.stats)
def test_department(self):
self.d.update_stats()
self.validStats(self.d.stats)
def test_institution(self):
self.i.update_stats()
self.validStats(self.i.stats)
def test_paperworld(self):
pw = PaperWorld.get_solo()
pw.update_stats()
self.validStats(pw.stats)
# TODO check journal and publisher stats
# TODO check that (for instance) department stats add up to institution stats
| agpl-3.0 | Python |
bfdcc8bb36fba26baed28e2846a7ae193154a12b | fix the method to validate invoice | ClearCorp/odoo-clearcorp,ClearCorp/odoo-clearcorp,ClearCorp/odoo-clearcorp,ClearCorp/odoo-clearcorp | partner_slow_payer/models/res_partner.py | partner_slow_payer/models/res_partner.py | # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from datetime import date
from openerp.exceptions import Warning
class ResPartner(models.Model):
_inherit = 'res.partner'
slow_payer = fields.Boolean('Slow payer', compute='_compute_slow_payer')
@api.multi
def _compute_slow_payer(self):
today = date.today().strftime('%d-%m-%Y')
for partner in self:
invoice = self.env['account.invoice'].search(
[('partner_id', '=', partner.id),
('date_due', '<', today),
('state', '=', 'open')])
if invoice:
partner.slow_payer = True
return True
else:
return False
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def invoice_validate(self):
if not self.env.user.has_group(
'partner_slow_payer.group_partner_slow_payer'):
if self.partner_id.slow_payer:
raise Warning('You have a pending invoice')
elif self.partner_id.credit_limit == 0.0:
raise Warning('You credit is in cero')
else:
super(AccountInvoice, self).invoice_validate()
else:
super(AccountInvoice, self).invoice_validate()
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def action_wait(self):
super(SaleOrder, self).action_wait()
if not self.env.user.has_group(
'partner_slow_payer.group_partner_slow_payer'):
if self.partner_id.credit_limit == 0.0:
raise Warning('You credit is in cero')
elif self.partner_id.slow_payer:
raise Warning('You have a pending invoice')
| # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from datetime import date
from openerp.exceptions import Warning
class ResPartner(models.Model):
_inherit = 'res.partner'
slow_payer = fields.Boolean('Slow payer', compute='_compute_slow_payer')
@api.multi
def _compute_slow_payer(self):
today = date.today().strftime('%d-%m-%Y')
for partner in self:
invoice = self.env['account.invoice'].search(
[('partner_id', '=', partner.id),
('date_due', '<', today),
('state', '=', 'open')])
if invoice:
partner.slow_payer = True
return True
else:
return False
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def invoice_validate(self):
if not self.env.user.has_group(
'partner_slow_payer.group_partner_slow_payer'):
if self.partner_id.slow_payer:
raise Warning('You have a pending invoice')
elif self.partner_id.credit_limit == 0.0:
raise Warning('You credit is in cero')
else:
return self.write({'state': 'open'})
else:
return self.write({'state': 'open'})
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def action_wait(self):
super(SaleOrder, self).action_wait()
if not self.env.user.has_group(
'partner_slow_payer.group_partner_slow_payer'):
if self.partner_id.credit_limit == 0.0:
raise Warning('You credit is in cero')
elif self.partner_id.slow_payer:
raise Warning('You have a pending invoice')
| agpl-3.0 | Python |
63b2ed03b843d4eec0d15231d1b8d612a1df5a10 | use f-strings to help serialize int metadata (fix #63) | EmilStenstrom/conllu | conllu/serializer.py | conllu/serializer.py | import typing as T
from conllu.exceptions import ParseException
if T.TYPE_CHECKING:
from conllu.models import TokenList
def serialize_field(field: T.Any) -> str:
if field is None:
return '_'
if isinstance(field, dict):
fields = []
for key, value in field.items():
if value is None:
value = "_"
fields.append('='.join((key, value)))
return '|'.join(fields)
if isinstance(field, tuple):
return "".join([serialize_field(item) for item in field])
if isinstance(field, list):
if len(field[0]) != 2:
raise ParseException("Can't serialize '{}', invalid format".format(field))
return "|".join([serialize_field(value) + ":" + str(key) for key, value in field])
return "{}".format(field)
def serialize(tokenlist: 'TokenList') -> str:
lines = []
if tokenlist.metadata:
for key, value in tokenlist.metadata.items():
if value:
line = f"# {key} = {value}"
else:
line = f"# {key}"
lines.append(line)
for token_data in tokenlist:
line = '\t'.join(serialize_field(val) for val in token_data.values())
lines.append(line)
return '\n'.join(lines) + "\n\n"
| import typing as T
from conllu.exceptions import ParseException
if T.TYPE_CHECKING:
from conllu.models import TokenList
def serialize_field(field: T.Any) -> str:
if field is None:
return '_'
if isinstance(field, dict):
fields = []
for key, value in field.items():
if value is None:
value = "_"
fields.append('='.join((key, value)))
return '|'.join(fields)
if isinstance(field, tuple):
return "".join([serialize_field(item) for item in field])
if isinstance(field, list):
if len(field[0]) != 2:
raise ParseException("Can't serialize '{}', invalid format".format(field))
return "|".join([serialize_field(value) + ":" + str(key) for key, value in field])
return "{}".format(field)
def serialize(tokenlist: 'TokenList') -> str:
lines = []
if tokenlist.metadata:
for key, value in tokenlist.metadata.items():
if value:
line = "# " + key + " = " + value
else:
line = "# " + key
lines.append(line)
for token_data in tokenlist:
line = '\t'.join(serialize_field(val) for val in token_data.values())
lines.append(line)
return '\n'.join(lines) + "\n\n"
| mit | Python |
c6b9db6842f5c1929fd67e13647fe81b9242b328 | return blob count | scienceopen/CVutils | connectedComponents.py | connectedComponents.py | import cv2
try: #OpenCV 2.4
from cv2 import SimpleBlobDetector as SimpleBlobDetector
except ImportError: #OpenCV 3
from cv2 import SimpleBlobDetector_create as SimpleBlobDetector
def doblob(morphed,blobdet,img):
"""
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
"""
keypoints = blobdet.detect(morphed)
nkey = len(keypoints)
final = img.copy() # is the .copy necessary?
final = cv2.drawKeypoints(img, keypoints, outImage=final,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#%% plot count of blobs (comment out if you don't want it)
cv2.putText(final, text=str(nkey), org=(int(img.shape[1]*.9),25),
fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2,
color=(0,255,0), thickness=2)
return final,nkey
def setupblob(minarea, maxarea, mindist):
blobparam = cv2.SimpleBlobDetector_Params()
blobparam.filterByArea = True
blobparam.filterByColor = False
blobparam.filterByCircularity = False
blobparam.filterByInertia = False
blobparam.filterByConvexity = False
blobparam.minDistBetweenBlobs = mindist
blobparam.minArea = minarea
blobparam.maxArea = maxarea
#blobparam.minThreshold = 40 #we have already made a binary image
return SimpleBlobDetector(blobparam) | import cv2
try: #OpenCV 2.4
from cv2 import SimpleBlobDetector as SimpleBlobDetector
except ImportError: #OpenCV 3
from cv2 import SimpleBlobDetector_create as SimpleBlobDetector
def doblob(morphed,blobdet,img):
"""
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
"""
keypoints = blobdet.detect(morphed)
nkey = len(keypoints)
final = img.copy() # is the .copy necessary?
final = cv2.drawKeypoints(img, keypoints, outImage=final,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#%% plot count of blobs (comment out if you don't want it)
cv2.putText(final, text=str(nkey), org=(int(img.shape[1]*.9),25),
fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2,
color=(0,255,0), thickness=2)
return final
def setupblob(minarea, maxarea, mindist):
blobparam = cv2.SimpleBlobDetector_Params()
blobparam.filterByArea = True
blobparam.filterByColor = False
blobparam.filterByCircularity = False
blobparam.filterByInertia = False
blobparam.filterByConvexity = False
blobparam.minDistBetweenBlobs = mindist
blobparam.minArea = minarea
blobparam.maxArea = maxarea
#blobparam.minThreshold = 40 #we have already made a binary image
return SimpleBlobDetector(blobparam) | mit | Python |
021230bb5284de0fa5454df5f933e38b3539acd4 | add devilvery trip list | singesavant/salesmonkey | salesmonkey/delivery/rest.py | salesmonkey/delivery/rest.py | import logging
from werkzeug.exceptions import (
NotFound
)
from flask_apispec import (
marshal_with,
MethodResource,
use_kwargs
)
from erpnext_client.documents import (
ERPDeliveryTrip,
ERPContact,
ERPAddress
)
from erpnext_client.schemas import (
ERPDeliveryTripSchema,
ERPDeliveryStopSchema,
ERPContactSchema,
ERPAddressSchema
)
from webargs import fields
from salesmonkey import cache
from ..rest import api_v1
from ..erpnext import erp_client
LOGGER = logging.getLogger(__name__)
class DeliveryStopWithContactSchema(ERPDeliveryStopSchema):
contact = fields.Nested("ERPContactSchema")
address = fields.Nested("ERPAddressSchema")
class DeliveryTripWithContactSchema(ERPDeliveryTripSchema):
stops = fields.Nested("DeliveryStopWithContactSchema", load_from="delivery_stops", many=True)
@marshal_with(ERPDeliveryTripSchema(many=True))
class DeliveryTripList(MethodResource):
"""
List available Delivery Trip List
"""
@cache.memoize(timeout=10)
def get(self):
try:
trip_list = erp_client.query(ERPDeliveryTrip).list(filters=[['Delivery Trip', 'status', '=', 'Scheduled']])
except ERPDeliveryTrip.DoesNotExist:
raise NotFound
return trip_list
class DeliveryTripDetails(MethodResource):
"""
Help Delivery by giving customer infos and gmap trip
"""
@cache.memoize(timeout=3600)
def _get_contact_info(self, contact_name):
return erp_client.query(ERPContact).get(contact_name)
@cache.memoize(timeout=3600)
def _get_address_info(self, address_name):
return erp_client.query(ERPAddress).get(address_name)
@marshal_with(DeliveryTripWithContactSchema)
@cache.memoize(timeout=10)
def get(self, name):
try:
trip = erp_client.query(ERPDeliveryTrip).get(name)
except ERPDeliveryTrip.DoesNotExist:
raise NotFound
for stop in trip['stops']:
full_contact = self._get_contact_info(stop['contact'])
stop['address'] = self._get_address_info(stop['address'])
stop['contact'] = full_contact
return trip
api_v1.register('/delivery/trip/<name>', DeliveryTripDetails)
api_v1.register('/delivery/trip/', DeliveryTripList)
| import logging
from werkzeug.exceptions import (
NotFound
)
from flask_apispec import (
marshal_with,
MethodResource,
use_kwargs
)
from erpnext_client.documents import (
ERPDeliveryTrip,
ERPContact,
ERPAddress
)
from erpnext_client.schemas import (
ERPDeliveryTripSchema,
ERPDeliveryStopSchema,
ERPContactSchema,
ERPAddressSchema
)
from webargs import fields
from salesmonkey import cache
from ..rest import api_v1
from ..erpnext import erp_client
LOGGER = logging.getLogger(__name__)
class DeliveryStopWithContactSchema(ERPDeliveryStopSchema):
contact = fields.Nested("ERPContactSchema")
address = fields.Nested("ERPAddressSchema")
class DeliveryTripWithContactSchema(ERPDeliveryTripSchema):
stops = fields.Nested("DeliveryStopWithContactSchema", load_from="delivery_stops", many=True)
class DeliveryTrip(MethodResource):
"""
Help Delivery by giving customer infos and gmap trip
"""
@cache.memoize(timeout=3600)
def _get_contact_info(self, contact_name):
return erp_client.query(ERPContact).get(contact_name)
@cache.memoize(timeout=3600)
def _get_address_info(self, address_name):
return erp_client.query(ERPAddress).get(address_name)
@marshal_with(DeliveryTripWithContactSchema)
@cache.memoize(timeout=1)
def get(self, name):
try:
trip = erp_client.query(ERPDeliveryTrip).get(name)
except ERPDeliveryTrip.DoesNotExist:
raise NotFound
for stop in trip['stops']:
full_contact = self._get_contact_info(stop['contact'])
stop['address'] = self._get_address_info(stop['address'])
stop['contact'] = full_contact
return trip
api_v1.register('/delivery/trip/<name>', DeliveryTrip)
| agpl-3.0 | Python |
a93fc1d9455cc520f2cdef819205b13640677355 | Add import errno (#3) | SandstoneHPC/sandstone-spawner | sandstone_spawner/spawner.py | sandstone_spawner/spawner.py | from jupyterhub.spawner import LocalProcessSpawner
from jupyterhub.utils import random_port
from subprocess import Popen
from tornado import gen
import pipes
import shutil
import os
import errno
# This is the path to the sandstone-jupyterhub script
APP_PATH = os.environ.get('SANDSTONE_APP_PATH')
SANDSTONE_SETTINGS = os.environ.get('SANDSTONE_SETTINGS')
class SandstoneSpawner(LocalProcessSpawner):
@gen.coroutine
def start(self):
"""Start the single-user server."""
self.port = random_port()
cmd = [APP_PATH]
env = self.get_env()
env['SANDSTONE_SETTINGS'] = SANDSTONE_SETTINGS
args = self.get_args()
# print(args)
cmd.extend(args)
self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
try:
self.proc = Popen(cmd, env=env,
preexec_fn=self.make_preexec_fn(self.user.name),
start_new_session=True, # don't forward signals
)
except PermissionError:
# use which to get abspath
script = shutil.which(cmd[0]) or cmd[0]
self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
script, self.user.name,
)
raise
self.pid = self.proc.pid
if self.__class__ is not LocalProcessSpawner:
# subclasses may not pass through return value of super().start,
# relying on deprecated 0.6 way of setting ip, port,
# so keep a redundant copy here for now.
# A deprecation warning will be shown if the subclass
# does not return ip, port.
if self.ip:
self.user.server.ip = self.ip
self.user.server.port = self.port
return (self.ip or '127.0.0.1', self.port)
@gen.coroutine
def _signal(self, sig):
try:
os.killpg(os.getpgid(self.pid), sig)
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
raise
return True
| from jupyterhub.spawner import LocalProcessSpawner
from jupyterhub.utils import random_port
from subprocess import Popen
from tornado import gen
import pipes
import shutil
import os
# This is the path to the sandstone-jupyterhub script
APP_PATH = os.environ.get('SANDSTONE_APP_PATH')
SANDSTONE_SETTINGS = os.environ.get('SANDSTONE_SETTINGS')
class SandstoneSpawner(LocalProcessSpawner):
@gen.coroutine
def start(self):
"""Start the single-user server."""
self.port = random_port()
cmd = [APP_PATH]
env = self.get_env()
env['SANDSTONE_SETTINGS'] = SANDSTONE_SETTINGS
args = self.get_args()
# print(args)
cmd.extend(args)
self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
try:
self.proc = Popen(cmd, env=env,
preexec_fn=self.make_preexec_fn(self.user.name),
start_new_session=True, # don't forward signals
)
except PermissionError:
# use which to get abspath
script = shutil.which(cmd[0]) or cmd[0]
self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
script, self.user.name,
)
raise
self.pid = self.proc.pid
if self.__class__ is not LocalProcessSpawner:
# subclasses may not pass through return value of super().start,
# relying on deprecated 0.6 way of setting ip, port,
# so keep a redundant copy here for now.
# A deprecation warning will be shown if the subclass
# does not return ip, port.
if self.ip:
self.user.server.ip = self.ip
self.user.server.port = self.port
return (self.ip or '127.0.0.1', self.port)
@gen.coroutine
def _signal(self, sig):
try:
os.killpg(os.getpgid(self.pid), sig)
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
raise
return True
| mit | Python |
85c1f90a6e46e76ab3a59580a2a1620d849a987f | Change namespaces | jessepeng/coburg-city-memory,jessepeng/coburg-city-memory,fraunhoferfokus/mobile-city-memory,fraunhoferfokus/mobile-city-memory | Mobiles_Stadtgedaechtnis/urls.py | Mobiles_Stadtgedaechtnis/urls.py | from django.conf.urls import patterns, include, url
import stadtgedaechtnis_backend.admin
import settings
js_info_dict = {
'packages': ('stadtgedaechtnis_backend',),
}
urlpatterns = patterns('',
url(r'^', include('stadtgedaechtnis_backend.urls', namespace="stadtgedaechtnis_backend")),
url(r'^', include('stadtgedaechtnis_frontend.urls', namespace="stadtgedaechtnis_frontend")),
url(r'^admin/', include(stadtgedaechtnis_backend.admin.site.urls)),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^jsi18n/', 'django.views.i18n.javascript_catalog', js_info_dict),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, }),
)
| from django.conf.urls import patterns, include, url
import stadtgedaechtnis.admin
import settings
js_info_dict = {
'packages': ('stadtgedaechtnis',),
}
urlpatterns = patterns('',
url(r'^', include('stadtgedaechtnis.urls', namespace="stadtgedaechtnis")),
url(r'^', include('stadtgedaechtnis_frontend.urls', namespace="stadtgedaechtnis_frontend")),
url(r'^admin/', include(stadtgedaechtnis.admin.site.urls)),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^jsi18n/', 'django.views.i18n.javascript_catalog', js_info_dict),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, }),
)
| mit | Python |
a0b4f997b2f0d29a99a3b95e247169b9b0222b7a | Bump boto version | pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus | packages/pegasus-worker/setup.py | packages/pegasus-worker/setup.py | import os
import subprocess
from setuptools import find_packages, setup
src_dir = os.path.dirname(__file__)
home_dir = os.path.abspath(os.path.join(src_dir, "../.."))
install_requires = [
"six>=1.9.0",
"boto==2.49.0",
"globus-sdk==1.4.1",
]
#
# Utility function to read the pegasus Version.in file
#
def read_version():
return (
subprocess.Popen(
"%s/release-tools/getversion" % home_dir, stdout=subprocess.PIPE, shell=True
)
.communicate()[0]
.decode()
.strip()
)
#
# Utility function to read the README file.
#
def read(fname):
return open(os.path.join(src_dir, fname)).read()
setup(
name="pegasus-wms.worker",
version=read_version(),
author="Pegasus Team",
author_email="pegasus@isi.edu",
description="Pegasus Workflow Management System Python API",
long_description=read("README.md"),
long_description_content_type="text/markdown",
license="Apache2",
url="http://pegasus.isi.edu",
python_requires=">=2.6,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
keywords=["scientific workflows"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
namespace_packages=["Pegasus", "Pegasus.cli", "Pegasus.tools"],
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
)
| import os
import subprocess
from setuptools import find_packages, setup
src_dir = os.path.dirname(__file__)
home_dir = os.path.abspath(os.path.join(src_dir, "../.."))
install_requires = [
"six>=1.9.0",
"boto==2.48.0",
"globus-sdk==1.4.1",
]
#
# Utility function to read the pegasus Version.in file
#
def read_version():
return (
subprocess.Popen(
"%s/release-tools/getversion" % home_dir, stdout=subprocess.PIPE, shell=True
)
.communicate()[0]
.decode()
.strip()
)
#
# Utility function to read the README file.
#
def read(fname):
return open(os.path.join(src_dir, fname)).read()
setup(
name="pegasus-wms.worker",
version=read_version(),
author="Pegasus Team",
author_email="pegasus@isi.edu",
description="Pegasus Workflow Management System Python API",
long_description=read("README.md"),
long_description_content_type="text/markdown",
license="Apache2",
url="http://pegasus.isi.edu",
python_requires=">=2.6,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
keywords=["scientific workflows"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
namespace_packages=["Pegasus", "Pegasus.cli", "Pegasus.tools"],
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
)
| apache-2.0 | Python |
83f56db640dd22a40693a31826b6c89de3adf9a1 | Fix bug | pmatos/maxsatzilla,pmatos/maxsatzilla,pmatos/maxsatzilla,pmatos/maxsatzilla,pmatos/maxsatzilla | run-instance-set.py | run-instance-set.py | #!/usr/bin/env python
import sys, os, getopt, glob, os.path, signal, tempfile
opts, args = getopt.getopt( sys.argv[1:], 'p:' )
solver = args[0]
set_list = args[1]
execution_list = args[2:]
if len( opts ) == 1:
print '# Percentatge = ' + opts[0][1]
percentatge = int( opts[0][1] )
else:
percentatge = 100
timeout = '1000'
memout = '2000000'
sys_limits = 'ulimit -t ' + timeout + '; ulimit -m ' + memout + '; ulimit -d ' + memout + '; ulimit -v ' + memout + ';'
total_instances_run = 0
def handler( signal, frame ):
print "Script interrupted"
sys.exit(0)
signal.signal( signal.SIGINT, handler )
for set in open( set_list ):
if set[0] == '#':
continue
name, path, number = set.split()
if name in execution_list:
counter = 0
times = int( number ) * percentatge / 100
total_instances_run += times
print '# Running ' + solver + ' with ' + name + ' ' + str( times ) + '/' + number
outfile = os.path.basename( solver.split()[0] ) + '.' + name + '.out'
os.system( 'cp /dev/null ' + outfile )
for file in glob.glob( path ):
if counter < times:
counter += 1
timefile = tempfile.NamedTemporaryFile()
os.popen( sys_limits + '/usr/bin/time -p ' + solver + ' ' + file + ' >> ' + outfile +' 2> ' + timefile.name )
print 'o ' + file + ' ' + os.popen( 'grep user ' + timefile.name + ' | cut -c-5' ).read()
print '# Total instances run ' + str( total_instances_run )
| #!/usr/bin/env python
import sys, os, getopt, glob, os.path, signal, tempfile
opts, args = getopt.getopt( sys.argv[1:], 'p:' )
solver = args[0]
set_list = args[1]
execution_list = args[2:]
if len( opts ) == 1:
print '# Percentatge = ' + opts[0][1]
percentatge = int( opts[0][1] )
else:
percentatge = 100
timeout = '1000'
memout = '2000000'
sys_limits = 'ulimit -t ' + timeout + '; ulimit -m ' + memout + '; ulimit -d ' + memout + '; ulimit -v ' + memout + ';'
total_instances_run = 0
def handler( signal, frame ):
print "Script interrupted"
sys.exit(0)
signal.signal( signal.SIGINT, handler )
for set in open( set_list ):
if set[0] == '#':
continue
name, path, number = set.split()
if name in execution_list:
counter = 0
times = int( number ) * percentatge / 100
total_instances_run += times
print '# Running ' + solver + ' with ' + name + ' ' + str( times ) + '/' + number
outfile = os.path.basename( solver.split()[0] ) + '.' + name + '.out'
os.system( 'cp /dev/null ' + outfile )
for file in glob.glob( path ):
if counter < times:
counter += 1
timefile = tempfile.NamedTemporaryFile()
os.popen( sys_limits + '/usr/bin/time -p ' + solver + ' ' + file + ' >> ' + outfile +' 2> ' + timefile.name )
print 'o ' + file + ' ' + os.popen( 'grep user ' + timefile.name + ' | cut -c-5' )
print '# Total instances run ' + str( total_instances_run )
| mit | Python |
66a425599e2b88336cf78f64c8ad04a88045ad1c | Improve docs | KeplerGO/K2metadata,barentsen/K2metadata,barentsen/k2-target-index | scripts/3-create-database.py | scripts/3-create-database.py | """Export a CSV and SQLite database detailing all the K2 target pixel files.
"""
import glob
import logging
import sqlite3
import pandas as pd
log = logging.getLogger(__name__)
log.setLevel("INFO")
# Output filenames
CSV_FILENAME = "../k2-target-pixel-files.csv"
SQLITE_FILENAME = "../k2-target-pixel-files.db"
if __name__ == "__main__":
log.info("Reading the data")
df = pd.concat([pd.read_csv(fn)
for fn
in glob.glob("intermediate-data/*metadata.csv")])
df = df.sort_values("keplerid")
# Write to the CSV file
log.info("Writing {}".format(CSV_FILENAME))
df.to_csv(CSV_FILENAME, index=False)
# Write the SQLite table
log.info("Writing {}".format(SQLITE_FILENAME))
con = sqlite3.connect(SQLITE_FILENAME)
df.to_sql(name='tpf', con=con, if_exists='replace', index=False)
| """Creates an SQLite database detailing all the K2 target pixel files.
TODO
----
* Add an index to the sqlite table?
"""
import glob
import logging
import sqlite3
import pandas as pd
log = logging.getLogger(__name__)
log.setLevel("INFO")
CSV_FILENAME = "../k2-target-pixel-files.csv"
SQLITE_FILENAME = "../k2-target-pixel-files.db"
if __name__ == "__main__":
log.info("Reading the data")
df = pd.concat([pd.read_csv(fn)
for fn
in glob.glob("intermediate-data/*metadata.csv")])
df = df.sort_values("keplerid")
# Write to the CSV file
log.info("Writing {}".format(CSV_FILENAME))
df.to_csv(CSV_FILENAME, index=False)
# Write the SQLite table
log.info("Writing {}".format(SQLITE_FILENAME))
con = sqlite3.connect(SQLITE_FILENAME)
df.to_sql(name='tpf', con=con, if_exists='replace', index=False)
| mit | Python |
4a37cd5dde97d58da43fd664b807362e67a38eeb | bump version | crateio/carrier | conveyor/__init__.py | conveyor/__init__.py | __version__ = "0.1.dev18"
| __version__ = "0.1.dev17"
| bsd-2-clause | Python |
2fab26a2ae3fc3bb76b63d4744accf40fc5db4d6 | Add support for --version. | flaccid/akaudit | akaudit/clidriver.py | akaudit/clidriver.py | #!/usr/bin/env python
# Copyright 2015 Chris Fordham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import akaudit
from akaudit.audit import Auditer
def main(argv = sys.argv, log = sys.stderr):
parser = argparse.ArgumentParser(description='Audit who has access to your homes.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-l', '--log', default='info', help='log level')
parser.add_argument('-i', '--interactive', help='interactive mode (prompts asking if to delete each key)', action="store_true")
parser.add_argument('-v', '--version', action="version", version='%(prog)s ' + akaudit.__version__)
args = parser.parse_args()
auditer = Auditer()
auditer.run_audit(args)
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/env python
# Copyright 2015 Chris Fordham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
from akaudit.audit import Auditer
def main(argv = sys.argv, log = sys.stderr):
parser = argparse.ArgumentParser(description='Audit who has access to your homes.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-l', '--log', default='info', help='Log level')
parser.add_argument('-i', '--interactive', help='Interactive mode (prompts asking if to delete each key)', action="store_true")
args = parser.parse_args()
auditer = Auditer()
auditer.run_audit(args)
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 | Python |
3e957e939fa1ab4a003202c250f5895c2da04312 | use default host | uw-it-cte/wheniwork-restclient,uw-it-cte/wheniwork-restclient | wheniwork/dao.py | wheniwork/dao.py | from os.path import abspath, dirname, join
from restclients_core.dao import DAO
class WhenIWork_DAO(DAO):
def service_name(self):
return 'wheniwork'
def service_mock_paths(self):
return [abspath(join(dirname(__file__), "resources"))]
def get_default_service_setting(self, key):
if "HOST" == key:
return 'https://api.wheniwork.com'
| from os.path import abspath, dirname, join
from restclients_core.dao import DAO
class WhenIWork_DAO(DAO):
def service_name(self):
return 'wheniwork'
def service_mock_paths(self):
return [abspath(join(dirname(__file__), "resources"))]
| apache-2.0 | Python |
69b50cbe9816bf5c4cd8c405d28d75cd31e3c0ae | Fix alignak_home for windows | Alignak-monitoring-contrib/alignak-app,Alignak-monitoring-contrib/alignak-app | alignak_app/utils.py | alignak_app/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
"""
Application logs
"""
from logging import Formatter
from logging import DEBUG
from logging.handlers import TimedRotatingFileHandler
import os
import sys
def create_logger(logger): # pragma: no cover
"""
Create the logger for Alignak-App
:param logger: the main logger.
:type logger: :class:`~`
"""
path = get_alignak_home() + '/alignak_app'
filename = 'alignakapp.log'
if not os.path.isdir(path):
# noinspection PyBroadException
try: # pragma: no cover - not testable
os.makedirs(path)
except Exception:
path = '.'
if not os.access(path, os.W_OK):
path = '.'
formatter = Formatter('[%(asctime)s] - %(name)-12s - %(levelname)s - %(message)s')
file_handler = TimedRotatingFileHandler(
filename=os.path.join(path, filename),
when="D",
interval=1,
backupCount=6
)
file_handler.setLevel(DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(DEBUG)
def get_alignak_home(): # pragma: no cover
"""
Return user home.
"""
# Get HOME and USER
if 'linux' in sys.platform or 'sunos5' in sys.platform:
alignak_home = os.environ['HOME']
alignak_home += '/.local'
elif 'win32' in sys.platform:
alignak_home = os.environ['USERPROFILE']
alignak_home += '\\AppData\\Roaming\\Python\\'
else:
sys.exit('Application can\'t find the user HOME.')
# Prevent from root user
if 'root' in alignak_home or not alignak_home:
sys.exit('Application can\'t find the user HOME or maybe you are connected as ROOT.')
alignak_home += '/.local'
return alignak_home
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
"""
Application logs
"""
from logging import Formatter
from logging import DEBUG
from logging.handlers import TimedRotatingFileHandler
import os
import sys
def create_logger(logger): # pragma: no cover
"""
Create the logger for Alignak-App
:param logger: the main logger.
:type logger: :class:`~`
"""
path = get_alignak_home() + '/alignak_app'
filename = 'alignakapp.log'
if not os.path.isdir(path):
# noinspection PyBroadException
try: # pragma: no cover - not testable
os.makedirs(path)
except Exception:
path = '.'
if not os.access(path, os.W_OK):
path = '.'
formatter = Formatter('[%(asctime)s] - %(name)-12s - %(levelname)s - %(message)s')
file_handler = TimedRotatingFileHandler(
filename=os.path.join(path, filename),
when="D",
interval=1,
backupCount=6
)
file_handler.setLevel(DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(DEBUG)
def get_alignak_home(): # pragma: no cover
"""
Return user home.
"""
# Get HOME and USER
if 'linux' in sys.platform or 'sunos5' in sys.platform:
alignak_home = os.environ['HOME']
elif 'win32' in sys.platform:
alignak_home = os.environ['USERPROFILE']
else:
sys.exit('Application can\'t find the user HOME or maybe you are connected as ROOT.')
if 'root' in alignak_home or not alignak_home:
sys.exit('Application can\'t find the user HOME or maybe you are connected as ROOT.')
if alignak_home.endswith('/'):
alignak_home = alignak_home[:-1]
alignak_home += '/.local'
return alignak_home
| agpl-3.0 | Python |
c635a8bd409dcd985352c821410949c39620dcbb | Fix 0016 | Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python | Drake-Z/0016/0016.py | Drake-Z/0016/0016.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''第 0016 题: 纯文本文件 numbers.txt, 里面的内容(包括方括号)如下所示:
[
[1, 82, 65535],
[20, 90, 13],
[26, 809, 1024]
]
请将上述内容写到 numbers.xls 文件中。'''
__author__ = 'Drake-Z'
import os
import re
from collections import OrderedDict
import xlwt
def shuju(data, re1, re2):
c = OrderedDict([])
re_xuhao = re.compile(r'%s' % re1)
re_yuansu = re.compile(r'%s' % re2)
a = re_xuhao.findall(data) #得到序号
if len(a)==0:
for d in range(0, hangshu):
a.append(d)
b = re_yuansu.findall(data) #得到具体数据
for m, n in zip(a, b): #将数据转为Dict
n = re.split(r',', n)
c[m] = n
writeFlie(c, hangshu, lieshu)
def writeFlie(dictdata, hangshu, lieshu):
workbook = xlwt.Workbook(encoding = 'utf-8') #创建工作薄
worksheet = workbook.add_sheet('My Worksheet') #创建表
num = list(dictdata.keys()) #得到序号
for i in range(0, hangshu):
for m in range(0, lieshu):
worksheet.write(i, m, label = dictdata[num[i]][m])
workbook.save('numbers.xls')
if __name__ == '__main__':
file = open('numbers.txt', 'r', encoding='utf-8')
hangshu = 3
lieshu = 3
re1 = '%'
re2 = '\[(.*?)\]'
shuju(file.read(), re1, re2) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''第 0015 题: 纯文本文件 city.txt为城市信息, 里面的内容(包括花括号)如下所示:
{
"1" : "上海",
"2" : "北京",
"3" : "成都"
}
请将上述内容写到 city.xls 文件中。'''
__author__ = 'Drake-Z'
import os
import re
from collections import OrderedDict
import xlwt
def shuju(data, re1, re2):
c = OrderedDict([])
re_xuhao = re.compile(r'%s' % re1)
re_yuansu = re.compile(r'%s' % re2)
a = re_xuhao.findall(data) #得到序号
if len(a)==0:
for d in range(0, hangshu):
a.append(d)
b = re_yuansu.findall(data) #得到具体数据
for m, n in zip(a, b): #将数据转为Dict
n = re.split(r',', n)
c[m] = n
writeFlie(c, hangshu, lieshu)
def writeFlie(dictdata, hangshu, lieshu):
workbook = xlwt.Workbook(encoding = 'utf-8') #创建工作薄
worksheet = workbook.add_sheet('My Worksheet') #创建表
num = list(dictdata.keys()) #得到序号
for i in range(0, hangshu):
for m in range(0, lieshu):
worksheet.write(i, m, label = dictdata[num[i]][m])
workbook.save('numbers.xls')
if __name__ == '__main__':
file = open('numbers.txt', 'r', encoding='utf-8')
hangshu = 3
lieshu = 3
re1 = '%'
re2 = '\[(.*?)\]'
shuju(file.read(), re1, re2) | mit | Python |
dd38751a775cdbf5a006dd4fd3ce1ed335d94549 | use x0 = '00C' | simpeg/simpeg,simpeg/discretize,simpeg/discretize,simpeg/discretize | SimPEG/Examples/Mesh_Plot_Cyl.py | SimPEG/Examples/Mesh_Plot_Cyl.py | import numpy as np
import matplotlib.pyplot as plt
from SimPEG import Mesh, Utils, Maps
# Set a nice colormap!
plt.set_cmap(plt.get_cmap('viridis'))
def run(plotIt=True):
"""
Plot Mirrored Cylindrically Symmetric Model
===========================================
Here, we demonstrate plotting a model on a cylindrically
symmetric mesh with the plotting symmetric about x=0.
"""
sig_halfspace = 1e-6
sig_sphere = 1e0
sig_air = 1e-8
sphere_z = -50.
sphere_radius = 30.
# x-direction
cs = 1
nc = np.ceil(2.5*(- (sphere_z-sphere_radius))/cs)
# cell spacings in the x and z directions
h = Utils.meshTensor([(cs, nc)])
# define a mesh
mesh = Mesh.CylMesh([h, 1, h], x0='00C')
# Put the model on the mesh
sigma = sig_air*np.ones(mesh.nC) # start with air cells
sigma[mesh.gridCC[:, 2] < 0.] = sig_halfspace # cells below the earth
# indices of the sphere
sphere_ind = (
(mesh.gridCC[:, 0]**2 + (mesh.gridCC[:, 2] - sphere_z)**2) <=
sphere_radius**2
)
sigma[sphere_ind] = sig_sphere # sphere
# Plot a cross section through the mesh
fig, ax = plt.subplots(2, 1)
plt.colorbar(mesh.plotImage(np.log10(sigma), ax=ax[0])[0], ax=ax[0])
ax[0].set_title('mirror = False')
ax[0].axis('equal')
ax[0].set_xlim([-200., 200.])
plt.colorbar(
mesh.plotImage(np.log10(sigma), ax=ax[1], mirror=True)[0], ax=ax[1]
)
ax[1].set_title('mirror = True')
ax[1].axis('equal')
ax[1].set_xlim([-200., 200.])
plt.tight_layout()
if __name__ == '__main__':
run()
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
from SimPEG import Mesh, Utils, Maps
# Set a nice colormap!
plt.set_cmap(plt.get_cmap('viridis'))
def run(plotIt=True):
"""
Plot Mirrored Cylindrically Symmetric Model
===========================================
Here, we demonstrate plotting a model on a cylindrically
symmetric mesh with the plotting symmetric about x=0.
"""
sig_halfspace = 1e-6
sig_sphere = 1e0
sig_air = 1e-8
sphere_z = -50.
sphere_radius = 30.
# x-direction
cs = 1
nc = np.ceil(2.5*(- (sphere_z-sphere_radius))/cs)
# cell spacings in the x and z directions
h = Utils.meshTensor([(cs, nc)])
# define a mesh
mesh = Mesh.CylMesh([h, 1, h], x0 = np.r_[0., 0., -h.sum()/2.])
# Put the model on the mesh
sigma = sig_air*np.ones(mesh.nC) # start with air cells
sigma[mesh.gridCC[:, 2] < 0.] = sig_halfspace # cells below the earth
# indices of the sphere
sphere_ind = (
(mesh.gridCC[:, 0]**2 + (mesh.gridCC[:, 2] - sphere_z)**2) <=
sphere_radius**2
)
sigma[sphere_ind] = sig_sphere # sphere
# Plot a cross section through the mesh
fig, ax = plt.subplots(2, 1)
plt.colorbar(mesh.plotImage(np.log10(sigma), ax=ax[0])[0], ax=ax[0])
ax[0].set_title('mirror = False')
ax[0].axis('equal')
ax[0].set_xlim([-200., 200.])
plt.colorbar(
mesh.plotImage(np.log10(sigma), ax=ax[1], mirror=True)[0], ax=ax[1]
)
ax[1].set_title('mirror = True')
ax[1].axis('equal')
ax[1].set_xlim([-200., 200.])
plt.tight_layout()
if __name__ == '__main__':
run()
plt.show()
| mit | Python |
4b0dce64ccac8b8658e49e0ff36714872158a428 | Add missing re.UNICODE flag to re.compile call | samjabrahams/anchorhub | anchorhub/builtin/github/cstrategies.py | anchorhub/builtin/github/cstrategies.py | """
Concrete CollectorStrategy classes for the GitHub built-in module
"""
import re
from anchorhub.collector import CollectorStrategy
class MarkdownATXCollectorStrategy(CollectorStrategy):
"""
Concrete collector strategy used to parse ATX style headers that have
AnchorHub tags specified
ATX style headers begin with 1-5 hash '#' characters, and then use
the rest of the line to specify the text of the header. For example:
# This is an ATX header!
### So is this!
"""
def __init__(self, opts):
"""
Initializes the object to utilize the AnchorHub tag wrapper
as specified.
:param opts: Namespace with the attribute 'wrapper_pattern',
typically obtained through command-line argument parsing
"""
self._open = opts.open
self._close = opts.close
self._header_pattern = r"^#+ .+" + opts.wrapper_regex + r"\s*$"
self._regex = re.compile(self._header_pattern, re.UNICODE)
def test(self, file_lines, index):
"""
Is this line an ATX header with an AnchorHub tag specified? Return
True if it is.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: True if the line in file_lines at index is an ATX header
with an AnchorHub tag declared. False otherwise
"""
return self._regex.match(file_lines[index])
def get(self, file_lines, index):
"""
Extract the specified AnchorHub tag, as well as the portion of the
line that should be converted from the ATX style Markdown header.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: [tag, string] - tag: the extracted AnchorHub tag. string -
the portion of the header that should be converted into an anchor
"""
line = file_lines[index]
start_index = line.find('# ') + 2 # Start index for header text
start_tag = line.rfind(self._open) # Start index of AnchorHub tag
end_tag = line.rfind(self._close) # End index of AnchorHub tag
# The magic '+1' below knocks out the hash '#' character from extraction
tag = line[start_tag + len(self._open) + 1:end_tag]
string = line[start_index:start_tag]
return [tag, string]
| """
Concrete CollectorStrategy classes for the GitHub built-in module
"""
import re
from anchorhub.collector import CollectorStrategy
class MarkdownATXCollectorStrategy(CollectorStrategy):
"""
Concrete collector strategy used to parse ATX style headers that have
AnchorHub tags specified
ATX style headers begin with 1-5 hash '#' characters, and then use
the rest of the line to specify the text of the header. For example:
# This is an ATX header!
### So is this!
"""
def __init__(self, opts):
"""
Initializes the object to utilize the AnchorHub tag wrapper
as specified.
:param opts: Namespace with the attribute 'wrapper_pattern',
typically obtained through command-line argument parsing
"""
super(MarkdownATXCollectorStrategy, self).__init__()
self._open = opts.open
self._close = opts.close
self._header_pattern = r"^#+ .+" + opts.wrapper_regex + r"\s*$"
self._regex = re.compile(self._header_pattern)
def test(self, file_lines, index):
"""
Is this line an ATX header with an AnchorHub tag specified? Return
True if it is.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: True if the line in file_lines at index is an ATX header
with an AnchorHub tag declared. False otherwise
"""
return self._regex.match(file_lines[index])
def get(self, file_lines, index):
"""
Extract the specified AnchorHub tag, as well as the portion of the
line that should be converted from the ATX style Markdown header.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: [tag, string] - tag: the extracted AnchorHub tag. string -
the portion of the header that should be converted into an anchor
"""
line = file_lines[index]
start_index = line.find('# ') + 2 # Start index for header text
start_tag = line.rfind(self._open) # Start index of AnchorHub tag
end_tag = line.rfind(self._close) # End index of AnchorHub tag
# The magic '+1' below knocks out the hash '#' character from extraction
tag = line[start_tag + len(self._open) + 1:end_tag]
string = line[start_index:start_tag]
return [tag, string]
| apache-2.0 | Python |
03b8e53bae5125ddf79df4803152909a27217510 | Bump version -> 0.2.4 | nickstenning/tagalog,alphagov/tagalog,nickstenning/tagalog,alphagov/tagalog | tagalog/__init__.py | tagalog/__init__.py | from __future__ import unicode_literals
import datetime
import os
from tagalog import io
__all__ = ['io', 'stamp', 'tag', 'fields']
__version__ = '0.2.4'
# Use UTF8 for stdin, stdout, stderr
os.environ['PYTHONIOENCODING'] = 'utf-8'
def now():
return _now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def stamp(iterable, key='@timestamp'):
"""
Compute an accurate timestamp for each dict or dict-like object in
``iterable``, adding an accurate timestamp to each one when received. The
timestamp is a usecond-precision ISO8601 string. The timestamp is added to
each dict with a key set by ``key``.
"""
for item in iterable:
item[key] = now()
yield item
def fields(iterable, fields=None):
"""
Add a set of fields to each item in ``iterable``. The set of fields have a
key=value format. '@' are added to the front of each key.
"""
if not fields:
for item in iterable:
yield item
prepared_fields = _prepare_fields(fields)
for item in iterable:
yield _process_fields(item, prepared_fields)
def _process_fields(item, fields):
item.update(fields)
return item
def _prepare_fields(fields):
prepared_fields = {}
for field in fields:
split_field = field.split('=', 1)
if len(split_field) > 1:
prepared_fields[split_field[0]] = split_field[1][:]
return { '@fields': prepared_fields }
def tag(iterable, tags=None, key='@tags'):
"""
Add tags to each dict or dict-like object in ``iterable``. Tags are added
to each dict with a key set by ``key``. If a key already exists under the
key given by ``key``, this function will attempt to ``.extend()``` it, but
will fall back to replacing it in the event of error.
"""
if not tags:
for item in iterable:
yield item
else:
for item in iterable:
yield _tag(item, tags, key)
def _now():
return datetime.datetime.utcnow()
def _tag(item, tags, key):
if item.get(key):
try:
item[key].extend(tags)
return item
except (AttributeError, TypeError):
pass
item[key] = tags[:]
return item
| from __future__ import unicode_literals
import datetime
import os
from tagalog import io
__all__ = ['io', 'stamp', 'tag', 'fields']
__version__ = '0.2.3'
# Use UTF8 for stdin, stdout, stderr
os.environ['PYTHONIOENCODING'] = 'utf-8'
def now():
return _now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def stamp(iterable, key='@timestamp'):
"""
Compute an accurate timestamp for each dict or dict-like object in
``iterable``, adding an accurate timestamp to each one when received. The
timestamp is a usecond-precision ISO8601 string. The timestamp is added to
each dict with a key set by ``key``.
"""
for item in iterable:
item[key] = now()
yield item
def fields(iterable, fields=None):
"""
Add a set of fields to each item in ``iterable``. The set of fields have a
key=value format. '@' are added to the front of each key.
"""
if not fields:
for item in iterable:
yield item
prepared_fields = _prepare_fields(fields)
for item in iterable:
yield _process_fields(item, prepared_fields)
def _process_fields(item, fields):
item.update(fields)
return item
def _prepare_fields(fields):
prepared_fields = {}
for field in fields:
split_field = field.split('=', 1)
if len(split_field) > 1:
prepared_fields[split_field[0]] = split_field[1][:]
return { '@fields': prepared_fields }
def tag(iterable, tags=None, key='@tags'):
"""
Add tags to each dict or dict-like object in ``iterable``. Tags are added
to each dict with a key set by ``key``. If a key already exists under the
key given by ``key``, this function will attempt to ``.extend()``` it, but
will fall back to replacing it in the event of error.
"""
if not tags:
for item in iterable:
yield item
else:
for item in iterable:
yield _tag(item, tags, key)
def _now():
return datetime.datetime.utcnow()
def _tag(item, tags, key):
if item.get(key):
try:
item[key].extend(tags)
return item
except (AttributeError, TypeError):
pass
item[key] = tags[:]
return item
| mit | Python |
655cdb0c58a6eeb268c80818fa3a245c06f9404b | put tree_li on old | francisl/vcms,francisl/vcms,francisl/vcms,francisl/vcms | modules/vcms/apps/vwm/tree/generator.py | modules/vcms/apps/vwm/tree/generator.py | # encoding: utf-8
# copyright Vimba inc. 2010
# programmer : Francis Lavoie
from django import template
from django.template.loader import render_to_string
register = template.Library()
@register.inclusion_tag('tree/tree_dl.html')
def generetate_dl_tree(data, cssid, cssclass):
return {"data":data, "cssid":cssid, "cssclass": cssclass}
@register.inclusion_tag('tree_li.html')
def generetate_li_tree(data, cssid, cssclass):
return data, cssid, cssclass
def _generetate_dl_tree(data, cssid, cssclass):
""" When called from a function instead of a template tag
"""
return render_to_string('tree/tree_dl.html', {"data":data, "cssid":cssid, "cssclass": cssclass})
def _generetate_li_tree(data, cssid, cssclass):
return data, cssid, cssclass
def generate_tree(data, cssid="", cssclass="", type="dl"):
""" Take a list and generate a html tree
data : a list containing a dictionary
List item dictionary required field :
- url : used to generate <a> tag (default="#")
- name : name used to identified to item (default="Undefined")
- child_selected : boolean, if one of its childs is selected (default=False)
- selected : boolean, if the items is selected (default=False)
- items : list of items (recursive data structure for multi-level tree) (default=[])
cssid : string, id of the list container
cssclass : string, class name of thelist container
type : string, either "dl" for a definition list (<dl>/dl>) or "ul" for a unordered list (<ul></ul>)
"""
if type == "dl":
return _generetate_dl_tree(data, cssid, cssclass)
if type == "li":
return _generetate_dl_tree(data, cssid, cssclass)
# TODO: add generate_li_tree code
#return _generetate_li_tree(data, cssid, cssclass)
| # encoding: utf-8
# copyright Vimba inc. 2010
# programmer : Francis Lavoie
from django import template
from django.template.loader import render_to_string
register = template.Library()
@register.inclusion_tag('tree/tree_dl.html')
def generetate_dl_tree(data, cssid, cssclass):
return {"data":data, "cssid":cssid, "cssclass": cssclass}
@register.inclusion_tag('tree_li.html')
def generetate_li_tree(data, cssid, cssclass):
return data, cssid, cssclass
def _generetate_dl_tree(data, cssid, cssclass):
""" When called from a function instead of a template tag
"""
return render_to_string('tree/tree_dl.html', {"data":data, "cssid":cssid, "cssclass": cssclass})
def _generetate_li_tree(data, cssid, cssclass):
return data, cssid, cssclass
def generate_tree(data, cssid="", cssclass="", type="dl"):
""" Take a list and generate a html tree
data : a list containing a dictionary
List item dictionary required field :
- url : used to generate <a> tag (default="#")
- name : name used to identified to item (default="Undefined")
- child_selected : boolean, if one of its childs is selected (default=False)
- selected : boolean, if the items is selected (default=False)
- items : list of items (recursive data structure for multi-level tree) (default=[])
cssid : string, id of the list container
cssclass : string, class name of thelist container
type : string, either "dl" for a definition list (<dl>/dl>) or "ul" for a unordered list (<ul></ul>)
"""
if type == "dl":
return _generetate_dl_tree(data, cssid, cssclass)
if type == "li":
return _generetate_li_tree(data, cssid, cssclass)
| bsd-3-clause | Python |
0055e044a504e37fa96359e1bdb161fbad1acf78 | Update about_new_style_classes.py | yashwanthbabu/python_koans_python2_solutions,yashwanthbabu/python_koans_python2_solutions | python2/koans/about_new_style_classes.py | python2/koans/about_new_style_classes.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
class OldStyleClass:
"An old style class"
# Original class style have been phased out in Python 3.
class NewStyleClass(object):
"A new style class"
# Introduced in Python 2.2
#
# Aside from this set of tests, Python Koans sticks exclusively to this
# kind of class
pass
def test_new_style_classes_inherit_from_object_base_class(self):
self.assertEqual(True, issubclass(self.NewStyleClass, object))
self.assertEqual(False, issubclass(self.OldStyleClass, object))
def test_new_style_classes_have_more_attributes(self):
self.assertEqual(2, len(dir(self.OldStyleClass)))
self.assertEqual("An old style class", self.OldStyleClass.__doc__)
self.assertEqual('koans.about_new_style_classes', self.OldStyleClass.__module__)
self.assertEqual(18, len(dir(self.NewStyleClass)))
# To examine the available attributes, run
# 'dir(<Class name goes here>)'
# from a python console
# ------------------------------------------------------------------
def test_old_style_classes_have_type_but_no_class_attribute(self):
self.assertEqual('classobj', type(self.OldStyleClass).__name__)
try:
cls = self.OldStyleClass.__class__.__name__
except Exception as ex:
pass
# What was that error message from the exception?
self.assertMatch("class OldStyleClass has no attribute '__class__'", ex[0])
def test_new_style_classes_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(type(self.NewStyleClass), self.NewStyleClass.__class__)
self.assertEqual(
True,
type(self.NewStyleClass) == self.NewStyleClass.__class__)
# ------------------------------------------------------------------
def test_in_old_style_instances_class_is_different_to_type(self):
old_style = self.OldStyleClass()
self.assertEqual('OldStyleClass', old_style.__class__.__name__)
self.assertEqual('instance', type(old_style).__name__)
def test_new_style_instances_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual('NewStyleClass', new_style.__class__.__name__)
self.assertEqual(True, type(new_style) == new_style.__class__)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
class OldStyleClass:
"An old style class"
# Original class style have been phased out in Python 3.
class NewStyleClass(object):
"A new style class"
# Introduced in Python 2.2
#
# Aside from this set of tests, Python Koans sticks exclusively to this
# kind of class
pass
def test_new_style_classes_inherit_from_object_base_class(self):
self.assertEqual(True, issubclass(self.NewStyleClass, object))
self.assertEqual(False, issubclass(self.OldStyleClass, object))
def test_new_style_classes_have_more_attributes(self):
self.assertEqual(2, len(dir(self.OldStyleClass)))
self.assertEqual("An old style class", self.OldStyleClass.__doc__)
self.assertEqual('koans.about_new_style_classes', self.OldStyleClass.__module__)
self.assertEqual(18, len(dir(self.NewStyleClass)))
# To examine the available attributes, run
# 'dir(<Class name goes here>)'
# from a python console
# ------------------------------------------------------------------
def test_old_style_classes_have_type_but_no_class_attribute(self):
self.assertEqual(__, type(self.OldStyleClass).__name__)
try:
cls = self.OldStyleClass.__class__.__name__
except Exception as ex:
pass
# What was that error message from the exception?
self.assertMatch("class OldStyleClass has no attribute '__class__'", ex[0])
def test_new_style_classes_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(type(self.NewStyleClass), self.NewStyleClass.__class__)
self.assertEqual(
True,
type(self.NewStyleClass) == self.NewStyleClass.__class__)
# ------------------------------------------------------------------
def test_in_old_style_instances_class_is_different_to_type(self):
old_style = self.OldStyleClass()
self.assertEqual('OldStyleClass', old_style.__class__.__name__)
self.assertEqual('instance', type(old_style).__name__)
def test_new_style_instances_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual('NewStyleClass', new_style.__class__.__name__)
self.assertEqual(True, type(new_style) == new_style.__class__)
| mit | Python |
327903dce8105c320ed1188587e2104dd2ad2c04 | fix bug | nick6918/ThingCloud,nick6918/ThingCloud,nick6918/ThingCloud | ThingCloud/OrderSystem/models.py | ThingCloud/OrderSystem/models.py | # -*- coding: utf-8 -*-
from django.db import models
from AccountSystem.models import User
from CloudList.models import Address, WareHouse
from MainSystem.models import Employee
from django.forms.models import model_to_dict
from TCD_lib.utils import dictPolish
# Create your models here.
class Courier(models.Model):
crid = models.AutoField(primary_key=True)
employee = models.OneToOneField(Employee)
wh_belong = models.ForeignKey(WareHouse)
class Meta:
db_table = "work_courier"
class Order(models.Model):
oid = models.AutoField(primary_key=True)
addr = models.ForeignKey(Address, null=True, default=None)
user = models.ForeignKey(User)
notes = models.CharField(max_length=300)
fee = models.FloatField()
typeid = models.IntegerField()
itemList = models.CharField(max_length=200)
state = models.IntegerField()
create_time = models.DateTimeField()
paid_time = models.DateTimeField(null=True, default=None)
finish_time = models.DateTimeField(null=True, default=None)
prepayid = models.CharField(max_length=50)
signature = models.CharField(max_length=50)
showid = models.CharField(max_length=50)
courier = models.ForeignKey(Courier)
class Meta:
db_table = "orders"
def toDict(self):
_order = model_to_dict(self)
_order['courier_id'] = self.courier.employee.id
_order['courier_name'] = self.courier.employee.name
_address = self.addr.toDict()
_order.update(_address)
return dictPolish(_order)
class Complaint(models.Model):
cmpid = models.AutoField(primary_key=True)
state = models.IntegerField()
notes = models.CharField(max_length=300)
user = models.ForeignKey(User)
order = models.ForeignKey(Order)
class Meta:
db_table = "Complaints"
class VIPOrder(models.Model):
"""
Order to buy VIP.
"""
void = models.AutoField(primary_key=True)
month = models.IntegerField()
level = models.IntegerField()
user = models.ForeignKey(User)
prepayid = models.CharField(max_length=50)
fee = models.CharField(max_length=50)
#state == 0, 未支付, state = 1, 已支付, state = 2, 支付处理中
state = models.IntegerField()
class Meta:
db_table = "order_vip"
| # -*- coding: utf-8 -*-
from django.db import models
from AccountSystem.models import User
from CloudList.models import Address, WareHouse
from MainSystem.models import Employee
from django.forms.models import model_to_dict
from TCD_lib.utils import dictPolish
# Create your models here.
class Courier(models.Model):
crid = models.AutoField(primary_key=True)
employee = models.OneToOneField(Employee)
wh_belong = models.ForeignKey(WareHouse)
class Meta:
db_table = "work_courier"
class Order(models.Model):
oid = models.AutoField(primary_key=True)
addr = models.ForeignKey(Address, null=True, default=None)
user = models.ForeignKey(User)
notes = models.CharField(max_length=300)
fee = models.FloatField()
typeid = models.IntegerField()
itemList = models.CharField(max_length=200)
state = models.IntegerField()
create_time = models.DateTimeField()
paid_time = models.DateTimeField(null=True, default=None)
finish_time = models.DateTimeField(null=True, default=None)
prepayid = models.CharField(max_length=50)
signature = models.CharField(max_length=50)
showid = models.CharField(max_length=50)
courier = models.ForeignKey(Courier)
class Meta:
db_table = "orders"
def toDict(self):
_order = model_to_dict(self)
_order['courier_id'] = self.courier.id
_order['courier_name'] = self.courier.name
_address = self.addr.toDict()
_order.update(_address)
return dictPolish(_order)
class Complaint(models.Model):
cmpid = models.AutoField(primary_key=True)
state = models.IntegerField()
notes = models.CharField(max_length=300)
user = models.ForeignKey(User)
order = models.ForeignKey(Order)
class Meta:
db_table = "Complaints"
class VIPOrder(models.Model):
"""
Order to buy VIP.
"""
void = models.AutoField(primary_key=True)
month = models.IntegerField()
level = models.IntegerField()
user = models.ForeignKey(User)
prepayid = models.CharField(max_length=50)
fee = models.CharField(max_length=50)
#state == 0, 未支付, state = 1, 已支付, state = 2, 支付处理中
state = models.IntegerField()
class Meta:
db_table = "order_vip"
| mit | Python |
7e60524b9443d1815334aa7df9b44cfe363acafd | Update to version v0.7.7rc2 | jeffknupp/sandman,webwlsong/sandman,jeffknupp/sandman,beni55/sandman,webwlsong/sandman,webwlsong/sandman,jmcarp/sandman,jeffknupp/sandman,jmcarp/sandman,beni55/sandman,aprilcs/sandman,aprilcs/sandman,jmcarp/sandman,aprilcs/sandman,beni55/sandman | sandman/__init__.py | sandman/__init__.py | """Sandman automatically generates a REST API from your existing database,
without you needing to tediously define the tables in typical ORM style. Simply
create a class for each table you want to expose in the API, give it's
associated database table, and off you go! The generated API essentially exposes
a completely new system based on your existing data, using HATEOAS."""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.secret_key = '42'
db = SQLAlchemy(app)
from . import sandman
__version__ = '0.7.7rc2'
| """Sandman automatically generates a REST API from your existing database,
without you needing to tediously define the tables in typical ORM style. Simply
create a class for each table you want to expose in the API, give it's
associated database table, and off you go! The generated API essentially exposes
a completely new system based on your existing data, using HATEOAS."""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.secret_key = '42'
db = SQLAlchemy(app)
from . import sandman
__version__ = '0.7.7rc1'
| apache-2.0 | Python |
41d45d7dce8756658ec3d28540f4a5bce425ff1d | Update instagram regex. | ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit | icekit/plugins/instagram_embed/forms.py | icekit/plugins/instagram_embed/forms.py | import re
from django import forms
from fluent_contents.forms import ContentItemForm
class InstagramEmbedAdminForm(ContentItemForm):
def clean_url(self):
"""
Make sure the URL provided matches the instagram URL format.
"""
url = self.cleaned_data['url']
if url:
pattern = re.compile(r'https?://instagr(\.am|am\.com)/p/\S+')
if not pattern.match(url):
raise forms.ValidationError('Please provide a valid instagram link.')
return url
| import re
from django import forms
from fluent_contents.forms import ContentItemForm
class InstagramEmbedAdminForm(ContentItemForm):
def clean_url(self):
"""
Make sure the URL provided matches the instagram URL format.
"""
url = self.cleaned_data['url']
if url:
pattern = re.compile(r'https?://instagr.?am(.com)?/p/')
if not pattern.match(url):
raise forms.ValidationError('Please provide a valid instagram link.')
return url
| mit | Python |
82f66f42d5fee210c3bb2e535d559c5a5dbfccc4 | Clarify message for Unicode argument requrement | orome/crypto-enigma-py | crypto_enigma/utils.py | crypto_enigma/utils.py | #!/usr/bin/env python
# encoding: utf8
# Copyright (C) 2015 by Roy Levien.
# This file is part of crypto-enigma, an Enigma Machine simulator.
# released under the BSD-3 License (see LICENSE.txt).
"""
Description
.. note::
Any additional note.
"""
from __future__ import (absolute_import, print_function, division, unicode_literals)
import time
import sys
# TBD - Generalize to other platforms; test?
def print_over(s, delay=0.2):
print(s, end='\r')
print("\033[F" * (s.count('\n')+1))
sys.stdout.flush()
time.sleep(delay)
def num_A0(c):
return ord(c) - ord('A')
def chr_A0(n):
return chr(n + ord('A'))
def ordering(items):
return [i[1] for i in sorted(zip(items, range(0, len(items))))]
# standard simple-substitution cypher encoding
def encode_char(mapping, ch):
if ch == ' ':
return ' '
else:
return mapping[num_A0(ch)]
def encode_string(mapping, string):
return ''.join([encode_char(mapping, ch) for ch in string])
# scan, because it's missing from Python; implemented to anticipate Python 3
def accumulate(l, f):
it = iter(l)
total = next(it)
yield total
for element in it:
total = f(total, element)
yield total
# also missing from Python
def chunk_of(it, n):
return [it[i:i+n] for i in range(0, len(it), n)]
# require unicode strings (see unicode_literal in enigma.py) - http://stackoverflow.com/a/33743668/656912
def require_unicode(*given_arg_names):
def check_types(_func_, *args):
def modified(*args):
arg_names = list(_func_.func_code.co_varnames[:_func_.func_code.co_argcount])
if len(given_arg_names) == 0:
unicode_arg_names = arg_names
else:
unicode_arg_names = given_arg_names
for unicode_arg_name in unicode_arg_names:
try:
arg_index = arg_names.index(unicode_arg_name)
except ValueError:
raise NameError(unicode_arg_name)
arg = args[arg_index]
if not isinstance(arg, unicode):
raise TypeError("Parameter '{}' should be Unicode".format(unicode_arg_name))
return _func_(*args)
return modified
return check_types
| #!/usr/bin/env python
# encoding: utf8
# Copyright (C) 2015 by Roy Levien.
# This file is part of crypto-enigma, an Enigma Machine simulator.
# released under the BSD-3 License (see LICENSE.txt).
"""
Description
.. note::
Any additional note.
"""
from __future__ import (absolute_import, print_function, division, unicode_literals)
import time
import sys
# TBD - Generalize to other platforms; test?
def print_over(s, delay=0.2):
print(s, end='\r')
print("\033[F" * (s.count('\n')+1))
sys.stdout.flush()
time.sleep(delay)
def num_A0(c):
return ord(c) - ord('A')
def chr_A0(n):
return chr(n + ord('A'))
def ordering(items):
return [i[1] for i in sorted(zip(items, range(0, len(items))))]
# standard simple-substitution cypher encoding
def encode_char(mapping, ch):
if ch == ' ':
return ' '
else:
return mapping[num_A0(ch)]
def encode_string(mapping, string):
return ''.join([encode_char(mapping, ch) for ch in string])
# scan, because it's missing from Python; implemented to anticipate Python 3
def accumulate(l, f):
it = iter(l)
total = next(it)
yield total
for element in it:
total = f(total, element)
yield total
# also missing from Python
def chunk_of(it, n):
return [it[i:i+n] for i in range(0, len(it), n)]
# require unicode strings (see unicode_literal in enigma.py) - http://stackoverflow.com/a/33743668/656912
def require_unicode(*given_arg_names):
def check_types(_func_, *args):
def modified(*args):
arg_names = list(_func_.func_code.co_varnames[:_func_.func_code.co_argcount])
if len(given_arg_names) == 0:
unicode_arg_names = arg_names
else:
unicode_arg_names = given_arg_names
for unicode_arg_name in unicode_arg_names:
try:
arg_index = arg_names.index(unicode_arg_name)
except ValueError:
raise NameError(unicode_arg_name)
arg = args[arg_index]
if not isinstance(arg, unicode):
raise TypeError("Parameter '{}' should be a Unicode literal".format(unicode_arg_name))
return _func_(*args)
return modified
return check_types
| bsd-3-clause | Python |
0f9bdbcd2c0f927097f534770c56e96244ce28a5 | Add a textarea widget | theatrus/yyafl,miracle2k/yyafl | yyafl/widgets.py | yyafl/widgets.py | from yyafl.util import flatatt, smart_unicode
class Widget(object):
is_hidden = False
def __init__(self, attrs = None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs = None):
"""
Returns this widget as an HTML entry.
"""
raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
class Input(Widget):
input_type = None # Subclasses must define this.
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '': final_attrs['value'] = smart_unicode(value)
return u'<input%s />' % flatatt(final_attrs)
class TextInput(Input):
input_type = "text"
pass
class TextArea(Widget):
def render(self, name, value, attrs = None):
if value is None: value = '' # default value
final_attrs = self.build_attrs(attrs, name=name)
return u'<textarea' + flatatt(final_attrs) + u'>' + smart_unicode(value) + u'</textarea>'
class HiddenInput(Input):
input_type = "hidden"
is_hidden = True
| from yyafl.util import flatatt, smart_unicode
class Widget(object):
is_hidden = False
def __init__(self, attrs = None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs = None):
"""
Returns this widget as an HTML entry.
"""
raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
class Input(Widget):
input_type = None # Subclasses must define this.
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '': final_attrs['value'] = smart_unicode(value)
return u'<input%s />' % flatatt(final_attrs)
class TextInput(Input):
input_type = "text"
pass
class HiddenInput(Input):
input_type = "hidden"
is_hidden = True
| bsd-3-clause | Python |
f459fd25b774d59a263edc220b6b0289209d30d9 | Bump version; 0.1.5.dev1 | treasure-data/td-client-python | tdclient/version.py | tdclient/version.py | __version__ = "0.1.5.dev1"
| __version__ = "0.1.5.dev0"
| apache-2.0 | Python |
f99315cda065d5f869114f725a52e7d79189a041 | update sitemap generation for multiple profiles and ref GetRepositoryItem | kevinpdavies/pycsw,kalxas/pycsw,ingenieroariel/pycsw,PublicaMundi/pycsw,bukun/pycsw,benhowell/pycsw,PublicaMundi/pycsw,ocefpaf/pycsw,kalxas/pycsw,tomkralidis/pycsw,geopython/pycsw,ricardogsilva/pycsw,rouault/pycsw,rouault/pycsw,ricardogsilva/pycsw,tomkralidis/pycsw,ckan-fcd/pycsw-fcd,geopython/pycsw,ocefpaf/pycsw,ingenieroariel/pycsw,bukun/pycsw,kevinpdavies/pycsw,kalxas/pycsw,benhowell/pycsw,mwengren/pycsw,mwengren/pycsw,ckan-fcd/pycsw-fcd,bukun/pycsw,tomkralidis/pycsw,ricardogsilva/pycsw,geopython/pycsw | sbin/gen_sitemap.py | sbin/gen_sitemap.py | #!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Tom Kralidis <tomkralidis@hotmail.com>
#
# Copyright (c) 2011 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# generate an XML sitemap from all records in repository
import os
from lxml import etree
from server import config, core_queryables, profile, repository, util
# get configuration and init repo connection
CFG = config.get_config('default.cfg')
REPOS = {}
REPOS[CFG['repository']['typename']] = \
repository.Repository(CFG['repository']['db'], CFG['repository']['db_table'])
REPOS[CFG['repository']['typename']].cq = \
core_queryables.CoreQueryables(CFG, 'na')
if CFG['server'].has_key('profiles'):
PROFILES = profile.load_profiles(
os.path.join('server', 'profiles'), profile.Profile,
CFG['server']['profiles'])
for prof in PROFILES['plugins'].keys():
tmp = PROFILES['plugins'][prof]()
REPOS[tmp.typename] = \
repository.Repository(
tmp.config['repository']['db'],
tmp.config['repository']['db_table'])
REPOS[tmp.typename].cq = \
core_queryables.CoreQueryables(tmp.config, 'na')
# write out sitemap document
URLSET = etree.Element(util.nspath_eval('sitemap:urlset'),
nsmap=config.NAMESPACES)
URLSET.attrib[util.nspath_eval('xsi:schemaLocation')] = \
'%s http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd' % \
config.NAMESPACES['sitemap']
# get all records
for repo in REPOS:
RECORDS = REPOS[repo].query()
for rec in RECORDS:
url = etree.SubElement(URLSET, util.nspath_eval('sitemap:url'))
uri = '%s?service=CSW&version=2.0.2&request=GetRepositoryItem&id=%s' % \
(CFG['server']['url'],
getattr(rec, REPOS[repo].cq.mappings['_id']['obj_attr']))
etree.SubElement(url, util.nspath_eval('sitemap:loc')).text = uri
# to stdout
print etree.tostring(URLSET, pretty_print = 1, \
encoding = CFG['server']['encoding'], xml_declaration=1)
| #!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Tom Kralidis <tomkralidis@hotmail.com>
#
# Copyright (c) 2011 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# generate an XML sitemap from all records in repository
from lxml import etree
from server import config, repository, util
# get configuration and init repo connection
CFG = config.get_config('default.cfg')
REPO = repository.Repository(CFG['repository']['db'],
CFG['repository']['db_table'])
# get all records
RECORDS = REPO.query()
# write out sitemap document
URLSET = etree.Element(util.nspath_eval('sitemap:urlset'),
nsmap=config.NAMESPACES)
URLSET.attrib[util.nspath_eval('xsi:schemaLocation')] = \
'%s http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd' % \
config.NAMESPACES['sitemap']
for rec in RECORDS:
url = etree.SubElement(URLSET, util.nspath_eval('sitemap:url'))
uri = '%s?service=CSW&version=2.0.2&request=GetRecordById&id=%s&elementsetname=full' % \
(CFG['server']['url'], rec.identifier)
etree.SubElement(url, util.nspath_eval('sitemap:loc')).text = uri
# to stdout
print etree.tostring(URLSET, pretty_print = 1, \
encoding = CFG['server']['encoding'], xml_declaration=1)
| mit | Python |
e2d0e830c2b9b8615523d73ee53fb86d98c59c3b | Bump version; 0.12.1.dev0 [ci skip] | treasure-data/td-client-python | tdclient/version.py | tdclient/version.py | __version__ = "0.12.1.dev0"
| __version__ = "0.12.0"
| apache-2.0 | Python |
1179f0afbf3c3a6a306979306f2c4fa4e8669e09 | load example configs for defaults | svub/whatsapp-rest-webservice,svub/whatsapp-rest-webservice | service/quickstart/config.py | service/quickstart/config.py | import ConfigParser
import os
class Config:
defaults = {}
# load example.conf for defaults and overwrite if a real config has been created.
configfiles = ['whatsapp.example.conf', 'service/whatsapp.example.conf', 'whatsapp.conf', 'service/whatsapp.conf']
config=None
@classmethod
def get(self, section, value):
if not self.config:
# load configuration file once
self.config = ConfigParser.RawConfigParser(self.defaults);
self.config.read(self.configfiles)
print('Loading WhatsApp configuration.')
print('phone number='+self.config.get('credentials', 'phone'))
print('password='+self.config.get('credentials', 'password'))
return self.config.get(section, value)
| import ConfigParser
import os
class Config:
defaults = {}
configfiles = ['whatsapp.conf', 'service/whatsapp.conf']
config=None
@classmethod
def get(self, section, value):
if not self.config:
# load configuration file once
self.config = ConfigParser.RawConfigParser(self.defaults);
self.config.read(self.configfiles)
print('Loading WhatsApp configuration.')
print('phone number='+self.config.get('credentials', 'phone'))
print('password='+self.config.get('credentials', 'password'))
return self.config.get(section, value)
| mit | Python |
70fa93238128440f7098940ebc67d5bb56ae8dcb | make more consistent | RockefellerArchiveCenter/DACSspace | dacsspace/validator.py | dacsspace/validator.py | import json
from jsonschema import Draft202012Validator
class Validator:
"""Validates data from ArchivesSpace."""
def __init__(self, schema_identifier, schema_filepath):
"""Loads and validates the schema from an identifier or filepath.
Args:
schema_identifier (str): a pointer to a schema that is part of
DACSspace, located in the `schemas` directory.
schema_filepath (str): a filepath pointing to an external schema.
"""
self.validator = Draft202012Validator
if not schema_filepath:
schema_filepath = f"schemas/{schema_identifier.removesuffix('.json')}.json"
with open(schema_filepath, "r") as json_file:
self.schema = json.load(json_file)
self.validator.check_schema(self.schema)
def format_error(self, error):
"""Formats validation error messages.
Args:
error (jsonschema.exceptions.ValidationError): a validation error
received from jsonschema.
Returns:
message (str): a string representation of the validation error.
"""
if error.validator == "required":
return error.message
else:
schema_path = f"schema[{']['.join(repr(index) for index in error.schema_path)}]"
return f"Failed validating {repr(error.validator)} in {schema_path}: {error.schema}"
def validate_data(self, data):
"""Validates data.
Args:
data (dict): An ArchivesSpace object to be validated.
Returns:
result (dict): The result of the validation. An dictionary with the
object's URI, a boolean indication of the validation result and, if
necessary, an explanation of any validation errors.
{ "uri": "/repositories/2/resources/1234", "valid": False, "explanation": "You are missing the following fields..." }
"""
validator = self.validator(self.schema)
errors_found = [
self.format_error(error) for error in validator.iter_errors(data)]
if len(errors_found):
return {"uri": data["uri"], "valid": False,
"explanation": "\n".join(errors_found)}
else:
return {"uri": data["uri"], "valid": True}
| import json
from jsonschema import Draft202012Validator
class Validator:
"""Validates data from ArchivesSpace."""
def __init__(self, schema_identifier, schema_filepath):
"""Loads and validates the schema from an identifier or filepath.
Args:
schema_identifier (str): a pointer to a schema that is part of
DACSspace, located in the `schemas` directory.
schema_filepath (str): a filepath pointing to an external schema.
"""
self.validator = Draft202012Validator
if not schema_filepath:
schema_filepath = f"schemas/{schema_identifier.removesuffix('.json')}.json"
with open(schema_filepath, "r") as json_file:
self.schema = json.load(json_file)
self.validator.check_schema(self.schema)
def format_error(self, error):
"""Formats validation error messages.
Args:
error (jsonschema.exceptions.ValidationError): a validation error
received from jsonschema.
Returns:
message (str): a string representation of the validation error.
"""
if error.validator == "required":
return error.message
else:
schema_path = f"schema[{']['.join(repr(index) for index in error.schema_path)}]"
return f"Failed validating {error.validator!r} in {schema_path}: {error.schema}"
def validate_data(self, data):
"""Validates data.
Args:
data (dict): An ArchivesSpace object to be validated.
Returns:
result (dict): The result of the validation. An dictionary with the
object's URI, a boolean indication of the validation result and, if
necessary, an explanation of any validation errors.
{ "uri": "/repositories/2/resources/1234", "valid": False, "explanation": "You are missing the following fields..." }
"""
validator = self.validator(self.schema)
errors_found = [
self.format_error(error) for error in validator.iter_errors(data)]
if len(errors_found):
return {"uri": data["uri"], "valid": False,
"explanation": "\n".join(errors_found)}
else:
return {"uri": data["uri"], "valid": True}
| mit | Python |
87bc8092a86fb9e6fd10a4a620def2a22c742003 | Simplify code a bit | jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common | integration-tests/features/src/utils.py | integration-tests/features/src/utils.py | """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x.replace(password, '***') for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| apache-2.0 | Python |
c9f294a1c9284975e711dbe0b37519e68606543c | Update ripencc.py | aaronkaplan/intelmq,pkug/intelmq,pkug/intelmq,robcza/intelmq,pkug/intelmq,sch3m4/intelmq,sch3m4/intelmq,sch3m4/intelmq,certtools/intelmq,robcza/intelmq,sch3m4/intelmq,certtools/intelmq,certtools/intelmq,robcza/intelmq,robcza/intelmq,aaronkaplan/intelmq,pkug/intelmq,aaronkaplan/intelmq | intelmq/bots/experts/ripencc/ripencc.py | intelmq/bots/experts/ripencc/ripencc.py | from intelmq.lib.bot import Bot, sys
from intelmq.bots.experts.ripencc.lib import RIPENCC
'''
Reference: https://stat.ripe.net/data/abuse-contact-finder/data.json?resource=1.1.1.1
TODO:
Load RIPE networks prefixes into memory.
Compare each IP with networks prefixes loadad.
If ip matchs, query RIPE
'''
class RIPENCCExpertBot(Bot):
def process(self):
event = self.receive_message()
for key in ['source_','destination_']:
if event.contains(key + "ip"):
ip = event.value(key + "ip")
email = RIPENCC.query(ip)
if email:
event.add(key + "abuse_contact", email)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = RIPENCCExpertBot(sys.argv[1])
bot.start()
| from intelmq.lib.bot import Bot, sys
from intelmq.bots.experts.ripencc.lib import RIPENCC
'''
Reference: https://stat.ripe.net/data/abuse-contact-finder/data.json?resource=1.1.1.1
FIXME: Create a cache.
'''
class RIPENCCExpertBot(Bot):
def process(self):
event = self.receive_message()
for key in ['source_','destination_']:
if event.contains(key + "ip"):
ip = event.value(key + "ip")
email = RIPENCC.query(ip)
if email:
event.add(key + "abuse_contact", email)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = RIPENCCExpertBot(sys.argv[1])
bot.start()
| agpl-3.0 | Python |
e85fcd553756eab32cedca214c9b8b86ff48f8b8 | Make workload optional when editing vacancies | viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct | app/forms/vacancy.py | app/forms/vacancy.py | from flask_wtf import Form
from flask_babel import lazy_gettext as _ # noqa
from wtforms import StringField, SubmitField, TextAreaField, \
DateField, SelectField
from wtforms.validators import InputRequired
class VacancyForm(Form):
title = StringField(_('Title'), validators=[InputRequired(
message=_('A title is required.'))])
description = TextAreaField(_('Description'), validators=[InputRequired(
message=_('A description is required.'))])
start_date = DateField(_('Start date'), validators=[InputRequired(
message=_('Start date is required.'))])
end_date = DateField(_('End date'), validators=[InputRequired(
message=_('End date is required.'))])
contract_of_service = SelectField(_('Contract'),
choices=[('voltijd', _('Voltijd')),
('deeltijd', _('Deeltijd')),
('bijbaan', _('Bijbaan')),
('stage', _('Stage'))])
workload = StringField(_('Workload'))
company_id = SelectField(_('Company'), coerce=int)
submit = SubmitField(_('Submit'))
| from flask_wtf import Form
from flask_babel import lazy_gettext as _ # noqa
from wtforms import StringField, SubmitField, TextAreaField, \
DateField, SelectField
from wtforms.validators import InputRequired
class VacancyForm(Form):
title = StringField(_('Title'), validators=[InputRequired(
message=_('A title is required.'))])
description = TextAreaField(_('Description'), validators=[InputRequired(
message=_('A description is required.'))])
start_date = DateField(_('Start date'), validators=[InputRequired(
message=_('Start date is required.'))])
end_date = DateField(_('End date'), validators=[InputRequired(
message=_('End date is required.'))])
contract_of_service = SelectField(_('Contract'),
choices=[('voltijd', _('Voltijd')),
('deeltijd', _('Deeltijd')),
('bijbaan', _('Bijbaan')),
('stage', _('Stage'))])
workload = StringField(_('Workload'), validators=[InputRequired(
message=_('Workload is required.'))])
company_id = SelectField(_('Company'), coerce=int)
submit = SubmitField(_('Submit'))
| mit | Python |
c3cb5035889b7a428d0f2d99719765b6e1218e19 | Change initial twiss parameters | lnls-fac/sirius | sirius/LI_V00/accelerator.py | sirius/LI_V00/accelerator.py |
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
_default_cavity_on = False
_default_radiation_on = False
_default_vchamber_on = False
def create_accelerator():
accelerator = _pyaccel.accelerator.Accelerator(
lattice=_lattice.create_lattice(),
energy=_lattice._energy,
radiation_on=_default_radiation_on,
vchamber_on=_default_vchamber_on
)
return accelerator
_folder_code = _lnls.system.folder_code
accelerator_data = dict()
accelerator_data['lattice_version'] = 'LI_V00'
accelerator_data['dirs'] = {
'excitation_curves': _lnls.system.folder_excitation_curves,
}
accelerator_data['global_coupling'] = 1.00 # expected corrected value
accelerator_data['emittance'] = 170.3329758677203e-09 # [m·rad]
accelerator_data['energy_spread'] = 0.005
accelerator_data['twiss_at_exit'] = _pyaccel.optics.Twiss.make_new(beta=[7.0,7.0], alpha=[0.0,0.0])
accelerator_data['pressure_profile'] = None
|
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
_default_cavity_on = False
_default_radiation_on = False
_default_vchamber_on = False
def create_accelerator():
accelerator = _pyaccel.accelerator.Accelerator(
lattice=_lattice.create_lattice(),
energy=_lattice._energy,
radiation_on=_default_radiation_on,
vchamber_on=_default_vchamber_on
)
return accelerator
_folder_code = _lnls.system.folder_code
accelerator_data = dict()
accelerator_data['lattice_version'] = 'LI_V00'
accelerator_data['dirs'] = {
'excitation_curves': _lnls.system.folder_excitation_curves,
}
accelerator_data['global_coupling'] = 1.00 # expected corrected value
accelerator_data['emittance'] = 170.3329758677203e-09 # [m·rad]
accelerator_data['energy_spread'] = 0.005
accelerator_data['twiss_at_exit'] = _pyaccel.optics.Twiss.make_new(beta=[7.0,7.0], alpha=[-1.0,-1.0])
accelerator_data['pressure_profile'] = None
| mit | Python |
f94061d2aefa3c8b42bca884d4f98bad34ca2a81 | Load new brief_response_manifests | alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend | app/main/__init__.py | app/main/__init__.py | from flask import Blueprint
from dmcontent.content_loader import ContentLoader
main = Blueprint('main', __name__)
content_loader = ContentLoader('app/content')
content_loader.load_manifest('g-cloud-6', 'services', 'edit_service')
content_loader.load_messages('g-cloud-6', ['dates', 'urls'])
content_loader.load_manifest('g-cloud-7', 'services', 'edit_service')
content_loader.load_manifest('g-cloud-7', 'services', 'edit_submission')
content_loader.load_manifest('g-cloud-7', 'declaration', 'declaration')
content_loader.load_messages('g-cloud-7', ['dates', 'urls'])
content_loader.load_manifest('digital-outcomes-and-specialists', 'declaration', 'declaration')
content_loader.load_manifest('digital-outcomes-and-specialists', 'services', 'edit_submission')
content_loader.load_manifest('digital-outcomes-and-specialists', 'briefs', 'edit_brief')
content_loader.load_manifest('digital-outcomes-and-specialists', 'brief-responses', 'legacy_edit_brief_response')
content_loader.load_manifest('digital-outcomes-and-specialists', 'brief-responses', 'edit_brief_response')
content_loader.load_manifest('digital-outcomes-and-specialists', 'brief-responses', 'display_brief_response')
content_loader.load_messages('digital-outcomes-and-specialists', ['dates', 'urls'])
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'declaration', 'declaration')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'services', 'edit_submission')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'briefs', 'edit_brief')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'brief-responses', 'edit_brief_response')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'brief-responses', 'display_brief_response')
content_loader.load_messages('digital-outcomes-and-specialists-2', ['dates', 'urls'])
content_loader.load_manifest('g-cloud-8', 'services', 'edit_service')
content_loader.load_manifest('g-cloud-8', 'services', 'edit_submission')
content_loader.load_manifest('g-cloud-8', 'declaration', 'declaration')
content_loader.load_messages('g-cloud-8', ['dates', 'urls'])
@main.after_request
def add_cache_control(response):
response.cache_control.no_cache = True
return response
from .views import services, suppliers, login, frameworks, users, briefs
from . import errors
| from flask import Blueprint
from dmcontent.content_loader import ContentLoader
main = Blueprint('main', __name__)
content_loader = ContentLoader('app/content')
content_loader.load_manifest('g-cloud-6', 'services', 'edit_service')
content_loader.load_messages('g-cloud-6', ['dates', 'urls'])
content_loader.load_manifest('g-cloud-7', 'services', 'edit_service')
content_loader.load_manifest('g-cloud-7', 'services', 'edit_submission')
content_loader.load_manifest('g-cloud-7', 'declaration', 'declaration')
content_loader.load_messages('g-cloud-7', ['dates', 'urls'])
content_loader.load_manifest('digital-outcomes-and-specialists', 'declaration', 'declaration')
content_loader.load_manifest('digital-outcomes-and-specialists', 'services', 'edit_submission')
content_loader.load_manifest('digital-outcomes-and-specialists', 'briefs', 'edit_brief')
content_loader.load_manifest('digital-outcomes-and-specialists', 'brief-responses', 'legacy_edit_brief_response')
content_loader.load_manifest('digital-outcomes-and-specialists', 'brief-responses', 'display_brief_response')
content_loader.load_messages('digital-outcomes-and-specialists', ['dates', 'urls'])
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'declaration', 'declaration')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'services', 'edit_submission')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'briefs', 'edit_brief')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'brief-responses', 'edit_brief_response')
content_loader.load_manifest('digital-outcomes-and-specialists-2', 'brief-responses', 'display_brief_response')
content_loader.load_messages('digital-outcomes-and-specialists-2', ['dates', 'urls'])
content_loader.load_manifest('g-cloud-8', 'services', 'edit_service')
content_loader.load_manifest('g-cloud-8', 'services', 'edit_submission')
content_loader.load_manifest('g-cloud-8', 'declaration', 'declaration')
content_loader.load_messages('g-cloud-8', ['dates', 'urls'])
@main.after_request
def add_cache_control(response):
response.cache_control.no_cache = True
return response
from .views import services, suppliers, login, frameworks, users, briefs
from . import errors
| mit | Python |
240c274fb8f5620ffb08ba8a6f06ff377c749fdb | fix import | campagnola/acq4,pbmanis/acq4,acq4/acq4,pbmanis/acq4,acq4/acq4,acq4/acq4,campagnola/acq4,pbmanis/acq4,acq4/acq4,pbmanis/acq4,campagnola/acq4,campagnola/acq4 | acq4/devices/Pipette/__init__.py | acq4/devices/Pipette/__init__.py | from __future__ import print_function
from .pipette import Pipette, PipetteDeviceGui | from __future__ import print_function
from .pipette import Pipette | mit | Python |
9f725a6c4481d65e95c1d3119c96b13cd0543604 | Change the database connection | kinoreel/kinoreel-backend,kinoreel/kinoreel-backend | kinoreel_backend/settings/production.py | kinoreel_backend/settings/production.py | from .base import *
# SECURITY CONFIG
DEBUG = True
ALLOWED_HOSTS = ['api.kino-project.tech']
try:
from .GLOBALS import *
except ImportError or ModuleNotFoundError:
PG_SERVER = os.environ['PG_SERVER']
PG_PORT = os.environ['PG_PORT']
PG_DB = os.environ['PG_DB']
PG_USERNAME = os.environ['PG_USERNAME']
PG_PASSWORD = os.environ['PG_PASSWORD']
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': PG_SERVER,
'NAME': 'test_{}_dev'.format(PG_DB),
'USER': PG_USERNAME,
'PORT': PG_PORT,
'PASSWORD': PG_PASSWORD,
}
}
| from .base import *
# SECURITY CONFIG
DEBUG = True
ALLOWED_HOSTS = ['api.kino-project.tech']
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| mit | Python |
e4426ed90b883ffd6526fc9cc1f1011f215a9b38 | Correct env test. | ResidentMario/geoplot | scripts/test-env.py | scripts/test-env.py | """
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
census_tracts = gpd.read_file("../data/nyc_census_tracts/census_tracts_2010.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20)) | """
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20)) | mit | Python |
ca496c21989d92b0e1d717faa4e834326188e546 | fix lxml import after P3 migration | ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo | addons/board/controllers/main.py | addons/board/controllers/main.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree as ElementTree
from odoo.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, action_id, context_to_save, domain, view_mode, name=''):
# Retrieve the 'My Dashboard' action from its xmlid
action = request.env.ref('board.open_board_my_dash_action')
if action and action['res_model'] == 'board.board' and action['views'][0][1] == 'form' and action_id:
# Maybe should check the content instead of model board.board ?
view_id = action['views'][0][0]
board = request.env['board.board'].fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
arch = ElementTree.tostring(xml, encoding='unicode')
request.env['ir.ui.view.custom'].create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
})
return True
return False
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from xml.etree import ElementTree
from odoo.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, action_id, context_to_save, domain, view_mode, name=''):
# Retrieve the 'My Dashboard' action from its xmlid
action = request.env.ref('board.open_board_my_dash_action')
if action and action['res_model'] == 'board.board' and action['views'][0][1] == 'form' and action_id:
# Maybe should check the content instead of model board.board ?
view_id = action['views'][0][0]
board = request.env['board.board'].fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
arch = ElementTree.tostring(xml, encoding='unicode')
request.env['ir.ui.view.custom'].create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
})
return True
return False
| agpl-3.0 | Python |
2627c2c5ea6fdbff9d9979765deee8740a11c7c9 | Fix module packaging thanks to landscape.io hint | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | backend/globaleaks/handlers/__init__.py | backend/globaleaks/handlers/__init__.py | # -*- encoding: utf-8 -*-
#
# In here you will find all the handlers that are tasked with handling the
# requests specified in the API.
#
# From these handlers we will be instantiating models objects that will take care
# of our business logic and the generation of the output to be sent to GLClient.
#
# In here we are also tasked with doing the validation of the user supplied
# input. This must be done before instantiating any models object.
# Validation of input may be done with the functions inside of
# globaleaks.rest.
#
# See base.BaseHandler for details on the handlers.
__all__ = ['admin',
'base',
'files',
'node',
'receiver',
'rtip',
'submission',
'wbtip']
| # -*- encoding: utf-8 -*-
#
# In here you will find all the handlers that are tasked with handling the
# requests specified in the API.
#
# From these handlers we will be instantiating models objects that will take care
# of our business logic and the generation of the output to be sent to GLClient.
#
# In here we are also tasked with doing the validation of the user supplied
# input. This must be done before instantiating any models object.
# Validation of input may be done with the functions inside of
# globaleaks.rest.
#
# See base.BaseHandler for details on the handlers.
__all__ = ['admin',
'base',
'css',
'files',
'node',
'receiver',
'rtip',
'submission',
'wbtip']
| agpl-3.0 | Python |
d62dbd6e8068d0982e5b89d734c7ef09c7fd4871 | modify BufferGenerate | MatriQ/BufferGenerate | BufferGenerate.py | BufferGenerate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-05-12 18:35:26
# @Author : Matrixdom (Matrixdom@126.com)
# @Link :
# @Version : 0.1
#coding=utf-8
import re
import os
templateFilePattern=re.compile("^\w+\.tmp$")
#获取所有模板名
def getAllTemplateFiles(path):
result=set()
for parent,dirnames,filenames in os.walk(path):
for fileName in filenames:
m=templateFilePattern.search(fileName)
if m:
result.add(m.group())
return result
def genCodeFile(filename):
path=os.getcwd()+"/templates/"+filename
f=open(path)
line=f.readline()
lineNum=0
Temp={}
Temp["Class"]=filename.split(".")[0]
Temp["Fileds"]={}
while line!='':
if not re.compile(r'^(##.+)|([\r\n])$').match(line):
line=line.strip("\n")
if lineNum==0:
Temp["package"]=line
else:
#print(line.split(":"))
if len(line.split(":"))>1:
Temp["Fileds"][line.split(":")[0]]=line.split(":")[1]
lineNum+=1
line=f.readline()
return Temp
##read content of lang
def getTmplContent(lang):
path=os.getcwd()+"/"+lang+".tmpl"
f=open(path)
content=f.read()
f.close()
return content
allTemplates=getAllTemplateFiles(".")
for filename in allTemplates:
#dir(f)
tmp=genCodeFile(filename)
#print(tmp)
content=getTmplContent("go")
content=re.compile(r"\$PACKAGENAME\$").sub(tmp["package"],content)
content=re.compile(r"\$CLASSNAME\$").sub(tmp["Class"],content)
forcontents=re.findall(r'(<\s*for\s+(\$\w+?\$)\s+:\s+(\$\w+?\$)\s*>(.*?)</\s*for\s*>)',content,re.S)
if forcontents:
#print(forcontents[0])
for forcontent in forcontents:
var=forcontent[1]
col=forcontent[2]
subcontent=forcontent[3].rstrip("\n")
newContent=""
if col=="$Fields$":
for (filed,T) in tmp["Fileds"].items():
str=subcontent.replace("$PACKAGENAME$",tmp["package"])
#print(str.strip("\n"))
str=str.replace("$CLASSNAME$",tmp["package"])
str=str.replace(var+".$FieldsName$",filed)
str=str.replace(var+".$FieldsType$",T)
newContent+=str
content=content.replace(forcontent[0],newContent)
print(content)
print("....................")
#break
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-05-12 18:35:26
# @Author : Matrixdom (Matrixdom@126.com)
# @Link :
# @Version : 0.1
#coding=utf-8
import re
import os
templateFilePattern=re.compile("^\w+\.tmp$")
#获取所有模板名
def getAllTemplateFiles(path):
result=set()
for parent,dirnames,filenames in os.walk(path):
for fileName in filenames:
m=templateFilePattern.search(fileName)
if m:
result.add(m.group())
return result
def genCodeFile(f):
line=f.readline()
lineNum=0
Temp={}
Temp["Fileds"]={}
while line!='':
if not re.compile(r'^(##.+)|([\r\n])$').match(line):
line=line.strip("\n")
if lineNum==0:
Temp["package"]=line
else:
#print(line.split(":"))
if len(line.split(":"))>1:
Temp["Fileds"][line.split(":")[0]]=line.split(":")[1]
lineNum+=1
line=f.readline()
return Temp
allTemplates=getAllTemplateFiles(".")
for filename in allTemplates:
path=os.getcwd()+"/templates/"+filename
f=open(path)
#dir(f)
tmp=genCodeFile(f)
print(tmp)
| apache-2.0 | Python |
6728bf1fecef3ac1aea9489e7c74333ad0a3b552 | Comment out audo-discovery of addons | torebutlin/cued_datalogger | datalogger/__main__.py | datalogger/__main__.py | import sys
from PyQt5.QtWidgets import QApplication
from datalogger.bin.workspace import Workspace
from datalogger.analysis_window_testing import AnalysisWindow
def full():
app = 0
app = QApplication(sys.argv)
# Create the window
w = AnalysisWindow()
w.CurrentWorkspace = Workspace()
#w.CurrentWorkspace.path = "//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/"
# Load the workspace
#CurrentWorkspace.load("//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/tests/test_workspace.wsp")
#w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/")
# Run the program
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
full()
| import sys
from PyQt5.QtWidgets import QApplication
from datalogger.bin.workspace import Workspace
from datalogger.analysis_window_testing import AnalysisWindow
def full():
app = 0
app = QApplication(sys.argv)
# Create the window
w = AnalysisWindow()
w.CurrentWorkspace = Workspace()
# Load the workspace
#CurrentWorkspace.load("//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/tests/test_workspace.wsp")
w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/")
# Run the program
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
full()
| bsd-3-clause | Python |
e6079bd4059717cc05d8b514d958c0ca4910bd60 | bump version | dsten/datascience,data-8/datascience | datascience/version.py | datascience/version.py | __version__ = '0.5.11'
| __version__ = '0.5.10'
| bsd-3-clause | Python |
2be9eef18567acf8d0ea03849423ee222c6a6242 | Fix forgotten import | Teknologforeningen/tf-info,walliski/tf-info,olkku/tf-info,walliski/tf-info,olkku/tf-info,walliski/tf-info,Teknologforeningen/tf-info,olkku/tf-info,Teknologforeningen/tf-info | apps/dagsen/views.py | apps/dagsen/views.py | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.conf import settings
import json
import urllib2, urllib
import datetime
# Cache kept for
minutes = 10
cam_url = settings.CAM_URL
def nextMeal():
now = datetime.datetime.now()
if now.weekday() > 5 or now.hour > 16:
# Weekends and after 16 show next day
return 1
else:
# Else show current day
return 0
@cache_page(60 * minutes)
def index(request, language='sv'):
day = nextMeal()
try:
response = urllib2.urlopen("http://api.teknolog.fi/taffa/%s/json/%d/"%(language, day))
except:
return HttpResponse("Unable to access lunch API.", status=500)
try:
menu = json.load(response)
except ValueError as e:
return HttpResponse("Error parsing json from api", status=500)
return render_to_response('dagsen/index.html',menu,context_instance=RequestContext(request))
@cache_page(5)
def queuecam(request):
stream=urllib.urlopen(cam_url)
bytes=''
for x in range(0, 1000):
bytes+=stream.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
return HttpResponse(jpg, content_type="image/jpeg")
return HttpResponse("Unable to access camera.", status=500)
| from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
import json
import urllib2, urllib
import datetime
# Cache kept for
minutes = 10
cam_url = settings.CAM_URL
def nextMeal():
now = datetime.datetime.now()
if now.weekday() > 5 or now.hour > 16:
# Weekends and after 16 show next day
return 1
else:
# Else show current day
return 0
@cache_page(60 * minutes)
def index(request, language='sv'):
day = nextMeal()
try:
response = urllib2.urlopen("http://api.teknolog.fi/taffa/%s/json/%d/"%(language, day))
except:
return HttpResponse("Unable to access lunch API.", status=500)
try:
menu = json.load(response)
except ValueError as e:
return HttpResponse("Error parsing json from api", status=500)
return render_to_response('dagsen/index.html',menu,context_instance=RequestContext(request))
@cache_page(5)
def queuecam(request):
stream=urllib.urlopen(cam_url)
bytes=''
for x in range(0, 1000):
bytes+=stream.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
return HttpResponse(jpg, content_type="image/jpeg")
return HttpResponse("Unable to access camera.", status=500)
| bsd-3-clause | Python |
911badb2ababf1f265a755974098a35fd66ddbde | fix rjapi null response | hansroh/aquests,hansroh/aquests | aquests/lib/rjapi.py | aquests/lib/rjapi.py | import requests
import json
from urllib.parse import quote
def tostr (p):
if p:
return "?" + "&".join (["%s=%s" % (k, quote (str (v))) for k, v in p.items ()])
return ""
class API:
API_SERVER = "http://127.0.0.1:5000"
REQ_TIMEOUT = 30.0
def __init__ (self, server, timeout = 30.0):
self.API_SERVER = server
self.REQ_TIMEOUT = timeout
self.session = requests.Session ()
def decode (self, resp):
try:
return resp.json ()
except ValueError:
if resp.status_code == 200:
return resp.content
else:
raise
def post (self, uri, d):
return self.decode (self.session.post ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT))
def put (self, uri, d):
return self.decode (self.session.put ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT))
def patch (self, uri, d):
return self.decode (self.session.patch ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT))
def get (self, uri, p = {}):
return self.decode (self.session.get ("%s/%s%s" % (self.API_SERVER, uri, tostr (p)), timeout = self.REQ_TIMEOUT))
def delete (self, uri, p = {}):
return self.decode (self.session.delete ("%s/%s%s" % (self.API_SERVER, uri, tostr (p)), timeout = self.REQ_TIMEOUT))
def options (self, uri):
return self.decode (self.session.options ("%s/%s%s" % (self.API_SERVER, uri), timeout = self.REQ_TIMEOUT))
| import requests
import json
from urllib.parse import quote
def tostr (p):
if p:
return "?" + "&".join (["%s=%s" % (k, quote (str (v))) for k, v in p.items ()])
return ""
class API:
API_SERVER = "http://127.0.0.1:5000"
REQ_TIMEOUT = 30.0
def __init__ (self, server, timeout = 30.0):
self.API_SERVER = server
self.REQ_TIMEOUT = timeout
self.session = requests.Session ()
def post (self, uri, d):
return self.session.post ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT).json ()
def put (self, uri, d):
return self.session.put ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT).json ()
def patch (self, uri, d):
return self.session.patch ("%s/%s" % (self.API_SERVER, uri), json.dumps (d), timeout = self.REQ_TIMEOUT).json ()
def get (self, uri, p = {}):
return self.session.get ("%s/%s%s" % (self.API_SERVER, uri, tostr (p)), timeout = self.REQ_TIMEOUT).json ()
def delete (self, uri, p = {}):
return self.session.delete ("%s/%s%s" % (self.API_SERVER, uri, tostr (p)), timeout = self.REQ_TIMEOUT).json ()
def options (self, uri):
return self.session.options ("%s/%s%s" % (self.API_SERVER, uri), timeout = self.REQ_TIMEOUT).json ()
| mit | Python |
6d02e8feeac74c40831f1d96c5d251e060fb7ec0 | Add 'post comment' api | MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS | server/resources.py | server/resources.py | from flask import request
from flask_restful import Resource, Api, abort, reqparse
from .models import db, Comment, Lecture
api = Api()
class CommentListResource(Resource):
def get(self, lecture_id):
db_lecture = Lecture.query.filter(Lecture.id == lecture_id).first()
if not db_lecture:
abort(404, message="Lecture {} does not exist".format(lecture_id))
db_comments = Comment.query.filter(Comment.lecture_id == lecture_id)
comments = [
{'id': c.id, 'content': c.content}
for c in db_comments
]
return {
'comments': comments
}
def post(self, lecture_id):
db_lectures = Lecture.query.filter(Lecture.id == lecture_id).all()
if not db_lectures:
abort(404, message="Lecture {} does not exist".format(lecture_id))
parser = reqparse.RequestParser()
parser.add_argument('content', help='Text content of comment')
args = parser.parse_args()
if not args.content:
abort(400, message="Comment has no content parameter")
content = args.content
lecture = db_lectures[0]
comment = Comment(content, lecture)
db.session.add(comment)
db.session.commit()
return {
'id': comment.id
}
api.add_resource(CommentListResource, '/api/0/lectures/<lecture_id>/comments')
| from flask_restful import Resource, Api, abort
from .models import Comment, Lecture
api = Api()
class CommentListResource(Resource):
def get(self, lecture_id):
db_lecture = Lecture.query.filter(Lecture.id == lecture_id).first()
if not db_lecture:
abort(404, message="Lecture {} does not exist".format(lecture_id))
db_comments = Comment.query.filter(Comment.lecture_id == lecture_id)
comments = [
{'id': c.id, 'content': c.content}
for c in db_comments
]
return {
'comments': comments
}
api.add_resource(CommentListResource, '/api/0/lectures/<lecture_id>/comments')
| mit | Python |
58783b1dc96a7ee3d880a6483d38422d51930268 | drop to pdb on newest pypy | HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak | spyvm/plugins/vmdebugging.py | spyvm/plugins/vmdebugging.py | import os
from spyvm import model, error
from spyvm.plugins.plugin import Plugin
from spyvm.util.system import IS_WINDOWS
DebuggingPlugin = Plugin()
DebuggingPlugin.userdata['stop_ui'] = False
def stop_ui_process():
DebuggingPlugin.userdata['stop_ui'] = True
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def trace(interp, s_frame, w_rcvr):
# interp.trace = True
# return w_rcvr
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def untrace(interp, s_frame, w_rcvr):
# interp.trace = False
# return w_rcvr
if IS_WINDOWS:
def fork():
raise NotImplementedError("fork on windows")
else:
fork = os.fork
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def trace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.activate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def untrace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.deactivate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def halt(interp, s_frame, w_rcvr):
print s_frame.print_stack()
from rpython.config.translationoption import get_translation_config
from rpython.rlib.objectmodel import we_are_translated
if not we_are_translated() or get_translation_config().translation.lldebug or get_translation_config().translation.lldebug0:
import pdb; pdb.set_trace()
raise error.PrimitiveFailedError
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isRSqueak(interp, s_frame, w_rcvr):
return interp.space.w_true
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isVMTranslated(interp, s_frame, w_rcvr):
from rpython.rlib.objectmodel import we_are_translated
if we_are_translated():
return interp.space.w_true
else:
return interp.space.w_false
@DebuggingPlugin.expose_primitive(unwrap_spec=[object, object])
def debugPrint(interp, s_frame, w_rcvr, w_string):
if not isinstance(w_string, model.W_BytesObject):
raise error.PrimitiveFailedError()
print interp.space.unwrap_string(w_string).replace('\r', '\n')
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def stopUIProcess(interp, s_frame, w_rcvr):
if DebuggingPlugin.userdata.get('stop_ui', False):
return interp.space.w_true
else:
return interp.space.w_false
| import os
from spyvm import model, error
from spyvm.plugins.plugin import Plugin
from spyvm.util.system import IS_WINDOWS
DebuggingPlugin = Plugin()
DebuggingPlugin.userdata['stop_ui'] = False
def stop_ui_process():
DebuggingPlugin.userdata['stop_ui'] = True
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def trace(interp, s_frame, w_rcvr):
# interp.trace = True
# return w_rcvr
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def untrace(interp, s_frame, w_rcvr):
# interp.trace = False
# return w_rcvr
if IS_WINDOWS:
def fork():
raise NotImplementedError("fork on windows")
else:
fork = os.fork
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def trace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.activate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def untrace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.deactivate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def halt(interp, s_frame, w_rcvr):
print s_frame.print_stack()
# No, this is not a mistake. Update your pypy checkout!
import pdb; pdb.set_trace()
raise error.PrimitiveFailedError
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isRSqueak(interp, s_frame, w_rcvr):
return interp.space.w_true
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isVMTranslated(interp, s_frame, w_rcvr):
from rpython.rlib.objectmodel import we_are_translated
if we_are_translated():
return interp.space.w_true
else:
return interp.space.w_false
@DebuggingPlugin.expose_primitive(unwrap_spec=[object, object])
def debugPrint(interp, s_frame, w_rcvr, w_string):
if not isinstance(w_string, model.W_BytesObject):
raise error.PrimitiveFailedError()
print interp.space.unwrap_string(w_string).replace('\r', '\n')
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def stopUIProcess(interp, s_frame, w_rcvr):
if DebuggingPlugin.userdata.get('stop_ui', False):
return interp.space.w_true
else:
return interp.space.w_false
| bsd-3-clause | Python |
f2aabd3bdcd522769b28b5e160677d8a96ca6bb8 | fix https://github.com/PyThaiNLP/pythainlp/issues/155 | franziz/artagger | artagger/__init__.py | artagger/__init__.py | # -*- coding: utf-8 -*-
from .Utility.Utils import getWordTag, readDictionary
from .InitialTagger.InitialTagger import initializeSentence
from .SCRDRlearner.SCRDRTree import SCRDRTree
from .SCRDRlearner.Object import FWObject
import os
import copy
import pickle
import codecs
class Word:
def __init__(self, **kwargs):
self.word = kwargs.get("word", None)
self.tag = kwargs.get("tag", None)
class RDRPOSTagger(SCRDRTree):
"""
RDRPOSTagger for a particular language
"""
def __init__(self):
self.root = None
def tagRawSentence(self, dictionary, rawLine):
line = initializeSentence(dictionary, rawLine)
sen = []
wordTags = line.split()
for i in range(len(wordTags)):
fwObject = FWObject.getFWObject(wordTags, i)
word, tag = getWordTag(wordTags[i])
node = self.findFiredNode(fwObject)
if node.depth > 0:
sen.append(Word(word=word, tag=node.conclusion))
# sen.append(word + "/" + node.conclusion)
else:# Fired at root, return initialized tag
sen.append(Word(word=word, tag=tag))
sen.append(word + "/" + tag)
return sen
# return " ".join(sen)
class Tagger:
def __init__(self, **kwargs):
self.language = kwargs.get("language", "th")
self.text = kwargs.get("text", None)
self.model = {}
self.load_model()
def load_model(self):
self.model.update({"th":{
"rdr": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.RDR"), "r", encoding="utf-8-sig"),
"dict": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.DICT"), "r", encoding="utf-8-sig")
}})
def tag(self, text):
self.text = copy.copy(text)
tagger = RDRPOSTagger()
rdr_file = self.model[self.language]["rdr"]
dict_file = self.model[self.language]["dict"]
tagger.constructSCRDRtreeFromRDRfile(rdr_file.readlines())
dictionary = readDictionary(dict_file.readlines())
return tagger.tagRawSentence(dictionary, self.text)
| # -*- coding: utf-8 -*-
from .Utility.Utils import getWordTag, readDictionary
from .InitialTagger.InitialTagger import initializeSentence
from .SCRDRlearner.SCRDRTree import SCRDRTree
from .SCRDRlearner.Object import FWObject
import os
import copy
import pickle
import codecs
class Word:
def __init__(self, **kwargs):
self.word = kwargs.get("word", None)
self.tag = kwargs.get("tag", None)
class RDRPOSTagger(SCRDRTree):
"""
RDRPOSTagger for a particular language
"""
def __init__(self):
self.root = None
def tagRawSentence(self, dictionary, rawLine):
line = initializeSentence(dictionary, rawLine)
sen = []
wordTags = line.split()
for i in range(len(wordTags)):
fwObject = FWObject.getFWObject(wordTags, i)
word, tag = getWordTag(wordTags[i])
node = self.findFiredNode(fwObject)
if node.depth > 0:
sen.append(Word(word=word, tag=node.conclusion))
# sen.append(word + "/" + node.conclusion)
else:# Fired at root, return initialized tag
sen.append(Word(word=word, tag=tag))
sen.append(word + "/" + tag)
return sen
# return " ".join(sen)
class Tagger:
def __init__(self, **kwargs):
self.language = kwargs.get("language", "th")
self.text = kwargs.get("text", None)
self.model = {}
self.load_model()
def load_model(self):
self.model.update({"th":{
"rdr": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.RDR"), "r", encoding="utf8"),
"dict": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.DICT"), "r", encoding="utf8")
}})
def tag(self, text):
self.text = copy.copy(text)
tagger = RDRPOSTagger()
rdr_file = self.model[self.language]["rdr"]
dict_file = self.model[self.language]["dict"]
tagger.constructSCRDRtreeFromRDRfile(rdr_file.readlines())
dictionary = readDictionary(dict_file.readlines())
return tagger.tagRawSentence(dictionary, self.text)
| apache-2.0 | Python |
605e362351bf3839ab6c15a67c29c339c6509d43 | increase version | pedvide/simetuc | simetuc/__init__.py | simetuc/__init__.py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 14:27:37 2016
@author: Pedro
"""
VERSION = '1.6.4'
DESCRIPTION = 'simetuc: Simulating Energy Transfer and Upconversion'
| # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 14:27:37 2016
@author: Pedro
"""
DESCRIPTION = 'simetuc: Simulating Energy Transfer and Upconversion'
| mit | Python |
35685f77d9590c7208877934f5e29851efac1277 | Fix incomplete docstring | privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea | privacyidea/lib/utils/compare.py | privacyidea/lib/utils/compare.py | # -*- coding: utf-8 -*-
#
# 2019-07-02 Friedrich Weber <friedrich.weber@netknights.it>
# Add a central module for comparing two values
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
This module implements comparisons between two values.
It is tested in test_lib_utils_compare.py
"""
import logging
log = logging.getLogger(__name__)
class CompareError(Exception):
def __init__(self, message):
self.message = message
def _compare_equality(left, comparator, right):
"""
Return True if two values are exactly equal, according to Python semantics.
"""
return left == right
def _compare_contains(left, comparator, right):
"""
Return True if ``left`` has ``right`` as an element. Raise a CompareError if
``left`` is not a list.
"""
if isinstance(left, list):
return right in left
else:
raise CompareError(u"Left value must be a list, not {!r}".format(type(left)))
COMPARATORS = {
"==": _compare_equality,
"contains": _compare_contains,
}
def compare_values(left, comparator, right):
"""
Compare two values according to ``comparator`` and return either True or False.
If the comparison is invalid, raise a CompareError with a descriptive message.
:param left: Left operand of the comparison
:param comparator: Comparator to use, one of ``COMPARATORS``
:param right: Right operand of the comparison
:return: True or False
"""
if comparator in COMPARATORS:
return COMPARATORS[comparator](left, comparator, right)
else:
# We intentionally leave out the values, in case sensitive values are compared
raise CompareError(u"Invalid comparator: {!r}".format(comparator))
__all__ = [COMPARATORS, compare_values]
| # -*- coding: utf-8 -*-
#
# 2019-07-02 Friedrich Weber <friedrich.weber@netknights.it>
# Add a central module for comparing two values
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
This module implements comparisons between two values.
It is tested in test_lib_utils_compare.py
"""
import logging
log = logging.getLogger(__name__)
class CompareError(Exception):
def __init__(self, message):
self.message = message
def _compare_equality(left, comparator, right):
"""
Return True if two values are exactly equal, according to Python semantics.
"""
return left == right
def _compare_contains(left, comparator, right):
"""
Return True if ``left`` has ``right`` as an element. Raise a CompareError if
``left`` is not a list.
"""
if isinstance(left, list):
return right in left
else:
raise CompareError(u"Left value must be a list, not {!r}".format(type(left)))
COMPARATORS = {
"==": _compare_equality,
"contains": _compare_contains,
}
def compare_values(left, comparator, right):
"""
Compare two values according to ``comparator`` and return either True or False.
If the comparison is invalid, raise a CompareError with a descriptive message.
:param left:
:param comparator:
:param right:
:return:
"""
if comparator in COMPARATORS:
return COMPARATORS[comparator](left, comparator, right)
else:
# We intentionally leave out the values, in case sensitive values are compared
raise CompareError(u"Invalid comparator: {!r}".format(comparator))
__all__ = [COMPARATORS, compare_values]
| agpl-3.0 | Python |
d5aba53b285587f2d13ba3e5372ed699354c9135 | Add red_check | jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools | problem/color/find_color_name.py | problem/color/find_color_name.py | #! /usr/bin/env python
from functools import lru_cache
import colormath as cm
import colormath.color_objects as co
import seaborn as sns
assert 949 == len(sns.xkcd_rgb) # e.g. 'very light green': '#d1ffbd'
@lru_cache()
def get_name_to_srgb():
keys = sns.xkcd_rgb.keys()
return dict(zip(keys, sns.xkcd_palette(keys)))
def red_check():
red = get_name_to_srgb()['bright red']
red = co.sRGBColor(*red)
assert not red.is_upscaled
assert '#ff000d' == red.get_rgb_hex() == sns.xkcd_rgb['bright red']
if __name__ == '__main__':
red_check()
| #! /usr/bin/env python
from functools import lru_cache
import colormath as cm
import colormath.color_objects as co
import seaborn as sns
assert 949 == len(sns.xkcd_rgb) # e.g. 'very light green': '#d1ffbd'
@lru_cache()
def get_name_to_srgb():
keys = sns.xkcd_rgb.keys()
return dict(zip(keys, sns.xkcd_palette(keys)))
if __name__ == '__main__':
print(sns.xkcd_rgb['bright red'])
red = get_name_to_srgb()['bright red']
red = co.sRGBColor(*red)
print(red, red.is_upscaled)
print(co.sRGBColor(*red.get_value_tuple()),
co.sRGBColor(*red.get_value_tuple()).is_upscaled)
up = co.sRGBColor(*red.get_upscaled_value_tuple())
print(up, up.is_upscaled)
| mit | Python |
a50c93a5166dc341640fad71fcb62466608d2494 | fix potential exceptions | guiniol/py3status,alexoneill/py3status,ultrabug/py3status,tobes/py3status,guiniol/py3status,ultrabug/py3status,valdur55/py3status,valdur55/py3status,tobes/py3status,docwalter/py3status,vvoland/py3status,valdur55/py3status,Andrwe/py3status,ultrabug/py3status,Andrwe/py3status | py3status/modules/taskwarrior.py | py3status/modules/taskwarrior.py | # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
"""
import json
STRING_NOT_INSTALLED = "taskwarrior: isn't installed"
STRING_ERROR = "taskwarrior: unknown error"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
def taskWarrior(self):
if not self.py3.check_commands('task'):
return {
'cached_until': self.py3.CACHE_FOREVER,
'color': self.py3.COLOR_BAD,
'full_text': STRING_NOT_INSTALLED
}
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
try:
task_command = 'task start.before:tomorrow status:pending export'
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
except:
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'color': self.py3.COLOR_BAD,
'full_text': STRING_ERROR
}
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
"""
import json
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
def taskWarrior(self):
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
task_command = 'task start.before:tomorrow status:pending export'
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | Python |
4a09ef767d28671c567fc5ac657e37ccf124d13b | change ssl make with shell | wdongxv/DDDProxy,wdongxv/DDDProxy,wdongxv/DDDProxy,wdongxv/DDDProxy | DDDProxyConfig.py | DDDProxyConfig.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015年1月11日
@author: dx.wang
'''
import os
import ssl
import threading
from os.path import dirname
localServerProxyListenPort = 8080
localServerAdminListenPort = 8081
localServerListenIp = "0.0.0.0"
remoteServerHost = None
remoteServerListenPort = 8083
remoteServerListenIp = "0.0.0.0"
remoteServerAuth = None
blockHost = []
debuglevel = 1
timeout = 600
cacheSize = 1024 * 2
baseDir = dirname(__file__)
SSLLocalCertPath = baseDir+"/tmp/cert.local.pem"
SSLCertPath = baseDir+"/tmp/cert.remote.pem"
SSLKeyPath = baseDir+"/tmp/key.remote.pem"
pacDomainConfig = baseDir+"/tmp/DDDProxy.domain.json"
domainAnalysisConfig = baseDir+"/tmp/DDDProxy.domainAnalysis.json"
createCertLock = threading.RLock()
def fetchRemoteCert():
createCertLock.acquire()
if not os.path.exists(SSLLocalCertPath):
cert = ssl.get_server_certificate(addr=(remoteServerHost, remoteServerListenPort))
open(SSLLocalCertPath, "wt").write(cert)
createCertLock.release()
def createSSLCert():
createCertLock.acquire()
if not os.path.exists(SSLCertPath) or not os.path.exists(SSLCertPath):
shell = "openssl req -new -newkey rsa:1024 -days 3650 -nodes -x509 -subj \"/C=US/ST=Denial/L=Springfield/O=Dis/CN=dddproxy\" -keyout %s -out %s"%(
SSLKeyPath,SSLCertPath)
os.system(shell)
createCertLock.release()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015年1月11日
@author: dx.wang
'''
import os
import ssl
import threading
import random
from os.path import dirname
from OpenSSL import crypto
localServerProxyListenPort = 8080
localServerAdminListenPort = 8081
localServerListenIp = "0.0.0.0"
remoteServerHost = None
remoteServerListenPort = 8083
remoteServerListenIp = "0.0.0.0"
remoteServerAuth = None
blockHost = []
debuglevel = 1
timeout = 600
cacheSize = 1024 * 2
baseDir = dirname(__file__)
SSLLocalCertPath = baseDir+"/tmp/cert.local.pem"
SSLCertPath = baseDir+"/tmp/cert.remote.pem"
SSLKeyPath = baseDir+"/tmp/key.remote.pem"
pacDomainConfig = baseDir+"/tmp/DDDProxy.domain.json"
domainAnalysisConfig = baseDir+"/tmp/DDDProxy.domainAnalysis.json"
createCertLock = threading.RLock()
def fetchRemoteCert():
createCertLock.acquire()
if not os.path.exists(SSLLocalCertPath):
cert = ssl.get_server_certificate(addr=(remoteServerHost, remoteServerListenPort))
open(SSLLocalCertPath, "wt").write(cert)
createCertLock.release()
def createSSLCert():
createCertLock.acquire()
if not os.path.exists(SSLCertPath) or not os.path.exists(SSLCertPath):
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
cert = crypto.X509()
cert.get_subject().C = "CN"
cert.get_subject().ST = "%f"%(random.random()*10)
cert.get_subject().L = "%f"%(random.random()*10)
cert.get_subject().O = "%f"%(random.random()*10)
cert.get_subject().OU = "%f"%(random.random()*10)
cert.get_subject().CN = "%f"%(random.random()*10)
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(315360000)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
certPem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
keyPem = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
open(SSLCertPath, "wt").write(certPem)
open(SSLKeyPath, "wt").write(keyPem)
createCertLock.release()
| apache-2.0 | Python |
f682351e08d925293e4a428afc8a379a45c8d2ac | change the boxplot script. now supports grouping | Buchhold/QLever,Buchhold/QLever,Buchhold/QLever,Buchhold/QLever,Buchhold/QLever | misc/create_boxplot_from_compare_out.py | misc/create_boxplot_from_compare_out.py | import argparse
import seaborn as sns
import pandas as pd
import numpy as np
__author__ = 'buchholb'
parser = argparse.ArgumentParser()
parser.add_argument('--times-file',
type=str,
help='Output of the compare_performance.py script',
required=True)
def get_data_frame_from_input_file(in_file):
data = {'type': {}, 'approach': {}, 'time in ms': {}, 'nof matches': {}}
col_id_to_label = {}
first_line = True
id = 0
for line in open(in_file):
if first_line:
labels = line.strip().split('\t')
for i, l in enumerate(labels):
col_id_to_label[i] = l
first_line = False
else:
if line.startswith('--'):
continue
vals = line.strip().split('\t')
type = vals[0].split('_')[0]
time = ""
approach = ""
for i in range(2, len(vals)):
if i % 2 == 0:
if vals[i] == '-':
time = np.nan
else:
time = vals[i].strip().rstrip('ms').rstrip()
approach = col_id_to_label[i]
else:
nof_matches = vals[i]
data['type'][id] = type
data['approach'][id] = approach
data['time in ms'][id] = float(time)
data['nof matches'][id] = int(nof_matches)
id += 1
return pd.DataFrame(data)
def main():
args = vars(parser.parse_args())
in_file = args['times_file']
df = get_data_frame_from_input_file(in_file)
print(df)
ax = sns.boxplot(x='type', y='time in ms', hue='approach', data=df)
ax.set(yscale="log")
ax.figure.savefig(in_file + '.boxplot.png')
if __name__ == '__main__':
main()
| import argparse
import seaborn as sns
import pandas as pd
__author__ = 'buchholb'
parser = argparse.ArgumentParser()
parser.add_argument('--times-file',
type=str,
help='Output of the compare_performance.py script',
required=True)
def get_data_frame_from_input_file(in_file):
label_to_id_val_dict = {}
col_id_to_label = {}
first_line = True
for line in open(in_file):
if first_line:
labels = line.strip().split('\t')
label_to_id_val_dict['type'] = {}
for i, l in enumerate(labels):
col_id_to_label[i] = l
if i >= 2 and i % 2 == 0:
label_to_id_val_dict[l] = {}
first_line = False
else:
if line.startswith('--'):
continue
vals = line.strip().split('\t')
for i, v in enumerate(vals):
type = vals[0].split('_')[0]
label_to_id_val_dict['type'][vals[0]] = type
if i >= 2 and i % 2 == 0 and v != '-':
l = col_id_to_label[i]
label_to_id_val_dict[l][vals[0]] = v.strip().rstrip('ms').rstrip()
return pd.DataFrame(label_to_id_val_dict)
def main():
args = vars(parser.parse_args())
in_file = args['times_file']
df = get_data_frame_from_input_file(in_file)
print(df)
ax = sns.boxplot(df, x=df.type)
ax.set(yscale="log")
ax.figure.savefig(in_file + '.boxplot.png')
if __name__ == '__main__':
main()
| apache-2.0 | Python |
a5faeb16a599b4260f32741846607dd05f10bc53 | Add ability to specify which settings are used via `export PRODUCTION=0' | django-settings/django-settings | myproject/myproject/project_settings.py | myproject/myproject/project_settings.py | # Project Settings - Settings that don't exist in settings.py that you want to
# add (e.g. USE_THOUSAND_SEPARATOR, GRAPPELLI_ADMIN_TITLE, CELERYBEAT_SCHEDULER,
# CELERYD_PREFETCH_MULTIPLIER, etc.)
#USE_THOUSAND_SEPARATOR = True
#GRAPPELLI_ADMIN_TITLE = ''
#import djcelery
#djcelery.setup_loader()
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
#CELERYD_PREFETCH_MULTIPLIER = 1
import os
import sys
if 'PRODUCTION' in os.environ and os.environ['PRODUCTION'].lower() in [True, 'y', 'yes', '1',]:
from production_settings import *
elif 'runserver' in sys.argv:
from local_settings import *
else:
from production_settings import *
| # Project Settings - Settings that don't exist in settings.py that you want to
# add (e.g. USE_THOUSAND_SEPARATOR, GRAPPELLI_ADMIN_TITLE, CELERYBEAT_SCHEDULER,
# CELERYD_PREFETCH_MULTIPLIER, etc.)
#USE_THOUSAND_SEPARATOR = True
#GRAPPELLI_ADMIN_TITLE = ''
#import djcelery
#djcelery.setup_loader()
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
#CELERYD_PREFETCH_MULTIPLIER = 1
import sys
if 'runserver' in sys.argv:
from local_settings import *
else:
from production_settings import *
| unlicense | Python |
69af087e7115ff8443cfccb6979cdbeea95345b9 | change from manual toggling to automatic toggling | bardia-heydarinejad/Graph,bardia-heydarinejad/Graph,bardia73/Graph,bardia73/Graph | Graph/settings.py | Graph/settings.py | """
Django settings for Graph project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'am=#-+@+9x4-gt92((qt^0y_@fmawp@b(+jt7k2#gj1uri^imk'
# SECURITY WARNING: don't run with debug turned on in production!
#There is a way to automatically toggle between the two environments
#This solves the probable Problems forever.
import socket
if socket.gethostname() == '<a href="http://productionserver.com" target="_blank">productionserver.com</a>':
DEBUG = False
else:
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Graph.urls'
WSGI_APPLICATION = 'Graph.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| """
Django settings for Graph project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'am=#-+@+9x4-gt92((qt^0y_@fmawp@b(+jt7k2#gj1uri^imk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Graph.urls'
WSGI_APPLICATION = 'Graph.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit | Python |
ca2026bdf4d9b148115a236979c85ef761c85bc8 | fix logic in test_gf_coverage script | googlefonts/gftools,googlefonts/gftools | test_gf_coverage.py | test_gf_coverage.py | #!/usr/bin/env python2
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Fonts Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from util.google_fonts import (CodepointsInFont,
codepointsInNamelist)
NAM_DIR = os.path.join("encodings", "GF Glyph Sets")
NAM_FILES = [
"GF-latin-core_unique-glyphs.nam",
"GF-latin-expert_unique-glyphs.nam",
"GF-latin-plus_optional-glyphs.nam",
"GF-latin-plus_unique-glyphs.nam",
"GF-latin-pro_optional-glyphs.nam",
"GF-latin-pro_unique-glyphs.nam"
]
def main():
if len(sys.argv) != 2 or sys.argv[1][-4:] != ".ttf":
sys.exit('Usage: {} fontfile.ttf'.format(sys.argv[0]))
expected = set()
for nam_file in NAM_FILES:
nam_filepath = os.path.join(NAM_DIR, nam_file)
expected.update(codepointsInNamelist(nam_filepath))
filename = sys.argv[1]
diff = expected - CodepointsInFont(filename)
print filename,
if bool(diff):
print 'missing',
for c in sorted(diff):
print '0x%04X' % (c),
else:
print 'OK'
if __name__ == '__main__':
main()
| #!/usr/bin/env python2
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Fonts Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from util.google_fonts import (CodepointsInFont,
codepointsInNamelist)
NAM_DIR = os.path.join("encodings", "GF Glyph Sets")
NAM_FILES = [
"GF-latin-core_unique-glyphs.nam",
"GF-latin-expert_unique-glyphs.nam",
"GF-latin-plus_optional-glyphs.nam",
"GF-latin-plus_unique-glyphs.nam",
"GF-latin-pro_optional-glyphs.nam",
"GF-latin-pro_unique-glyphs.nam"
]
def main():
if len(sys.argv) != 2 or sys.argv[1][-4:] != ".ttf":
sys.exit('Usage: {} fontfile.ttf'.format(sys.argv[0]))
expected = set()
for nam_file in NAM_FILES:
nam_filepath = os.path.join(NAM_DIR, nam_file)
expected.update(codepointsInNamelist(nam_filepath))
filename = sys.argv[1]
cps = CodepointsInFont(filename)
diff = cps - expected
print filename,
if bool(diff):
print 'missing',
for c in sorted(diff):
print '0x%04X' % (c),
else:
print 'OK'
if __name__ == '__main__':
main()
| apache-2.0 | Python |
7cbad2ecab781391758ed7391612644d7a695652 | make things derive from SimError | f-prettyland/angr,angr/angr,chubbymaggie/simuvex,iamahuman/angr,f-prettyland/angr,chubbymaggie/angr,chubbymaggie/angr,axt/angr,chubbymaggie/simuvex,angr/angr,iamahuman/angr,schieb/angr,schieb/angr,iamahuman/angr,tyb0807/angr,f-prettyland/angr,tyb0807/angr,schieb/angr,axt/angr,tyb0807/angr,angr/angr,angr/simuvex,axt/angr,chubbymaggie/simuvex,zhuyue1314/simuvex,chubbymaggie/angr | simuvex/s_errors.py | simuvex/s_errors.py | #!/usr/bin/env python
class SimError(Exception):
pass
class SimIRSBError(SimError):
pass
class SimModeError(SimError):
pass
class SimProcedureError(SimError):
pass
class SimMergeError(SimError):
pass
class SimValueError(SimError):
pass
class SimUnsatError(SimValueError):
pass
class SimMemoryError(SimError):
pass
| #!/usr/bin/env python
class SimError(Exception):
pass
class SimIRSBError(SimError):
pass
class SimModeError(SimError):
pass
class SimProcedureError(Exception):
pass
class SimMergeError(Exception):
pass
class SimValueError(Exception):
pass
class SimUnsatError(SimValueError):
pass
class SimMemoryError(SimError):
pass
| bsd-2-clause | Python |
51f107f3e218b42553340ca59b55386c39787a33 | Fix required package intent_service | forslund/mycroft-core,MycroftAI/mycroft-core,Dark5ide/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core,aatchison/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,aatchison/mycroft-core,linuxipho/mycroft-core | skills-sdk-setup.py | skills-sdk-setup.py | from setuptools import setup
from mycroft.util.setup_base import get_version, place_manifest
__author__ = 'seanfitz'
place_manifest("skills-sdk-MANIFEST.in")
setup(
name="mycroft-skills-sdk",
version=get_version(),
install_requires=[
"mustache==0.1.4",
"configobj==5.0.6",
"pyee==1.0.1",
"adapt-parser==0.2.1",
"padatious==0.1.4"
"websocket-client==0.32.0"
],
packages=[
"mycroft.configuration",
"mycroft.dialog",
"mycroft.filesystem",
"mycroft.messagebus",
"mycroft.messagebus.client",
"mycroft.session",
"mycroft.skills.intent_service",
"mycroft.skills",
"mycroft.util",
"mycroft"
],
include_package_data=True,
entry_points={
'console_scripts': [
'mycroft-skill-container=mycroft.skills.container:main'
]
}
)
| from setuptools import setup
from mycroft.util.setup_base import get_version, place_manifest
__author__ = 'seanfitz'
place_manifest("skills-sdk-MANIFEST.in")
setup(
name="mycroft-skills-sdk",
version=get_version(),
install_requires=[
"mustache==0.1.4",
"configobj==5.0.6",
"pyee==1.0.1",
"adapt-parser==0.2.1",
"padatious==0.1.4"
"websocket-client==0.32.0"
],
packages=[
"mycroft.configuration",
"mycroft.dialog",
"mycroft.filesystem",
"mycroft.messagebus",
"mycroft.messagebus.client",
"mycroft.session",
"mycroft.skills.intent",
"mycroft.skills",
"mycroft.util",
"mycroft"
],
include_package_data=True,
entry_points={
'console_scripts': [
'mycroft-skill-container=mycroft.skills.container:main'
]
}
)
| apache-2.0 | Python |
4c16bd7c11aabb96e6df0ebbfcf600f92950e4d5 | fix get_indices | theislab/scvelo | tests/test_basic.py | tests/test_basic.py | from scvelo.tools.utils import prod_sum_obs, prod_sum_var, norm
import numpy as np
import scvelo as scv
def test_einsum():
adata = scv.datasets.toy_data(n_obs=100)
scv.pp.recipe_velocity(adata, n_top_genes=300)
Ms, Mu = adata.layers['Ms'], adata.layers['Mu']
assert np.allclose(prod_sum_obs(Ms, Mu), np.sum(Ms * Mu, 0))
assert np.allclose(prod_sum_var(Ms, Mu), np.sum(Ms * Mu, 1))
assert np.allclose(norm(Ms), np.linalg.norm(Ms, axis=1))
def test_velocity_graph():
adata = scv.datasets.toy_data(n_obs=1000)
scv.pp.recipe_velocity(adata, n_top_genes=300)
scv.tl.velocity(adata)
scv.tl.velocity_graph(adata)
graph1 = adata.uns['velocity_graph'].copy()
scv.tl.velocity_graph(adata, n_jobs=2)
graph2 = adata.uns['velocity_graph'].copy()
assert np.allclose((scv.tl.transition_matrix(adata) > 0).toarray(), (graph1 > 0).toarray())
assert np.allclose(graph1.toarray(), graph2.toarray())
| from scvelo.tools.utils import prod_sum_obs, prod_sum_var, norm
import numpy as np
import scvelo as scv
def test_einsum():
adata = scv.datasets.toy_data(n_obs=100)
scv.pp.recipe_velocity(adata)
Ms, Mu = adata.layers['Ms'], adata.layers['Mu']
assert np.allclose(prod_sum_obs(Ms, Mu), np.sum(Ms * Mu, 0))
assert np.allclose(prod_sum_var(Ms, Mu), np.sum(Ms * Mu, 1))
assert np.allclose(norm(Ms), np.linalg.norm(Ms, axis=1))
def test_velocity_graph():
adata = scv.datasets.toy_data(n_obs=1000)
scv.pp.recipe_velocity(adata, n_top_genes=300)
scv.tl.velocity(adata)
scv.tl.velocity_graph(adata)
graph1 = adata.uns['velocity_graph'].copy()
scv.tl.velocity_graph(adata, n_jobs=2)
graph2 = adata.uns['velocity_graph'].copy()
assert np.allclose((scv.tl.transition_matrix(adata) > 0).toarray(), (graph1 > 0).toarray())
assert np.allclose(graph1.toarray(), graph2.toarray())
| bsd-3-clause | Python |
204c771c77b03ae1829ba09c6382d19c13591c41 | move rest result for good | xflr6/graphviz | tests/test_files.py | tests/test_files.py | import os
import graphviz
def test_save_source_from_files(tmp_path):
dot = graphviz.Digraph(directory=tmp_path)
dot.edge('hello', 'world')
dot.render()
old_stat = os.stat(dot.filepath)
source = graphviz.Source.from_file(dot.filepath)
source.save()
assert os.stat(dot.filepath).st_mtime == old_stat.st_mtime
| import os
import graphviz
def test_save_source_from_files(tmp_path):
dot = graphviz.Digraph()
dot.edge('hello', 'world')
dot.render()
old_stat = os.stat(dot.filepath)
source = graphviz.Source.from_file(dot.filepath)
source.save()
assert os.stat(dot.filepath).st_mtime == old_stat.st_mtime
| mit | Python |
21f1a34989c6170abffd2cae03188b9147a1462d | Make get_updates test less strict | davande/hackernews-top,rylans/hackernews-top | tests/test_hnapi.py | tests/test_hnapi.py | """
Tests
"""
from __future__ import unicode_literals
import unittest
from hnapi.connectors.api_connector import ApiConnector
class HnapiTest(unittest.TestCase):
"""
Test hnapi
"""
def test_get_item_by(self):
"""
Test item retrieval and 'by' field
"""
con = ApiConnector()
item = con.get_item(8863)
byline = item.get('by')
self.assertEqual(byline, 'dhouston')
def test_get_max_item(self):
"""
Test retrieval of the max item without error
"""
con = ApiConnector()
max_item_id = con.get_max_item()
max_item = con.get_item(max_item_id)
self.assertTrue(max_item.get('id') > 0)
def test_get_updates_users(self):
"""
Test retrieval of new users
"""
con = ApiConnector()
updates = con.get_updates()
self.assertTrue(len(updates.get('profiles')) > 1)
user = con.get_user(updates.get('profiles')[0])
year_2001 = 1000000000
self.assertTrue(user.get('created') > year_2001)
def test_get_updates_item(self):
"""
Test retrieval of new items
"""
con = ApiConnector()
updates = con.get_updates()
self.assertTrue(len(updates.get('items')) > 1)
item = con.get_item(updates.get('items')[0])
year_2001 = 1000000000
self.assertTrue(item.get('time') > year_2001)
if __name__ == '__main__':
unittest.main()
| """
Tests
"""
from __future__ import unicode_literals
import unittest
from hnapi.connectors.api_connector import ApiConnector
class HnapiTest(unittest.TestCase):
"""
Test hnapi
"""
def test_get_item_by(self):
"""
Test item retrieval and 'by' field
"""
con = ApiConnector()
item = con.get_item(8863)
byline = item.get('by')
self.assertEqual(byline, 'dhouston')
def test_get_max_item(self):
"""
Test retrieval of the max item without error
"""
con = ApiConnector()
max_item_id = con.get_max_item()
max_item = con.get_item(max_item_id)
self.assertTrue(max_item.get('id') > 0)
def test_get_updates_users(self):
"""
Test retrieval of new users
"""
con = ApiConnector()
updates = con.get_updates()
self.assertTrue(len(updates.get('profiles')) > 10)
user = con.get_user(updates.get('profiles')[0])
year_2001 = 1000000000
self.assertTrue(user.get('created') > year_2001)
def test_get_updates_item(self):
"""
Test retrieval of new items
"""
con = ApiConnector()
updates = con.get_updates()
self.assertTrue(len(updates.get('items')) > 10)
item = con.get_item(updates.get('items')[0])
year_2001 = 1000000000
self.assertTrue(item.get('time') > year_2001)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
b026917c58d98bf70df452a7e04252af010ca441 | Add test for dashboard change quantity form | itbabu/saleor,tfroehlich82/saleor,maferelo/saleor,UITools/saleor,car3oon/saleor,UITools/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,KenMutemi/saleor,UITools/saleor,mociepka/saleor,itbabu/saleor,UITools/saleor,HyperManTT/ECommerceSaleor,maferelo/saleor,UITools/saleor,KenMutemi/saleor,maferelo/saleor,car3oon/saleor,car3oon/saleor,KenMutemi/saleor,itbabu/saleor,mociepka/saleor,tfroehlich82/saleor,jreigel/saleor,HyperManTT/ECommerceSaleor,jreigel/saleor,jreigel/saleor,tfroehlich82/saleor | tests/test_order.py | tests/test_order.py | from prices import Price
from saleor.cart.models import Cart
from saleor.dashboard.order.forms import ChangeQuantityForm
from saleor.order import models
def test_total_property():
order = models.Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
def test_total_property_empty_value():
order = models.Order(total_net=None, total_tax=None)
assert order.total is None
def test_total_setter():
price = Price(net=10, gross=20, currency='USD')
order = models.Order()
order.total = price
assert order.total_net.net == 10
assert order.total_tax.net == 10
def test_stock_allocation(billing_address, product_in_stock):
variant = product_in_stock.variants.get()
cart = Cart()
cart.save()
cart.add(variant, quantity=2)
order = models.Order.objects.create(billing_address=billing_address)
delivery_group = models.DeliveryGroup.objects.create(order=order)
delivery_group.add_items_from_partition(cart)
order_line = delivery_group.items.get()
stock = order_line.stock
assert stock.quantity_allocated == 2
def test_dashboard_change_quantity_form(billing_address, product_in_stock):
start_quantity = 1
variant = product_in_stock.variants.get()
cart = Cart()
cart.save()
cart.add(variant, quantity=start_quantity)
order = models.Order.objects.create(billing_address=billing_address)
group = models.DeliveryGroup.objects.create(order=order)
group.add_items_from_partition(cart)
order_line = group.items.get()
expected_quantity = order_line.stock.quantity_allocated
# Check available quantity validation
form = ChangeQuantityForm({'quantity': 9999},
instance=order_line)
assert not form.is_valid()
assert group.items.get().stock.quantity_allocated == expected_quantity
# Save same quantity
form = ChangeQuantityForm({'quantity': start_quantity},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == expected_quantity
# Increase quantity
expected_quantity += 1
form = ChangeQuantityForm({'quantity': start_quantity + 1},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == expected_quantity
# Decrease quantity
expected_quantity -= 1
form = ChangeQuantityForm({'quantity': start_quantity},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == expected_quantity
| from prices import Price
from saleor.cart.models import Cart
from saleor.order import models
def test_total_property():
order = models.Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
def test_total_property_empty_value():
order = models.Order(total_net=None, total_tax=None)
assert order.total is None
def test_total_setter():
price = Price(net=10, gross=20, currency='USD')
order = models.Order()
order.total = price
assert order.total_net.net == 10
assert order.total_tax.net == 10
def test_stock_allocation(billing_address, product_in_stock):
variant = product_in_stock.variants.get()
cart = Cart()
cart.save()
cart.add(variant, quantity=2)
order = models.Order.objects.create(billing_address=billing_address)
delivery_group = models.DeliveryGroup.objects.create(order=order)
delivery_group.add_items_from_partition(cart)
order_line = delivery_group.items.get()
stock = order_line.stock
assert stock.quantity_allocated == 2
| bsd-3-clause | Python |
2db8c4874e10db7c78f4255df45b14a9484990ec | Update social_auth/urls.py | duoduo369/django-social-auth,vuchau/django-social-auth,adw0rd/django-social-auth,vuchau/django-social-auth,VishvajitP/django-social-auth,beswarm/django-social-auth,dongguangming/django-social-auth,gustavoam/django-social-auth,WW-Digital/django-social-auth,caktus/django-social-auth,michael-borisov/django-social-auth,sk7/django-social-auth,michael-borisov/django-social-auth,getsentry/django-social-auth,MjAbuz/django-social-auth,vxvinh1511/django-social-auth,limdauto/django-social-auth,qas612820704/django-social-auth,caktus/django-social-auth,MjAbuz/django-social-auth,qas612820704/django-social-auth,beswarm/django-social-auth,gustavoam/django-social-auth,dongguangming/django-social-auth,VishvajitP/django-social-auth,krvss/django-social-auth,omab/django-social-auth,limdauto/django-social-auth,vxvinh1511/django-social-auth,omab/django-social-auth | social_auth/urls.py | social_auth/urls.py | """URLs module"""
try:
from django.conf.urls import patterns, url
except ImportError:
# for Django version less then 1.4
from django.conf.urls.defaults import patterns, url
from social_auth.views import auth, complete, disconnect
urlpatterns = patterns('',
# authentication
url(r'^login/(?P<backend>[^/]+)/$', auth,
name='socialauth_begin'),
url(r'^complete/(?P<backend>[^/]+)/$', complete,
name='socialauth_complete'),
# XXX: Deprecated, this URLs are deprecated, instead use the login and
# complete ones directly, they will differentiate the user intention
# by checking it's authenticated status association.
url(r'^associate/(?P<backend>[^/]+)/$', auth,
name='socialauth_associate_begin'),
url(r'^associate/complete/(?P<backend>[^/]+)/$', complete,
name='socialauth_associate_complete'),
# disconnection
url(r'^disconnect/(?P<backend>[^/]+)/$', disconnect,
name='socialauth_disconnect'),
url(r'^disconnect/(?P<backend>[^/]+)/(?P<association_id>[^/]+)/$',
disconnect, name='socialauth_disconnect_individual'),
)
| """URLs module"""
try:
from django.conf.urls import patterns, url
except ImportError:
# for Django version less then 1.4
from django.conf.urls.default import patterns, url
from social_auth.views import auth, complete, disconnect
urlpatterns = patterns('',
# authentication
url(r'^login/(?P<backend>[^/]+)/$', auth,
name='socialauth_begin'),
url(r'^complete/(?P<backend>[^/]+)/$', complete,
name='socialauth_complete'),
# XXX: Deprecated, this URLs are deprecated, instead use the login and
# complete ones directly, they will differentiate the user intention
# by checking it's authenticated status association.
url(r'^associate/(?P<backend>[^/]+)/$', auth,
name='socialauth_associate_begin'),
url(r'^associate/complete/(?P<backend>[^/]+)/$', complete,
name='socialauth_associate_complete'),
# disconnection
url(r'^disconnect/(?P<backend>[^/]+)/$', disconnect,
name='socialauth_disconnect'),
url(r'^disconnect/(?P<backend>[^/]+)/(?P<association_id>[^/]+)/$',
disconnect, name='socialauth_disconnect_individual'),
)
| bsd-3-clause | Python |
72add1034c9f085b73ec45eff5f3cb686e0322cb | update phylo test | AndersenLab/vcf-toolbox,AndersenLab/vcf-kit,AndersenLab/vcf-kit,AndersenLab/vcf-toolbox,AndersenLab/vcf-toolbox,AndersenLab/vcf-kit | tests/test_phylo.py | tests/test_phylo.py | from vcfkit import phylo
from subprocess import Popen, PIPE
import hashlib
from tests.test_utilities import Capturing
def test_phylo_fasta():
with Capturing() as out:
phylo.main(["phylo", "fasta", "test_data/test.vcf.gz"])
h = hashlib.sha224(str(out).strip()).hexdigest()
assert h == "3e13b81be200e1128d6ee52ff68139d84ac7d638bc3bf3d272241766"
def test_phylo_nj():
with Capturing() as out:
phylo.main(["phylo", "tree", "nj", "test_data/test.vcf.gz"])
h = hashlib.sha224(str(out).strip()).hexdigest()
assert h == "5faa8909fab5d5141e8232d1fb05705a01cd608cba9610d740166fff"
def test_phylo_upgma():
with Capturing() as out:
comm = ["phylo", "tree", "upgma", "test_data/test.vcf.gz"]
phylo.main(comm)
h = hashlib.sha224(str(out).strip()).hexdigest()
assert h == "4d0deed3dd8421ebf4c3f4458e027c7f2af7c4a699bfd8ce0f3067cf"
def test_phylo_plot():
comm = ["phylo", "tree", "upgma", "--plot", "test_data/test.vcf.gz"]
out = phylo.main(comm)
h = hashlib.sha224(str(out).strip()).hexdigest() | from vcfkit import phylo
from subprocess import Popen, PIPE
import hashlib
from tests.test_utilities import Capturing
def test_phylo_fasta():
with Capturing() as out:
phylo.main(["phylo", "fasta", "test_data/test.vcf.gz"])
h = hashlib.sha224(str(out)).hexdigest()
assert h == "3e13b81be200e1128d6ee52ff68139d84ac7d638bc3bf3d272241766"
def test_phylo_nj():
with Capturing() as out:
phylo.main(["phylo", "tree", "nj", "test_data/test.vcf.gz"])
h = hashlib.sha224(str(out)).hexdigest()
assert h == "5faa8909fab5d5141e8232d1fb05705a01cd608cba9610d740166fff"
def test_phylo_upgma():
with Capturing() as out:
comm = ["phylo", "tree", "upgma", "test_data/test.vcf.gz"]
phylo.main(comm)
h = hashlib.sha224(str(out)).hexdigest()
assert h == "4d0deed3dd8421ebf4c3f4458e027c7f2af7c4a699bfd8ce0f3067cf"
def test_phylo_plot():
comm = ["phylo", "tree", "upgma", "--plot", "test_data/test.vcf.gz"]
out = phylo.main(comm)
h = hashlib.sha224(str(out)).hexdigest() | mit | Python |
794596fd6f55806eecca1c54e155533590108eee | Rename misleading parameter name: UnicodeDictReader should have the same interface as csv.DictReader | CivicVision/datahub,nathanhilbert/FPA_Core,USStateDept/FPA_Core,johnjohndoe/spendb,openspending/spendb,spendb/spendb,CivicVision/datahub,pudo/spendb,CivicVision/datahub,nathanhilbert/FPA_Core,pudo/spendb,openspending/spendb,johnjohndoe/spendb,spendb/spendb,nathanhilbert/FPA_Core,openspending/spendb,johnjohndoe/spendb,spendb/spendb,USStateDept/FPA_Core,pudo/spendb,USStateDept/FPA_Core | openspending/lib/unicode_dict_reader.py | openspending/lib/unicode_dict_reader.py | # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
import csv
class EmptyCSVError(Exception):
pass
class UnicodeDictReader(object):
def __init__(self, fp, encoding='utf8', **kwargs):
self.encoding = encoding
self.reader = csv.DictReader(fp, **kwargs)
if not self.reader.fieldnames:
raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
self.keymap = dict((k, k.decode(encoding)) for k in self.reader.fieldnames)
def __iter__(self):
return (self._decode_row(row) for row in self.reader)
def _decode_row(self, row):
return dict(
(self.keymap[k], self._decode_str(v)) for k, v in row.iteritems()
)
def _decode_str(self, s):
if s is None:
return None
return s.decode(self.encoding)
| # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
import csv
class EmptyCSVError(Exception):
pass
class UnicodeDictReader(object):
def __init__(self, file_or_str, encoding='utf8', **kwargs):
self.encoding = encoding
self.reader = csv.DictReader(file_or_str, **kwargs)
if not self.reader.fieldnames:
raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
self.keymap = dict((k, k.decode(encoding)) for k in self.reader.fieldnames)
def __iter__(self):
return (self._decode_row(row) for row in self.reader)
def _decode_row(self, row):
return dict(
(self.keymap[k], self._decode_str(v)) for k, v in row.iteritems()
)
def _decode_str(self, s):
if s is None:
return None
return s.decode(self.encoding)
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.