commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
008663dedf2d0f123a76a0f55e1297fb8c466346 | Remove imports | explosion/thinc,spacy-io/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc | thinc/api.py | thinc/api.py | from .config import Config, registry
from .initializers import normal_init, uniform_init, glorot_uniform_init, zero_init
from .loss import CategoricalCrossentropy, L1Distance, CosineDistance
from .loss import SequenceCategoricalCrossentropy
from .model import Model, serialize_attr, deserialize_attr
from .model import set_dropout_rate, change_attr_values
from .shims import Shim, PyTorchShim, TensorFlowShim
from .optimizers import Adam, RAdam, SGD, Optimizer
from .schedules import cyclic_triangular, warmup_linear, constant, constant_then
from .schedules import decaying, slanted_triangular, compounding
from .types import Ragged, Padded, ArgsKwargs
from .util import fix_random_seed, is_cupy_array, set_active_gpu
from .util import prefer_gpu, require_gpu
from .util import to_categorical, get_width, get_array_module
from .util import torch2xp, xp2torch, tensorflow2xp, xp2tensorflow
from .backends import get_ops, set_current_ops, get_current_ops, use_ops
from .backends import Ops, CupyOps, NumpyOps, JaxOps, has_cupy, has_jax
from .backends import use_pytorch_for_gpu_memory, use_tensorflow_for_gpu_memory
from .layers import Dropout, Embed, expand_window, HashEmbed, LayerNorm, Linear
from .layers import Maxout, Mish, MultiSoftmax, ReLu, Softmax, LSTM
from .layers import CauchySimilarity, ParametricAttention, Logistic
from .layers import SparseLinear, StaticVectors, FeatureExtractor
from .layers import PyTorchWrapper, PyTorchRNNWrapper, PyTorchLSTM
from .layers import TensorFlowWrapper
from .layers import add, bidirectional, chain, clone, concatenate, noop
from .layers import residual, uniqued, siamese, list2ragged, ragged2list
from .layers import with_array, with_padded, with_list, with_ragged, with_flatten
from .layers import with_reshape, with_getitem, strings2arrays, list2array
from .layers import list2ragged, ragged2list, list2padded, padded2list, remap_ids
from .layers import reduce_max, reduce_mean, reduce_sum
__all__ = list(locals().keys())
| from .config import Config, registry
from .initializers import normal_init, uniform_init, glorot_uniform_init, zero_init
from .loss import CategoricalCrossentropy, L1Distance, CosineDistance
from .loss import SequenceCategoricalCrossentropy
from .model import Model, serialize_attr, deserialize_attr
from .model import set_dropout_rate, change_attr_values
from .shims import Shim, PyTorchShim, TensorFlowShim
from .optimizers import Adam, RAdam, SGD, Optimizer
from .schedules import cyclic_triangular, warmup_linear, constant, constant_then
from .schedules import decaying, slanted_triangular, compounding
from .types import Ragged, Padded, ArgsKwargs
from .util import fix_random_seed, is_cupy_array, set_active_gpu
from .util import prefer_gpu, require_gpu
from .util import get_shuffled_batches, minibatch, evaluate_model_on_arrays
from .util import to_categorical, get_width, get_array_module
from .util import torch2xp, xp2torch, tensorflow2xp, xp2tensorflow
from .backends import get_ops, set_current_ops, get_current_ops, use_ops
from .backends import Ops, CupyOps, NumpyOps, JaxOps, has_cupy, has_jax
from .backends import use_pytorch_for_gpu_memory, use_tensorflow_for_gpu_memory
from .layers import Dropout, Embed, expand_window, HashEmbed, LayerNorm, Linear
from .layers import Maxout, Mish, MultiSoftmax, ReLu, Softmax, LSTM
from .layers import CauchySimilarity, ParametricAttention, Logistic
from .layers import SparseLinear, StaticVectors, FeatureExtractor
from .layers import PyTorchWrapper, PyTorchRNNWrapper, PyTorchLSTM
from .layers import TensorFlowWrapper
from .layers import add, bidirectional, chain, clone, concatenate, noop
from .layers import residual, uniqued, siamese, list2ragged, ragged2list
from .layers import with_array, with_padded, with_list, with_ragged, with_flatten
from .layers import with_reshape, with_getitem, strings2arrays, list2array
from .layers import list2ragged, ragged2list, list2padded, padded2list, remap_ids
from .layers import reduce_max, reduce_mean, reduce_sum
__all__ = list(locals().keys())
| mit | Python |
e17beeb171c5716465404984581b4e74ceccacf6 | Work around pyflakes error introduced in r624 | libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar | libravatar/urls.py | libravatar/urls.py | # Copyright (C) 2010, 2011 Francois Marier <francois@libravatar.org>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=W0401,W0614
from django.conf.urls.defaults import patterns, include, handler404, handler500
handler404 # make pyflakes happy, pylint: disable=W0104
handler500 # make pyflakes happy, pylint: disable=W0104
urlpatterns = patterns('',
(r'^account/', include('libravatar.account.urls')),
(r'^openid/login/$', 'django_openid_auth.views.login_begin'),
(r'^openid/complete/$', 'django_openid_auth.views.login_complete'),
(r'^tools/', include('libravatar.tools.urls')),
(r'^$', 'libravatar.public.views.home'),
(r'^resize/', 'libravatar.public.views.resize'),
(r'^resolve/', 'libravatar.public.views.resolve'),
)
| # Copyright (C) 2010, 2011 Francois Marier <francois@libravatar.org>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=W0401,W0614
from django.conf.urls.defaults import patterns, include, handler404, handler500
urlpatterns = patterns('',
(r'^account/', include('libravatar.account.urls')),
(r'^openid/login/$', 'django_openid_auth.views.login_begin'),
(r'^openid/complete/$', 'django_openid_auth.views.login_complete'),
(r'^tools/', include('libravatar.tools.urls')),
(r'^$', 'libravatar.public.views.home'),
(r'^resize/', 'libravatar.public.views.resize'),
(r'^resolve/', 'libravatar.public.views.resolve'),
)
| agpl-3.0 | Python |
11381500eed6418151ba23990c0810582e625704 | fix logic in JError.custom | zloidemon/aiohttp_jrpc | aiohttp_jrpc/errors.py | aiohttp_jrpc/errors.py | """ Error responses """
from aiohttp.web import Response
import json
class JResponse(Response):
""" Modified Reponse from aohttp """
def __init__(self, *, status=200, reason=None,
headers=None, jsonrpc=None):
if jsonrpc is not None:
jsonrpc.update({'jsonrpc': '2.0'})
text = json.dumps(jsonrpc)
super().__init__(status=status, reason=reason, text=text,
headers=headers, content_type='application/json')
class JError(object):
""" Class with standart errors """
def __init__(self, data=None, rid=None):
if data is not None:
self.rid = data['id']
else:
self.rid = rid
def parse(self):
""" json parsing error """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32700, 'message': 'Parse error'},
})
def request(self):
""" incorrect json rpc request """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32600, 'message': 'Invalid Request'},
})
def method(self):
""" Not found method on the server """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32601, 'message': 'Method not found'},
})
def params(self):
""" Incorrect params (used in validate) """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32602, 'message': 'Invalid params'},
})
def internal(self):
""" Internal server error, actually send on every unknow exception """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32603, 'message': 'Internal error'},
})
def custom(self, code, message):
"""
Specific server side errors use: -32000 to -32099
reserved for implementation-defined server-errors
"""
if -32000 < code and -32099 > code:
code = -32603
message = 'Internal error'
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': code, 'message': message},
})
| """ Error responses """
from aiohttp.web import Response
import json
class JResponse(Response):
""" Modified Reponse from aohttp """
def __init__(self, *, status=200, reason=None,
headers=None, jsonrpc=None):
if jsonrpc is not None:
jsonrpc.update({'jsonrpc': '2.0'})
text = json.dumps(jsonrpc)
super().__init__(status=status, reason=reason, text=text,
headers=headers, content_type='application/json')
class JError(object):
""" Class with standart errors """
def __init__(self, data=None, rid=None):
if data is not None:
self.rid = data['id']
else:
self.rid = rid
def parse(self):
""" json parsing error """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32700, 'message': 'Parse error'},
})
def request(self):
""" incorrect json rpc request """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32600, 'message': 'Invalid Request'},
})
def method(self):
""" Not found method on the server """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32601, 'message': 'Method not found'},
})
def params(self):
""" Incorrect params (used in validate) """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32602, 'message': 'Invalid params'},
})
def internal(self):
""" Internal server error, actually send on every unknow exception """
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': -32603, 'message': 'Internal error'},
})
def custom(self, code, message):
"""
Specific server side errors use: -32000 to -32099
reserved for implementation-defined server-errors
"""
if -32000 < code or -32099 > code:
code = -32603
message = 'Internal error'
return JResponse(jsonrpc={
'id': self.rid,
'error': {'code': code, 'message': message},
})
| bsd-2-clause | Python |
48d9e4b3359c46116df2edd02345dc970449d351 | enable context aware commits (#126) | googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java | java-recommender/synth.py | java-recommender/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool.languages.java as java
AUTOSYNTH_MULTIPLE_COMMITS = True
service = 'recommender'
versions = ['v1beta1', 'v1']
for version in versions:
library = java.bazel_library(
service=service,
version=version,
bazel_target=f'//google/cloud/{service}/{version}:google-cloud-{service}-{version}-java',
)
java.common_templates()
| # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool.languages.java as java
service = 'recommender'
versions = ['v1beta1', 'v1']
for version in versions:
library = java.bazel_library(
service=service,
version=version,
bazel_target=f'//google/cloud/{service}/{version}:google-cloud-{service}-{version}-java',
)
java.common_templates()
| apache-2.0 | Python |
fb5c6d5288b6d69d16f917976ae068e9c52a6c7d | Update to Python 2.3, getting rid of backward compatiblity crud. We don't need the _compat21 or _compat22 modules either. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/email/Iterators.py | Lib/email/Iterators.py | # Copyright (C) 2001-2004 Python Software Foundation
# Author: Barry Warsaw <barry@python.org>
"""Various types of useful iterators and generators.
"""
import sys
from cStringIO import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
# The Python 2.2 version uses generators for efficiency.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print >> fp, tab + msg.get_content_type(),
if include_default:
print '[%s]' % msg.get_default_type()
else:
print
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
| # Copyright (C) 2001,2002 Python Software Foundation
# Author: barry@zope.com (Barry Warsaw)
"""Various types of useful iterators and generators.
"""
import sys
try:
from email._compat22 import body_line_iterator, typed_subpart_iterator
except SyntaxError:
# Python 2.1 doesn't have generators
from email._compat21 import body_line_iterator, typed_subpart_iterator
def _structure(msg, fp=None, level=0):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print >> fp, tab + msg.get_content_type()
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1)
| mit | Python |
b4a57231b1b3d1209d8d7accaf26b58ec4278cd1 | Enable fixed plugins again. | weezel/BandEventNotifier | plugin_handler.py | plugin_handler.py | # -*- coding: utf-8 -*-
# Execute this file to see what plugins will be loaded.
# Implementation leans to Lex Toumbourou's example:
# https://lextoumbourou.com/blog/posts/dynamically-loading-modules-and-classes-in-python/
import os
import pkgutil
import sys
from typing import List
from venues.abstract_venue import AbstractVenue
def load_venue_plugins() -> List[AbstractVenue]:
"""
Read plugin directory and load found plugins.
Variable "blocklist" can be used to exclude loading certain plugins.
"""
blocklist = ["plugin_tiketti"]
found_blocked = list()
loadedplugins = list()
pluginspathabs = os.path.join(os.path.dirname(__file__), "venues")
for loader, plugname, ispkg in \
pkgutil.iter_modules(path=[pluginspathabs]):
if plugname in sys.modules or plugname == "abstract_venue":
continue
if plugname in blocklist:
found_blocked.append(plugname.lstrip("plugin_"))
continue
plugpath = f"venues.{plugname}"
loadplug = __import__(plugpath, fromlist=[plugname])
classname = plugname.split("_")[1].title()
loadedclass = getattr(loadplug, classname)
instance = loadedclass()
loadedplugins.append(instance)
print(f"Loaded plugin: {instance.get_venue_name()}")
print("Blocked plugins: {}.\n".format(", ".join(found_blocked[1:])))
return loadedplugins
| # -*- coding: utf-8 -*-
# Execute this file to see what plugins will be loaded.
# Implementation leans to Lex Toumbourou's example:
# https://lextoumbourou.com/blog/posts/dynamically-loading-modules-and-classes-in-python/
import os
import pkgutil
import sys
from typing import List
from venues.abstract_venue import AbstractVenue
def load_venue_plugins() -> List[AbstractVenue]:
"""
Read plugin directory and load found plugins.
Variable "blocklist" can be used to exclude loading certain plugins.
"""
blocklist = ["plugin_tiketti",
"plugin_yotalo",
"plugin_glivelabtampere",
"plugin_glivelabhelsinki"]
found_blocked = list()
loadedplugins = list()
pluginspathabs = os.path.join(os.path.dirname(__file__), "venues")
for loader, plugname, ispkg in \
pkgutil.iter_modules(path=[pluginspathabs]):
if plugname in sys.modules or plugname == "abstract_venue":
continue
if plugname in blocklist:
found_blocked.append(plugname.lstrip("plugin_"))
continue
plugpath = f"venues.{plugname}"
loadplug = __import__(plugpath, fromlist=[plugname])
classname = plugname.split("_")[1].title()
loadedclass = getattr(loadplug, classname)
instance = loadedclass()
loadedplugins.append(instance)
print(f"Loaded plugin: {instance.get_venue_name()}")
print("Blocked plugins: {}.\n".format(", ".join(found_blocked[1:])))
return loadedplugins
| isc | Python |
20f8a45c1c7369787a823012040342782b9596be | Add projection module to __init__.py | AmiiThinks/amii-tf-nn | amii_tf_nn/__init__.py | amii_tf_nn/__init__.py | from .criterion import *
from .classifier import *
from .data_set import *
from .data import *
from .debug import *
from .estimator import *
from .experiment import *
from .layer import *
from .monitored_estimator import *
from .network_model import *
from .tf_extra import *
from .trainer import *
from .projection import *
| from .criterion import *
from .classifier import *
from .data_set import *
from .data import *
from .debug import *
from .estimator import *
from .experiment import *
from .layer import *
from .monitored_estimator import *
from .network_model import *
from .tf_extra import *
from .trainer import *
| mit | Python |
edc644869daaf788bc4785446d76a322440c5714 | Update test. | dronecrew/px4tools | test/test_analysis.py | test/test_analysis.py | import os
import pandas
import unittest
import inspect
from px4tools.analysis import *
from px4tools.mapping import *
TEST_PATH = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
class Test(unittest.TestCase):
def test_process_data(self):
filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
print("filename: {:s}".format(filename))
with open(filename, 'r') as f:
data = process_data(pandas.read_csv(f))
data = process_lpe_health(data)
data = project_lat_lon(data)
data = process_lpe_health(data)
find_meas_period(data['LPOS_VX'])
#all_new_sample(data['LPOS_VX'])
new_sample(data['LPOS_VX'])
find_lpe_gains(data, printing=True)
set_time_series(data)
get_auto_data(data)
get_float_data(data)
isfloatarray(data['LPOS_VX'])
octa_cox_data_to_ss(data)
filter_finite(data)
@unittest.skip("skip plotting test for CI")
def test_plotting(self):
filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
print("filename: {:s}".format(filename))
with open(filename, 'r') as f:
data = process_data(pandas.read_csv(f))
data = process_lpe_health(data)
data = project_lat_lon(data)
alt_analysis(data)
statistics(data, ['LPOS_VX'], plot=True)
data = process_lpe_health(data)
plot_modes(data)
find_meas_period(data['LPOS_VX'])
plot_control_loops(data)
plot_position_loops(data)
plot_velocity_loops(data)
plot_attitude_rate_loops(data)
plot_attitude_loops(data)
plot_faults(data)
pos_analysis(data)
| import os
import matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot
import pandas
import unittest
import inspect
from px4tools.analysis import *
from px4tools.mapping import *
TEST_PATH = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
class Test(unittest.TestCase):
def test_process_data(self):
filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
print("filename: {:s}".format(filename))
with open(filename, 'r') as f:
data = process_data(pandas.read_csv(f))
data = process_lpe_health(data)
data = project_lat_lon(data)
alt_analysis(data)
statistics(data, ['LPOS_VX'], plot=True)
data = process_lpe_health(data)
plot_modes(data)
find_meas_period(data['LPOS_VX'])
#all_new_sample(data['LPOS_VX'])
new_sample(data['LPOS_VX'])
find_lpe_gains(data, printing=True)
plot_control_loops(data)
plot_position_loops(data)
plot_velocity_loops(data)
plot_attitude_rate_loops(data)
plot_attitude_loops(data)
plot_faults(data)
set_time_series(data)
get_auto_data(data)
get_float_data(data)
isfloatarray(data['LPOS_VX'])
pos_analysis(data)
octa_cox_data_to_ss(data)
filter_finite(data)
| bsd-3-clause | Python |
7404ba980569352149ee5cc27cb676474c69cea2 | remove index from sim-test-data | jburos/survivalstan,jburos/survivalstan | test/test_datasets.py | test/test_datasets.py | import statsmodels
import survivalstan
import random
random.seed(9001)
def load_test_dataset(n=50):
''' Load test dataset from R survival package
'''
dataset = statsmodels.datasets.get_rdataset(package='survival', dataname='flchain' )
d = dataset.data.query('futime > 7').sample(n=n)
d.reset_index(level=0, inplace=True)
d.rename(columns={'futime': 't', 'death': 'event'}, inplace=True)
return(d)
def sim_test_dataset(n=50):
dataset = survivalstan.sim.sim_data_exp_correlated(N=n, censor_time=10)
return(dataset)
def load_test_dataset_long(n=20):
''' Load test dataset from R survival package
'''
d = load_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
def sim_test_dataset_long(n=20):
d = sim_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
| import statsmodels
import survivalstan
import random
random.seed(9001)
def load_test_dataset(n=50):
''' Load test dataset from R survival package
'''
dataset = statsmodels.datasets.get_rdataset(package='survival', dataname='flchain' )
d = dataset.data.query('futime > 7').sample(n=n)
d.reset_index(level=0, inplace=True)
d.rename(columns={'futime': 't', 'death': 'event'}, inplace=True)
return(d)
def sim_test_dataset(n=50):
dataset = survivalstan.sim.sim_data_exp_correlated(N=n, censor_time=10)
dataset.reset_index(level=0, inplace=True)
return(dataset)
def load_test_dataset_long(n=20):
''' Load test dataset from R survival package
'''
d = load_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
def sim_test_dataset_long(n=20):
d = sim_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
| apache-2.0 | Python |
3489b2076faf101a1d834fa3ffcb2b92a88f2005 | Drop old symlinks | xaque208/dotfiles,xaque208/dotfiles,xaque208/dotfiles | bin/init.py | bin/init.py | #! /usr/bin/env python
import os
from dotfiles import Dotfiles
def main():
homedir = os.environ['HOME']
dotfilesRoot = homedir + '/dotfiles'
d = Dotfiles(dotfilesRoot)
d.setup()
if __name__ == "__main__":
main()
| #! /usr/bin/env python
import symlinks
import os
from dotfiles import Dotfiles
def main():
homedir = os.environ['HOME']
dotfilesRoot = homedir + '/dotfiles'
d = Dotfiles(dotfilesRoot)
d.setup()
if __name__ == "__main__":
main()
| mit | Python |
c62167939485954ede7f1e5e709741e80c9a1127 | Update create_sample_job.py | Tendrl/node_agent,r0h4n/node-agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node_agent,Tendrl/node-agent,Tendrl/node-agent,r0h4n/node-agent | etc/create_sample_job.py | etc/create_sample_job.py | import json
import uuid
import etcd
job_id1 = str(uuid.uuid4())
job = {
"integration_id": "rohan_create_job_script_test",
"run": "tendrl.node_agent.flows.import_cluster.ImportCluster",
"status": "new",
"parameters": {
"TendrlContext.integration_id": "some_uuid",
"Node[]": ["9eced8a0-fd46-4144-9578-5b35c2ae2006"],
"DetectedCluster.sds_pkg_name": "gluster"
},
"type": "node",
"node_ids": ["9eced8a0-fd46-4144-9578-5b35c2ae2006"]
}
print "/queue/%s" % job_id1
client = etcd.Client(host="your_etcd_api_ip", port=2379)
client.write("/queue/%s" % job_id1, json.dumps(job))
| import json
import uuid
import etcd
job_id1 = str(uuid.uuid4())
job = {
"integration_id": "49fa2adde8a6e98591f0f5cb4bc5f44d",
"sds_type": "generic",
"flow": "ExecuteCommand",
"object_type": "generic",
"status": 'new',
"message": 'Executing command',
"attributes": {
"_raw_input": "ls"
},
"errors": {}
}
client = etcd.Client()
client.write("/api_job_queue/job_%s" % job_id1, json.dumps(job))
| lgpl-2.1 | Python |
267a1afaf27122954ce04220f55667109c6ccbf6 | change logic for source.url in case of tor_node | aaronkaplan/intelmq,certtools/intelmq,certtools/intelmq,certtools/intelmq,aaronkaplan/intelmq,aaronkaplan/intelmq | intelmq/bots/parsers/blueliv/parser_crimeserver.py | intelmq/bots/parsers/blueliv/parser_crimeserver.py | # -*- coding: utf-8 -*-
"""
"""
import json
from intelmq.lib import utils
from intelmq.lib.bot import Bot
TYPES = {
'PHISHING': 'phishing',
'MALWARE': 'malware',
'EXPLOIT_KIT': 'exploit',
'BACKDOOR': 'backdoor',
'TOR_IP': 'proxy',
'C_AND_C': 'c&c'
}
class BluelivCrimeserverParserBot(Bot):
def process(self):
report = self.receive_message()
raw_report = utils.base64_decode(report.get('raw'))
for item in json.loads(raw_report):
event = self.new_event(report)
tor_node = False
if 'type' in item:
event.add('classification.type', TYPES[item['type']])
if item['type'] == 'TOR_IP':
event.add('source.tor_node', True)
tor_node = True
if 'url' in item:
# crimeserver reports tor ips in url as well, skip those
raise_failure = False if tor_node else True
event.add('source.url', item['url'],
raise_failure=raise_failure)
if 'ip' in item:
event.add('source.ip', item['ip'])
if 'country' in item:
event.add('source.geolocation.cc', item['country'])
if 'firstSeenAt' in item:
event.add('time.source', item['firstSeenAt'][:-4] + '00:00')
event.add("raw", json.dumps(item, sort_keys=True))
self.send_message(event)
self.acknowledge_message()
BOT = BluelivCrimeserverParserBot
| # -*- coding: utf-8 -*-
"""
"""
import json
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.harmonization import IPAddress
TYPES = {
'PHISHING': 'phishing',
'MALWARE': 'malware',
'EXPLOIT_KIT': 'exploit',
'BACKDOOR': 'backdoor',
'TOR_IP': 'proxy',
'C_AND_C': 'c&c'
}
class BluelivCrimeserverParserBot(Bot):
def process(self):
report = self.receive_message()
raw_report = utils.base64_decode(report.get('raw'))
for item in json.loads(raw_report):
event = self.new_event(report)
tor_node = False
if 'type' in item:
event.add('classification.type', TYPES[item['type']])
if item['type'] == 'TOR_IP':
event.add('source.tor_node', True)
tor_node = True
if 'url' in item:
# crimeserver reports tor ips in url as well, skip those
if not tor_node:
event.add('source.url', item['url'])
else:
valid_ip = IPAddress.is_valid(item['url'])
if not valid_ip:
event.add('source.url', item['url'])
if 'ip' in item:
event.add('source.ip', item['ip'])
if 'country' in item:
event.add('source.geolocation.cc', item['country'])
if 'firstSeenAt' in item:
event.add('time.source', item['firstSeenAt'][:-4] + '00:00')
event.add("raw", json.dumps(item, sort_keys=True))
self.send_message(event)
self.acknowledge_message()
BOT = BluelivCrimeserverParserBot
| agpl-3.0 | Python |
6aadd0ff991df18f59152f65d0332c913ba91b93 | add stats calc | wgerlach/pipeline,MG-RAST/pipeline,MG-RAST/pipeline,teharrison/pipeline,wgerlach/pipeline,MG-RAST/pipeline,wgerlach/pipeline,teharrison/pipeline,teharrison/pipeline | bin/extract_darkmatter.py | bin/extract_darkmatter.py | #!/usr/bin/env python
import os
import sys
import json
import shutil
import leveldb
import argparse
import subprocess
from Bio import SeqIO
def get_seq_stats(fname):
stats = {}
cmd = ["seq_length_stats.py", "-f", "-i", fname]
proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise IOError("%s\n%s"%(" ".join(cmd), stderr))
for line in stdout.strip().split("\n"):
parts = line.split("\t")
try:
val = int(parts[1])
except ValueError:
try:
val = float(parts[1])
except ValueError:
val = None
stats[parts[0]] = val
return stats
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], "X")
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write(">%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ohdl.close()
ihdl.close()
jhdl = open(args.output+".json", 'w')
json.dump(get_seq_stats(args.output), jhdl)
jhdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| #!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], "X")
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| bsd-2-clause | Python |
3702edd34881a95db656eb83a952a245a18edc62 | remove debug print | nipy/nireg,arokem/nipy,alexis-roche/nipy,arokem/nipy,alexis-roche/register,bthirion/nipy,bthirion/nipy,alexis-roche/nipy,nipy/nipy-labs,alexis-roche/nireg,alexis-roche/niseg,nipy/nireg,arokem/nipy,alexis-roche/niseg,alexis-roche/nireg,arokem/nipy,bthirion/nipy,alexis-roche/register,alexis-roche/register,alexis-roche/nipy,alexis-roche/nipy,bthirion/nipy,nipy/nipy-labs | lib/neuroimaging/__init__.py | lib/neuroimaging/__init__.py | """
Insert long description here.
"""
import re
from path import path
__version__ = "0.01a"
packages = (
'neuroimaging',
'neuroimaging.tests',
'neuroimaging.data',
'neuroimaging.data.tests',
'neuroimaging.fmri',
'neuroimaging.fmri.tests',
'neuroimaging.fmri.fmristat',
'neuroimaging.fmri.fmristat.tests',
'neuroimaging.image',
'neuroimaging.image.tests',
'neuroimaging.image.formats',
'neuroimaging.image.formats.tests',
'neuroimaging.refactoring',
'neuroimaging.refactoring.tests',
'neuroimaging.reference',
'neuroimaging.reference.tests',
'neuroimaging.statistics',
'neuroimaging.statistics.tests',
'neuroimaging.visualization',
'neuroimaging.visualization.cmap',
'neuroimaging.visualization.tests')
testmatch = re.compile(".*tests").search
nontest_packages = [p for p in packages if not testmatch(p)]
# modules to be pre-imported for convenience
_preload_modules = (
'neuroimaging.image.formats.analyze',
'neuroimaging.image.interpolation',
'neuroimaging.image.onesample',
'neuroimaging.image.regression',
'neuroimaging.reference.axis',
'neuroimaging.reference.coordinate_system',
'neuroimaging.reference.grid',
'neuroimaging.reference.grid_iterators',
'neuroimaging.reference.mapping',
'neuroimaging.reference.slices',
'neuroimaging.statistics.regression',
'neuroimaging.statistics.classification',
'neuroimaging.statistics.iterators',
'neuroimaging.statistics.contrast',
'neuroimaging.statistics.utils',
'neuroimaging.visualization.viewer',)
#-----------------------------------------------------------------------------
def ensuredirs(dir):
if not isinstance(dir, path): dir= path(dir)
if not dir.exists(): dir.makedirs()
#-----------------------------------------------------------------------------
def preload(packages=nontest_packages):
"""
Import the specified modules/packages (enabling fewer imports in client
scripts). By default, import all non-test packages:\n%s
and the following modules:\n%s
"""%("\n".join(nontest_packages),"\n".join(_preload_modules))
for package in packages: __import__(package, {}, {})
for module in _preload_modules: __import__(module, {}, {})
#-----------------------------------------------------------------------------
def import_from(modulename, objectname):
"Import and return objectname from modulename."
module = __import__(modulename, {}, {}, (objectname,))
try: return getattr(module, objectname)
except AttributeError: return None
# Always preload all packages. This should be removed as soon as the client
# scripts can be modified to call it themselves.
#preload()
| """
Insert long description here.
"""
import re
from path import path
__version__ = "0.01a"
packages = (
'neuroimaging',
'neuroimaging.tests',
'neuroimaging.data',
'neuroimaging.data.tests',
'neuroimaging.fmri',
'neuroimaging.fmri.tests',
'neuroimaging.fmri.fmristat',
'neuroimaging.fmri.fmristat.tests',
'neuroimaging.image',
'neuroimaging.image.tests',
'neuroimaging.image.formats',
'neuroimaging.image.formats.tests',
'neuroimaging.refactoring',
'neuroimaging.refactoring.tests',
'neuroimaging.reference',
'neuroimaging.reference.tests',
'neuroimaging.statistics',
'neuroimaging.statistics.tests',
'neuroimaging.visualization',
'neuroimaging.visualization.cmap',
'neuroimaging.visualization.tests')
testmatch = re.compile(".*tests").search
nontest_packages = [p for p in packages if not testmatch(p)]
# modules to be pre-imported for convenience
_preload_modules = (
'neuroimaging.image.formats.analyze',
'neuroimaging.image.interpolation',
'neuroimaging.image.onesample',
'neuroimaging.image.regression',
'neuroimaging.reference.axis',
'neuroimaging.reference.coordinate_system',
'neuroimaging.reference.grid',
'neuroimaging.reference.grid_iterators',
'neuroimaging.reference.mapping',
'neuroimaging.reference.slices',
'neuroimaging.statistics.regression',
'neuroimaging.statistics.classification',
'neuroimaging.statistics.iterators',
'neuroimaging.statistics.contrast',
'neuroimaging.statistics.utils',
'neuroimaging.visualization.viewer',)
#-----------------------------------------------------------------------------
def ensuredirs(dir):
if not isinstance(dir, path): dir= path(dir)
if not dir.exists(): dir.makedirs()
#-----------------------------------------------------------------------------
def preload(packages=nontest_packages):
"""
Import the specified modules/packages (enabling fewer imports in client
scripts). By default, import all non-test packages:\n%s
and the following modules:\n%s
"""%("\n".join(nontest_packages),"\n".join(_preload_modules))
for package in packages: __import__(package, {}, {})
for module in _preload_modules: __import__(module, {}, {})
#-----------------------------------------------------------------------------
def import_from(modulename, objectname):
"Import and return objectname from modulename."
print "import_from:",modulename,objectname
module = __import__(modulename, {}, {}, (objectname,))
try: return getattr(module, objectname)
except AttributeError: return None
# Always preload all packages. This should be removed as soon as the client
# scripts can be modified to call it themselves.
#preload()
| bsd-3-clause | Python |
3fcc6e4fe09ecaa27e5863b2650df101fc605db8 | Update Chapter05/PracticeQuestions/Question3.py added docstring | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter05/PracticeQuestions/Question3.py | books/CrackingCodesWithPython/Chapter05/PracticeQuestions/Question3.py | """ Chapter 5 Practice Question 3
Which Python instruction would import a module named watermelon.py?
Note:
Contains spoilers for Chapter 7 (functions)
"""
import books.CrackingCodesWithPython.Chapter05.PracticeQuestions.watermelon as watermelon
def main():
watermelon.nutrition()
# If Question3.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| # Which Python instruction would import a module named watermelon.py?
import books.CrackingCodesWithPython.Chapter05.PracticeQuestions.watermelon as watermelon
def main():
watermelon.nutrition()
# If Question3.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| mit | Python |
e42b1d4f673a43f0a0bc73723c697524e08ffdc7 | Change back to 1 second | materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org | backend/scripts/db_running.py | backend/scripts/db_running.py | #!/usr/bin/env python
#
# db_running.py will attempt to connect to the database. It will retry --retry times (defaults to 100), and will
# sleep for --sleep time (defaults to 1 second). It connects to the database on --port (defaults to 30815).
# db_running.py will exit 0 if is successfully connects. It will exit 1 if it cannot connect within the number of
# retry attempts. It will also exit 1 if --retry or --sleep are improperly specified.
import optparse
import sys
import time
import rethinkdb as r
def connect(port):
try:
r.connect('localhost', port, db="materialscommons")
return True
except:
return False
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815)
parser.add_option("-r", "--retry", dest="retry", type="int", help="number of retries", default=1000)
parser.add_option("-s", "--sleep", dest="sleep", type="int", help="sleep time between retries", default="1")
(options, args) = parser.parse_args()
if options.retry < 1:
print("Retry count must be a positive number: %d" % options.retry)
sys.exit(1)
if options.sleep < 1:
print("Sleep time must be a positive number: %d" % options.sleep)
sys.exit(1)
for i in range(0, options.retry):
# print("Attempting to connect to rethinkdb on port %d (%d)" % (options.port, i) )
if connect(options.port):
sys.exit(0)
time.sleep(options.sleep)
sys.exit(1)
| #!/usr/bin/env python
#
# db_running.py will attempt to connect to the database. It will retry --retry times (defaults to 100), and will
# sleep for --sleep time (defaults to 1 second). It connects to the database on --port (defaults to 30815).
# db_running.py will exit 0 if is successfully connects. It will exit 1 if it cannot connect within the number of
# retry attempts. It will also exit 1 if --retry or --sleep are improperly specified.
import optparse
import sys
import time
import rethinkdb as r
def connect(port):
try:
r.connect('localhost', port, db="materialscommons")
return True
except:
return False
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815)
parser.add_option("-r", "--retry", dest="retry", type="int", help="number of retries", default=1000)
parser.add_option("-s", "--sleep", dest="sleep", type="int", help="sleep time between retries", default="3")
(options, args) = parser.parse_args()
if options.retry < 1:
print("Retry count must be a positive number: %d" % options.retry)
sys.exit(1)
if options.sleep < 1:
print("Sleep time must be a positive number: %d" % options.sleep)
sys.exit(1)
for i in range(0, options.retry):
# print("Attempting to connect to rethinkdb on port %d (%d)" % (options.port, i) )
if connect(options.port):
sys.exit(0)
time.sleep(options.sleep)
sys.exit(1)
| mit | Python |
1ef8508790071b8d19b0da5e3dde1f48eb9e9479 | add numpy | gordon-n-stevenson/colormap_to_ITKSNAP | runColorMap.py | runColorMap.py | import matplotlib.cm as cmap
import numpy as np
import ColorMapWriter
mpl_map = cmap.viridis(np.arange(256))
c = ColorMapWriter.ColorMapWriter(mpl_map, 'viridis.xml', 5)
c.createLookupTable()
| import matplotlib.cm as cmap
import ColorMapWriter
mpl_map = cmap.viridis(np.arange(256))
c = ColorMapWriter.ColorMapWriter(mpl_map, 'viridis.xml', 5)
c.createLookupTable() | mit | Python |
f8479aa9b3416921d7d387efacedf8a36e4027fd | add cat /etc/ansible/hosts | le9i0nx/ansible-role-test | bin/virt.py | bin/virt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,sh = True ):
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
p.wait()
outs, errs = p.communicate()
if p.returncode:
print(errs)
sys.exit(1)
return outs,errs,p
def cmd_list_proc(list_proc):
for item in list_proc:
print("$ {}".format(item))
out = proc(item)
print("{}".format(out[0]))
def job(dockerf, dis , ver):
o1_cmd = "docker run --name {0}_{1} -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro le9i0nx/ansible-role-test:{0}-{1}".format(dis,ver)
print("$ {}".format(o1_cmd))
o1 = proc(o1_cmd)
print("{}".format(o1[0]))
return
REPO = os.environ['TRAVIS_REPO_SLUG'].split('/')[1]
PWD = os.environ['PWD']
cmd_list = [
"sudo ln -s {}/test/{} /etc/ansible".format(PWD,REPO),
]
cmd_list_proc(cmd_list)
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
ROOT_PATH=os.path.dirname(__file__)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"].lower()
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
job(dockerfile, distrib, x)
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
cmd_list =[
"sudo apt-get update",
"sudo apt-get install -qq sshpass",
"ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"",
"sleep 10",
"docker inspect --format '{{.Name}} ansible_host={{.NetworkSettings.IPAddress}} ansible_user=root' `docker ps -q` | sed -e 's/^.\{1\}//' >> /etc/ansible/hosts",
"cat /etc/ansible/hosts",
]
cmd_list_proc(cmd_list)
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' `docker ps -q`")[0].splitlines():
cmd_list = [
"ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item),
"sshpass -p '000000' ssh-copy-id root@{}".format(item),
]
cmd_list_proc(cmd_list)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,sh = True ):
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
p.wait()
outs, errs = p.communicate()
if p.returncode:
print(errs)
sys.exit(1)
return outs,errs,p
def cmd_list_proc(list_proc):
for item in list_proc:
print("$ {}".format(item))
out = proc(item)
print("{}".format(out[0]))
def job(dockerf, dis , ver):
o1_cmd = "docker run --name {0}_{1} -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro le9i0nx/ansible-role-test:{0}-{1}".format(dis,ver)
print("$ {}".format(o1_cmd))
o1 = proc(o1_cmd)
print("{}".format(o1[0]))
return
REPO = os.environ['TRAVIS_REPO_SLUG'].split('/')[1]
PWD = os.environ['PWD']
cmd_list = [
"sudo ln -s {}/test/{} /etc/ansible".format(PWD,REPO),
"sudo apt-get update",
"sudo apt-get install -qq sshpass",
"ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"",
]
cmd_list_proc(cmd_list)
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
ROOT_PATH=os.path.dirname(__file__)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"].lower()
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
job(dockerfile, distrib, x)
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
cmd_list =[
"sleep 10",
"docker inspect --format '{{.Name}} ansible_host={{.NetworkSettings.IPAddress}} ansible_user=root' `docker ps -q` | sed -e 's/^.\{1\}//' >> /etc/ansible/hosts",
]
cmd_list_proc(cmd_list)
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' `docker ps -q`")[0].splitlines():
cmd_list = [
"ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item),
"sshpass -p '000000' ssh-copy-id root@{}".format(item),
]
cmd_list_proc(cmd_list)
| mit | Python |
0cbd437b536f9f24eabc1ce63282512016326a08 | Set extra libraries on windows condition only | etaylor8086/prime-randomizer-web,etaylor8086/prime-randomizer-web,etaylor8086/prime-randomizer-web,etaylor8086/prime-randomizer-web,etaylor8086/prime-randomizer-web | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "randomprime",
"sources": [ "./native/randomprime.c" ],
"conditions": [
["OS=='win'", {
"libraries": [
"-l<(module_root_dir)/native/lib/randomprime",
"-lcredui.lib",
"-lmsimg32.lib",
"-lopengl32.lib",
"-lsecur32.lib",
"-lsetupapi.lib",
"-lws2_32.lib",
"-luserenv.lib",
"-lmsvcrt.lib"
]
}]
]
}
]
}
| {
"targets": [
{
"target_name": "randomprime",
"sources": [ "./native/randomprime.c" ],
"libraries": [
"-l<(module_root_dir)/native/lib/randomprime",
"-lcredui.lib",
"-lmsimg32.lib",
"-lopengl32.lib",
"-lsecur32.lib",
"-lsetupapi.lib",
"-lws2_32.lib",
"-luserenv.lib",
"-lmsvcrt.lib"
]
}
]
} | mit | Python |
3c9a9c0729b8c6dc0ecc70f307de07543eeba098 | Remove debug output | tyewang/invoke,frol/invoke,mattrobenolt/invoke,pfmoore/invoke,frol/invoke,kejbaly2/invoke,pyinvoke/invoke,pfmoore/invoke,pyinvoke/invoke,mkusz/invoke,singingwolfboy/invoke,alex/invoke,sophacles/invoke,mattrobenolt/invoke,mkusz/invoke,kejbaly2/invoke | tests/_support/foo.py | tests/_support/foo.py | from invoke.task import task
@task
def mytask():
pass
| from invoke.task import task
@task
def mytask():
pass
print mytask
print mytask.is_invoke_task
| bsd-2-clause | Python |
acc192111fae6590bcf52bfae292606e985c1102 | fix script to run on individual asset as well | kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi | kobo/apps/subsequences/scripts/repop_known_cols.py | kobo/apps/subsequences/scripts/repop_known_cols.py | # coding: utf-8
'''
Usage:
python manage.py runscript repop_known_cols --script-args=<assetUid>
'''
import re
import json
# from pprint import pprint
from kpi.models.asset import Asset
from kobo.apps.subsequences.models import SubmissionExtras
from kobo.apps.subsequences.utils.parse_knowncols import parse_knowncols
from kobo.apps.subsequences.utils.determine_export_cols_with_values import (
determine_export_cols_with_values,
)
def migrate_subex_content(sub_ex):
content_string = json.dumps(sub_ex.content)
if '"translated"' in content_string:
content_string = content_string.replace('"translated"', '"translation"')
sub_ex.content = json.loads(content_string)
print('submission_extra has old content')
sub_ex.save()
def migrate_subex_content_for_asset(asset):
for sub_ex in asset.submission_extras.all():
migrate_subex_content(sub_ex)
def repop_asset_knowncols(asset):
print(f'for_asset: {asset.uid}')
print(' before:')
print(' - ' + '\n - '.join(sorted(asset.known_cols)))
known_cols = determine_export_cols_with_values(asset.submission_extras.all())
asset.known_cols = known_cols
if 'translated' in asset.advanced_features:
asset.advanced_features['translation'] = asset.advanced_features['translated']
del asset.advanced_features['translated']
asset.save(create_version=False)
print(' after:')
print(' - ' + '\n - '.join(sorted(known_cols)))
def run(asset_uid=None):
if asset_uid is None:
id_key = 'asset_id'
asset_ids = list(
set(
[a['asset_id'] for a in SubmissionExtras.objects.all().values('asset_id')]
)
)
for asset_id in asset_ids:
asset = Asset.objects.get(id=asset_id)
migrate_subex_content_for_asset(asset)
repop_asset_knowncols(asset)
else:
asset = Asset.objects.get(uid=asset_uid)
migrate_subex_content_for_asset(asset)
repop_asset_knowncols(asset)
| # coding: utf-8
'''
Usage:
python manage.py runscript repop_known_cols --script-args=<assetUid>
'''
import re
import json
from pprint import pprint
from kpi.models.asset import Asset
from kobo.apps.subsequences.models import SubmissionExtras
from kobo.apps.subsequences.utils.parse_knowncols import parse_knowncols
from kobo.apps.subsequences.utils.determine_export_cols_with_values import (
determine_export_cols_with_values,
)
def migrate_subex_content(sub_ex):
content_string = json.dumps(sub_ex.content)
if '"translated"' in content_string:
content_string = content_string.replace('"translated"', '"translation"')
sub_ex.content = json.loads(content_string)
print('submission_extra has old content')
sub_ex.save()
def repop_asset_knowncols(asset):
print(f'for_asset: {asset.uid}')
print(' before:')
print(' - ' + '\n - '.join(sorted(asset.known_cols)))
known_cols = determine_export_cols_with_values(asset.submission_extras.all())
asset.known_cols = known_cols
if 'translated' in asset.advanced_features:
asset.advanced_features['translation'] = asset.advanced_features['translated']
del asset.advanced_features['translated']
asset.save(create_version=False)
print(' after:')
print(' - ' + '\n - '.join(sorted(known_cols)))
def run(asset_uid=None):
for sub_ex in SubmissionExtras.objects.all():
migrate_subex_content(sub_ex)
if asset_uid is None:
id_key = 'asset_id'
asset_ids = list(
set(
[a['asset_id'] for a in SubmissionExtras.objects.all().values('asset_id')]
)
)
for asset_id in asset_ids:
asset = Asset.objects.get(id=asset_id)
repop_asset_knowncols(asset)
else:
asset = Asset.objects.get(asset_uid=asset_uid)
repop_asset_knowncols(asset)
| agpl-3.0 | Python |
bd4a6e79725acb7b89f4c1382f885c401aa30690 | Use database context manager for sybase fetch | sot/mica,sot/mica | mica/web/star_hist.py | mica/web/star_hist.py | from astropy.table import Table
from mica.stats import acq_stats
from Ska.DBI import DBI
def get_acq_data(agasc_id):
"""
Fetch acquisition history from mica acq stats for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of acquisitions
"""
acq = Table(acq_stats.get_stats())
acq_star = acq[acq['agasc_id'] == agasc_id]
# make list of dicts for use in light templates in kadi web app
acq_table = []
acq_star.sort('guide_start')
for s in acq_star:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot', 'mag', 'mag_obs', 'star_tracked']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['guide_start']
srec['acq_dy'] = s['cdy']
srec['acq_dz'] = s['cdz']
srec['id'] = s['acqid']
acq_table.append(srec)
return acq_table
def get_gui_data(agasc_id):
"""
Fetch guide/track history from Sybase for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of uses as guide stars
"""
with DBI(dbi='sybase', server='sybase', user='aca_read') as db:
gui = db.fetchall('select * from trak_stats_data where id = {}'.format(
agasc_id))
if not len(gui):
return []
gui = Table(gui)
gui.sort('kalman_datestart')
# make list of dicts for use in light templates in kadi web app
gui_table = []
for s in gui:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['kalman_datestart']
srec['mag'] = s['mag_exp']
srec['mag_obs'] = s['aoacmag_mean']
srec['perc_not_track'] = s['not_tracking_samples'] * 100.0 / s['n_samples']
gui_table.append(srec)
return gui_table
def get_star_stats(agasc_id):
"""
Fetch acq and gui history of a star
:param agasc_id: AGASC id
:returns: 2 lists, first of acq attempts, second of guide attempts
"""
acq_table = get_acq_data(agasc_id)
gui_table = get_gui_data(agasc_id)
return acq_table, gui_table
| from astropy.table import Table
from mica.stats import acq_stats
from Ska.DBI import DBI
def get_acq_data(agasc_id):
"""
Fetch acquisition history from mica acq stats for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of acquisitions
"""
acq = Table(acq_stats.get_stats())
acq_star = acq[acq['agasc_id'] == agasc_id]
# make list of dicts for use in light templates in kadi web app
acq_table = []
acq_star.sort('guide_start')
for s in acq_star:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot', 'mag', 'mag_obs', 'star_tracked']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['guide_start']
srec['acq_dy'] = s['cdy']
srec['acq_dz'] = s['cdz']
srec['id'] = s['acqid']
acq_table.append(srec)
return acq_table
def get_gui_data(agasc_id):
"""
Fetch guide/track history from Sybase for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of uses as guide stars
"""
db = DBI(dbi='sybase', server='sybase', user='aca_read')
gui = db.fetchall('select * from trak_stats_data where id = {}'.format(
agasc_id))
gui = Table(gui)
gui.sort('kalman_datestart')
# make list of dicts for use in light templates in kadi web app
gui_table = []
for s in gui:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['kalman_datestart']
srec['mag'] = s['mag_exp']
srec['mag_obs'] = s['aoacmag_mean']
srec['perc_not_track'] = s['not_tracking_samples'] * 100.0 / s['n_samples']
gui_table.append(srec)
return gui_table
def get_star_stats(agasc_id):
"""
Fetch acq and gui history of a star
:param agasc_id: AGASC id
:returns: 2 lists, first of acq attempts, second of guide attempts
"""
acq_table = get_acq_data(agasc_id)
gui_table = get_gui_data(agasc_id)
return acq_table, gui_table
| bsd-3-clause | Python |
0e01b0a8845e0309cbb7131a35eacc1b87ded3f1 | Update version to 1.1.0 | gunthercox/chatterbot-corpus | chatterbot_corpus/__init__.py | chatterbot_corpus/__init__.py | """
A machine readable multilingual dialog corpus.
"""
from .corpus import Corpus
__version__ = '1.1.0'
__author__ = 'Gunther Cox'
__email__ = 'gunthercx@gmail.com'
__url__ = 'https://github.com/gunthercox/chatterbot-corpus'
__all__ = (
'Corpus',
)
| """
A machine readable multilingual dialog corpus.
"""
from .corpus import Corpus
__version__ = '1.0.1'
__author__ = 'Gunther Cox'
__email__ = 'gunthercx@gmail.com'
__url__ = 'https://github.com/gunthercox/chatterbot-corpus'
__all__ = (
'Corpus',
)
| bsd-3-clause | Python |
2674338d6896b62485f33b049c85ae3fedfefc9d | bump to version 0.8.1 | ndawe/rootpy,ndawe/rootpy,ndawe/rootpy,rootpy/rootpy,kreczko/rootpy,kreczko/rootpy,rootpy/rootpy,kreczko/rootpy,rootpy/rootpy | rootpy/info.py | rootpy/info.py | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
_
_ __ ___ ___ | |_ _ __ _ _
| '__/ _ \ / _ \| __| '_ \| | | |
| | | (_) | (_) | |_| |_) | |_| |
|_| \___/ \___/ \__| .__/ \__, |
|_| |___/
{0}
"""
__version__ = '0.8.1.dev0'
__url__ = 'http://rootpy.github.com/rootpy'
__repo_url__ = 'https://github.com/rootpy/rootpy/'
__download_url__ = ('http://pypi.python.org/packages/source/r/'
'rootpy/rootpy-{0}.tar.gz').format(__version__)
__doc__ = __doc__.format(__version__)
| # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
_
_ __ ___ ___ | |_ _ __ _ _
| '__/ _ \ / _ \| __| '_ \| | | |
| | | (_) | (_) | |_| |_) | |_| |
|_| \___/ \___/ \__| .__/ \__, |
|_| |___/
{0}
"""
__version__ = '0.8.0.dev0'
__url__ = 'http://rootpy.github.com/rootpy'
__repo_url__ = 'https://github.com/rootpy/rootpy/'
__download_url__ = ('http://pypi.python.org/packages/source/r/'
'rootpy/rootpy-{0}.tar.gz').format(__version__)
__doc__ = __doc__.format(__version__)
| bsd-3-clause | Python |
cc1d5ab4a863bfcb69b68c6d56743c7efd079b7a | Fix shap version (#1053) | kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts | python/alibiexplainer/setup.py | python/alibiexplainer/setup.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
tests_require = [
'pytest',
'pytest-tornasync',
'mypy'
]
setup(
name='alibiexplainer',
version='0.4.0',
author_email='cc@seldon.io',
license='../../LICENSE.txt',
url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',
description='Model Explaination Server. \
Not intended for use outside KFServing Frameworks Images',
long_description=open('README.md').read(),
python_requires='>=3.6',
packages=find_packages("alibiexplainer"),
install_requires=[
"shap==0.35",
"kfserving>=0.4.0",
"alibi==0.4.0",
"scikit-learn>=0.20.3",
"argparse>=1.4.0",
"requests>=2.22.0",
"joblib>=0.13.2",
"pandas>=0.24.2",
"numpy>=1.16.3",
"dill>=0.3.0",
"spacy>=2.1.4"
],
tests_require=tests_require,
extras_require={'test': tests_require}
)
| # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
tests_require = [
'pytest',
'pytest-tornasync',
'mypy'
]
setup(
name='alibiexplainer',
version='0.4.0',
author_email='cc@seldon.io',
license='../../LICENSE.txt',
url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',
description='Model Explaination Server. \
Not intended for use outside KFServing Frameworks Images',
long_description=open('README.md').read(),
python_requires='>=3.6',
packages=find_packages("alibiexplainer"),
install_requires=[
"kfserving>=0.4.0",
"alibi==0.4.0",
"scikit-learn>=0.20.3",
"argparse>=1.4.0",
"requests>=2.22.0",
"joblib>=0.13.2",
"pandas>=0.24.2",
"numpy>=1.16.3",
"dill>=0.3.0",
"spacy>=2.1.4"
],
tests_require=tests_require,
extras_require={'test': tests_require}
)
| apache-2.0 | Python |
35114cf05ebdc12f6ec0a1ea8f647c8a5f4bc03b | Fix verb relation parsing | tomshen/sherlock,tomshen/sherlock | parse.py | parse.py | #!/usr/bin/env python
import re
import sys
import string
import unicodedata
from textblob import TextBlob
import grammars
import util
def preprocess(doc):
paragraphs = [s.strip() for s in doc.split('\n') if s.strip()][1:] # strip out title
return TextBlob('\n'.join(paragraphs))
def extract_generic_relations(sentence):
relations = []
noun_phrases = sentence.noun_phrases
words = sentence.words
new_noun_phrases = []
for np in noun_phrases:
try:
if ' ' in np:
nnp = ' '.join([words[words.lower().index(w)]
for w in str(np).split(' ')])
else:
nnp = words[words.lower().index(np)]
new_noun_phrases.append(nnp)
except:
continue
noun_phrases = new_noun_phrases
sentiment = sentence.sentiment.polarity
verbs = [w for w, pos in sentence.tags if pos[0] == 'V']
for i in xrange(len(noun_phrases)-1):
np = noun_phrases[i]
next_np = noun_phrases[i+1]
cur_idx = words.index(np.split(' ')[0])
next_idx = words.index(next_np.split(' ')[0])
verb_relation = words[cur_idx+len(np.split(' ')):next_idx]
if len(verb_relation) > 0:
relations.append((np, next_np, verb_relation,
sentiment, 1.0, sentence.tags[next_idx:next_idx+len(next_np.split(' '))]))
return relations
BAD_PUNC = set(string.punctuation) - set([',', ';', ':', '.', '!', '?'])
def basic_parse(doc):
blob = preprocess(doc)
sentences = blob.sentences
database = {}
for sentence in sentences:
rels = extract_generic_relations(sentence)
for key, val, relation, sentiment, certainty, pos in rels:
database[key] = database.get(key, {})
database[key][val] = {
'relation': relation,
'certainty': certainty,
'sentiment': sentiment,
'pos': pos
}
return database
| #!/usr/bin/env python
import re
import sys
import string
import unicodedata
from textblob import TextBlob
import grammars
import util
def preprocess(doc):
paragraphs = [s.strip() for s in doc.split('\n') if s.strip()][1:] # strip out title
return TextBlob('\n'.join(paragraphs))
def extract_generic_relations(sentence):
relations = []
noun_phrases = sentence.noun_phrases
words = sentence.words
new_noun_phrases = []
for np in noun_phrases:
try:
if ' ' in np:
nnp = ' '.join([words[words.lower().index(w)]
for w in str(np).split(' ')])
else:
nnp = words[words.lower().index(np)]
new_noun_phrases.append(nnp)
except:
continue
noun_phrases = new_noun_phrases
sentiment = sentence.sentiment.polarity
verbs = [w for w, pos in sentence.tags if pos[0] == 'V']
for i in xrange(len(noun_phrases)-1):
np = noun_phrases[i]
next_np = noun_phrases[i+1]
cur_idx = words.index(np.split(' ')[0])
next_idx = words.index(next_np.split(' ')[0])
relations.append((np, next_np, words[cur_idx+1:next_idx],
sentiment, 1.0, sentence.tags[next_idx:next_idx+len(next_np.split(' '))]))
return relations
BAD_PUNC = set(string.punctuation) - set([',', ';', ':', '.', '!', '?'])
def basic_parse(doc):
blob = preprocess(doc)
sentences = blob.sentences
database = {}
for sentence in sentences:
rels = extract_generic_relations(sentence)
for key, val, relation, sentiment, certainty, pos in rels:
database[key] = database.get(key, {})
database[key][val] = {
'relation': relation,
'certainty': certainty,
'sentiment': sentiment,
'pos': pos
}
return database
| mit | Python |
0a99166bb035b662ca40224ea48847dc0e86edf9 | Switch to using `unittest` directly. | jakirkham/rank_filter,DudLab/rank_filter,jakirkham/rank_filter,nanshe-org/rank_filter,DudLab/rank_filter,nanshe-org/rank_filter,jakirkham/rank_filter,DudLab/rank_filter,nanshe-org/rank_filter | rank_filter.recipe/run_test.py | rank_filter.recipe/run_test.py | #!/usr/bin/env python
import os
import subprocess
import sys
import nose
def main(*argv):
argv = list(argv)
test = os.path.join(os.environ["SRC_DIR"], "test", "test_rank_filter.py")
return(subprocess.check_call(["python", "-m", "unittest"] + argv[1:],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr))
if __name__ == "__main__":
sys.exit(main(*sys.argv))
| #!/usr/bin/env python
import os
import subprocess
import sys
import nose
def main(*argv):
argv = list(argv)
test = os.path.join(os.environ["SRC_DIR"], "test", "test_rank_filter.py")
return(subprocess.check_call(["python", nose.core.__file__, test] + argv[1:],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr))
if __name__ == "__main__":
sys.exit(main(*sys.argv))
| bsd-3-clause | Python |
213148d5732eee7bfbc9cda0f6fb36ae36d947ce | fix args | Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client | binstar_client/scripts/cli.py | binstar_client/scripts/cli.py | '''
Binstar command line utility
'''
from __future__ import print_function, unicode_literals
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import logging
from os import makedirs
from os.path import join, exists
from binstar_client import __version__ as version
from binstar_client import commands as command_module
from binstar_client.commands.login import interactive_login
from binstar_client.errors import ShowHelp, Unauthorized
from binstar_client.utils import USER_LOGDIR
from binstar_client.utils.handlers import syslog_handler
from clyent import add_default_arguments, add_subparser_modules
from clyent.logs import setup_logging
logger = logging.getLogger('binstar')
def add_syslog_handler():
hndlr = syslog_handler('binstar-client')
binstar_logger = logging.getLogger()
binstar_logger.setLevel(logging.INFO)
binstar_logger.addHandler(hndlr)
def binstar_main(sub_command_module, args=None, exit=True, description=None, version=None):
parser = ArgumentParser(description=description,
formatter_class=RawDescriptionHelpFormatter)
add_default_arguments(parser, version)
parser.add_argument('-t', '--token')
parser.add_argument('-s', '--site',
help='select the binstar site to use', default=None)
add_subparser_modules(parser, sub_command_module)
if not exists(USER_LOGDIR): makedirs(USER_LOGDIR)
logfile = join(USER_LOGDIR, 'cli.log')
args = parser.parse_args(args)
setup_logging(logger, args.log_level, use_color=args.color,
logfile=logfile, show_tb=args.show_traceback)
add_syslog_handler()
try:
try:
if not hasattr(args, 'main'):
parser.error("A sub command must be given. To show all available sub commands, run:\n\n\t binstar -h\n")
return args.main(args)
except Unauthorized:
if not args.token:
logger.info('The action you are performing requires authentication, please sign in:')
interactive_login(args)
return args.main(args)
else:
raise
except ShowHelp:
args.sub_parser.print_help()
if exit:
raise SystemExit(1)
else:
return 1
def main(args=None, exit=True):
binstar_main(command_module, args, exit,
description=__doc__, version=version)
| '''
Binstar command line utility
'''
from __future__ import print_function, unicode_literals
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import logging
from os import makedirs
from os.path import join, exists
from binstar_client import __version__ as version
from binstar_client import commands as command_module
from binstar_client.commands.login import interactive_login
from binstar_client.errors import ShowHelp, Unauthorized
from binstar_client.utils import USER_LOGDIR
from binstar_client.utils.handlers import syslog_handler
from clyent import add_default_arguments, add_subparser_modules
from clyent.logs import setup_logging
logger = logging.getLogger('binstar')
def add_syslog_handler():
hndlr = syslog_handler('binstar-client')
binstar_logger = logging.getLogger()
binstar_logger.setLevel(logging.INFO)
binstar_logger.addHandler(hndlr)
def binstar_main(sub_command_module, args=None, exit=True, description=None, version=None):
parser = ArgumentParser(description=description,
formatter_class=RawDescriptionHelpFormatter)
add_default_arguments(parser, version)
parser.add_argument('-t', '--token')
parser.add_argument('-s', '--site',
help='select the binstar site to use', default=None)
add_subparser_modules(parser, sub_command_module)
if not exists(USER_LOGDIR): makedirs(USER_LOGDIR)
logfile = join(USER_LOGDIR, 'cli.log')
args = parser.parse_args()
setup_logging(logger, args.log_level, use_color=args.color,
logfile=logfile, show_tb=args.show_traceback)
add_syslog_handler()
try:
try:
if not hasattr(args, 'main'):
parser.error("A sub command must be given. To show all available sub commands, run:\n\n\t binstar -h\n")
return args.main(args)
except Unauthorized:
if not args.token:
logger.info('The action you are performing requires authentication, please sign in:')
interactive_login(args)
return args.main(args)
else:
raise
except ShowHelp:
args.sub_parser.print_help()
if exit:
raise SystemExit(1)
else:
return 1
def main(args=None, exit=True):
binstar_main(command_module, args, exit,
description=__doc__, version=version)
| bsd-3-clause | Python |
79df0eccd44c6c01d69725b9cc7a851ab2be957c | add body property to request. | korakon/glaso | glaso/request.py | glaso/request.py | from werkzeug.wrappers import Request as Base
from functools import lru_cache
from json import loads
class Request(Base):
def __init__(self, environ):
super(Request, self).__init__(environ)
# Params captured from request url
self.params = {}
# Used to mount apps
# eg: mount('/api/users', users.app)
self.prefix = []
# User editable dictionary to pass values between routes.
self.vault = {
'request': self,
'data': self.data,
'params': self.params,
'prefix': self.prefix,
'files': self.files,
'cookies': self.cookies
}
@property
@lru_cache(10)
def body(self):
if self.mimetype == 'application/json':
try:
return loads(self.get_data(as_text=True))
except ValueError:
raise ValueError('Request body is not valid json')
else:
raise ValueError('Request body couldnt be decoded')
| from werkzeug.wrappers import Request as Base
class Request(Base):
def __init__(self, environ):
super(Request, self).__init__(environ)
# Params captured from request url
self.params = {}
# Used to mount apps
# eg: mount('/api/users', users.app)
self.prefix = []
# User editable dictionary to pass values between routes.
self.vault = {
'request': self,
'data': self.data,
'params': self.params,
'prefix': self.prefix,
'files': self.files,
'cookies': self.cookies
}
| cc0-1.0 | Python |
a5724bce1360d5fa290af3a9a240f76ae12d892d | Adjust blocking client output. | jakesyl/twisted-intro,s0lst1ce/twisted-intro,walkinreeds/twisted-intro,shankig/twisted-intro,s0lst1ce/twisted-intro,leixinstar/twisted-intro,elenaoat/tests,shankisg/twisted-intro,elenaoat/tests,leixinstar/twisted-intro,shankig/twisted-intro,jdavisp3/twisted-intro,jakesyl/twisted-intro,shankisg/twisted-intro,vaniakov/twisted-intro,denispin/twisted-intro,jdavisp3/twisted-intro,vaniakov/twisted-intro,denispin/twisted-intro,walkinreeds/twisted-intro | blocking-client/get-poetry.py | blocking-client/get-poetry.py | # This is the blocking Get Poetry Now! client.
import datetime, optparse, socket
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, blocking edition.
Run it like this:
python get-poetry.py port1 port2 port3 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python blocking-client/get-poetry.py 1001 1002 1003
to grab poetry from servers on ports 1001, 1002, and 1003.
Of course, there need to be servers listening on those ports
for that to work.
"""
parser = optparse.OptionParser(usage)
_, addresses = parser.parse_args()
if not addresses:
print parser.format_help()
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = ''
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
def get_poetry(address):
"""Download a piece of poetry from the given address."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
poem = ''
while True:
# This is the 'blocking' call in this synchronous program.
# The recv() method will block for an indeterminate period
# of time waiting for bytes to be received from the server.
bytes = sock.recv(1024)
if not bytes:
break
poem += bytes
return poem
def format_address(address):
host, port = address
return '%s:%s' % (host or 'localhost', port)
def main():
addresses = parse_args()
elapsed = datetime.timedelta()
for i, address in enumerate(addresses):
addr_fmt = format_address(address)
print 'Task %d: get poetry from: %s' % (i + 1, addr_fmt)
start = datetime.datetime.now()
# Each execution of 'get_poetry' corresponds to the
# execution of one synchronous task in Figure 1 here:
# http://dpeticol.webfactional.com/blog/?p=1209#figure1
poem = get_poetry(address)
time = datetime.datetime.now() - start
msg = 'Task %d: got %d bytes of poetry from %s in %s'
print msg % (i + 1, len(poem), addr_fmt, time)
elapsed += time
print 'Got %d poems in %s' % (len(addresses), elapsed)
if __name__ == '__main__':
main()
| # This is the blocking Get Poetry Now! client.
import datetime, optparse, socket
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, blocking edition.
Run it like this:
python get-poetry.py port1 port2 port3 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python blocking-client/get-poetry.py 1001 1002 1003
to grab poetry from servers on ports 1001, 1002, and 1003.
Of course, there need to be servers listening on those ports
for that to work.
"""
parser = optparse.OptionParser(usage)
_, addresses = parser.parse_args()
if not addresses:
print parser.format_help()
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = ''
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
def get_poetry(address):
"""Download a piece of poetry from the given address."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
poem = ''
while True:
# This is the 'blocking' call in this synchronous program.
# The recv() method will block for an indeterminate period
# of time waiting for bytes to be received from the server.
bytes = sock.recv(1024)
if not bytes:
break
poem += bytes
return poem
def format_address(address):
host, port = address
return '%s:%s' % (host or 'localhost', port)
def main():
addresses = parse_args()
elapsed = datetime.timedelta()
for i, address in enumerate(addresses):
addr_fmt = format_address(address)
print 'Task %d: get poetry from: %s' % (i + 1, addr_fmt)
start = datetime.datetime.now()
# Each execution of 'get_poetry' corresponds to the
# execution of one synchronous task in Figure 1 here:
# http://dpeticol.webfactional.com/blog/?p=1209#figure1
poem = get_poetry(address)
time = datetime.datetime.now() - start
print 'Task %d: got a poem from %s in %s' % (i + 1, addr_fmt, time)
elapsed += time
print 'Got %d poems in %s' % (len(addresses), elapsed)
if __name__ == '__main__':
main()
| mit | Python |
2eaf27d9c3a97c4c7dbe4e653d82c92e22d9afb2 | Print some more info. | jcharum/pycurl,ninemoreminutes/pycurl,jcharum/pycurl,m13253/pycurl-python3,ninemoreminutes/pycurl,m13253/pycurl-python3,ninemoreminutes/pycurl,jcharum/pycurl,m13253/pycurl-python3,ninemoreminutes/pycurl | tests/test_getinfo.py | tests/test_getinfo.py | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# $Id: test_getinfo.py,v 1.18 2003/05/01 19:35:01 mfx Exp $
import time
import pycurl
## Callback function invoked when progress information is updated
def progress(download_t, download_d, upload_t, upload_d):
print "Total to download %d bytes, have %d bytes so far" % \
(download_t, download_d)
url = "http://www.cnn.com"
print "Starting downloading", url
print
f = open("body", "wb")
h = open("header", "wb")
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
c.setopt(c.NOPROGRESS, 0)
c.setopt(c.PROGRESSFUNCTION, progress)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.MAXREDIRS, 5)
c.setopt(c.WRITEHEADER, h)
c.setopt(c.OPT_FILETIME, 1)
c.perform()
print
print "HTTP-code:", c.getinfo(c.HTTP_CODE)
print "Total-time:", c.getinfo(c.TOTAL_TIME)
print "Download speed: %.2f bytes/second" % c.getinfo(c.SPEED_DOWNLOAD)
print "Document size: %d bytes" % c.getinfo(c.SIZE_DOWNLOAD)
print "Effective URL:", c.getinfo(c.EFFECTIVE_URL)
print "Content-type:", c.getinfo(c.CONTENT_TYPE)
print "Namelookup-time:", c.getinfo(c.NAMELOOKUP_TIME)
print "Redirect-time:", c.getinfo(c.REDIRECT_TIME)
print "Redirect-count:", c.getinfo(c.REDIRECT_COUNT)
epoch = c.getinfo(c.INFO_FILETIME)
print "Filetime: %d (%s)" % (epoch, time.ctime(epoch))
print
print "Header is in file 'header', body is in file 'body'"
c.close()
f.close()
h.close()
| #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# $Id: test_getinfo.py,v 1.17 2003/04/21 18:46:10 mfx Exp $
import time
import pycurl
## Callback function invoked when progress information is updated
def progress(download_t, download_d, upload_t, upload_d):
print "Total to download %d bytes, have %d bytes so far" % \
(download_t, download_d)
url = "http://www.cnn.com"
print "Starting downloading", url
print
f = open("body", "wb")
h = open("header", "wb")
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
c.setopt(c.NOPROGRESS, 0)
c.setopt(c.PROGRESSFUNCTION, progress)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.MAXREDIRS, 5)
c.setopt(c.WRITEHEADER, h)
c.setopt(c.OPT_FILETIME, 1)
c.perform()
print
print "Download speed: %.2f bytes/second" % c.getinfo(c.SPEED_DOWNLOAD)
print "Document size: %d bytes" % c.getinfo(c.SIZE_DOWNLOAD)
print "Effective URL:", c.getinfo(c.EFFECTIVE_URL)
print "Content-type:", c.getinfo(c.CONTENT_TYPE)
print "Redirect-time:", c.getinfo(c.REDIRECT_TIME)
print "Redirect-count:", c.getinfo(c.REDIRECT_COUNT)
epoch = c.getinfo(c.INFO_FILETIME)
print "Filetime: %d (%s)" % (epoch, time.ctime(epoch))
print
print "Header is in file 'header', body is in file 'body'"
c.close()
f.close()
h.close()
| lgpl-2.1 | Python |
b7ebaf022fd2c0992afeb0e87ce510376e799224 | bump version | ii0/python-goose,voidfiles/python-goose,goose3/goose3,vetal4444/python-goose,goose3/goose3,goose3/goose,raven47git/python-goose,samim23/python-goose,jaepil/python-goose,raven47git/python-goose,goose3/goose,muckrack/python-goose,github4ry/python-goose,blmlove409/python-goose,blmlove409/python-goose,ii0/python-goose,muckrack/python-goose,heianxing/python-goose,jaepil/python-goose,grangier/python-goose,cursesun/python-goose,github4ry/python-goose,cursesun/python-goose,heianxing/python-goose,grangier/python-goose,AGoodId/python-goose,robmcdan/python-goose,vetal4444/python-goose,AGoodId/python-goose,samim23/python-goose,robmcdan/python-goose | goose/version.py | goose/version.py | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
version_info = (1, 0, 21)
__version__ = ".".join(map(str, version_info))
| # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
version_info = (1, 0, 20)
__version__ = ".".join(map(str, version_info))
| apache-2.0 | Python |
4a42285b6b0afbd5dbd23df049ab2a6d493fc256 | Add production domain | tvorogme/gotosite,tvorogme/gotosite,tvorogme/gotosite | gotosite/urls.py | gotosite/urls.py | from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from main.views import *
domain = 'https://goto.msk.ru/new/'
urlpatterns = [
# Admin
url(r'^%sadmin/' % domain, admin.site.urls),
# Index
url(r'^%s$' % domain, index, name='index'),
# About
# url(r'^about_us/$', about_us, name='about_us'),
# Profile
url(r'^%sprofile/$' % domain, profile_page, name='SelfProfile'),
url(r'^%sprofile/(?P<_id>[0-9]+)/$' % domain, profile_page, name='Profile'),
url(r'^%sprofile/edit/$' % domain, update_profile),
url(r'^%sprofile/remove_education/$' % domain, remove_education),
url(r'^%sprofile/add_achievement/$' % domain, add_achievement),
url(r'^%sprofile/remove_achievement/$' % domain, remove_achievement),
# Login system
url(r'^%slogout/$' % domain, logout_wrapper),
url(r'^%slogin/$' % domain, login_wrapper),
url(r'^%sactivate/' % domain, activation),
# Signup render
url(r'^%ssignup/$' % domain, signup_page),
# Social backend
url(r'^%saccounts/' % domain, include('allauth.urls')),
# API
url(r'^%sapi/get_needed_skills/' % domain, get_needed_skills),
url(r'^%sapi/get_needed_cities/' % domain, get_needed_cities),
url(r'^%sapi/get_needed_schools_names/' % domain, get_needed_schools_names),
url(r'^%sapi/update_avatar/' % domain, update_avatar)
]
if settings.DEBUG:
from django.views.static import serve
urlpatterns.append(url(r'^favicon.ico', lambda r: serve(r, 'favicon.ico')))
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from main.views import *
urlpatterns = [
# Admin
url(r'^admin/', admin.site.urls),
# Index
url(r'^$', index, name='index'),
# About
# url(r'^about_us/$', about_us, name='about_us'),
# Profile
url(r'^profile/$', profile_page, name='SelfProfile'),
url(r'^profile/(?P<_id>[0-9]+)/$', profile_page, name='Profile'),
url(r'^profile/edit/$', update_profile),
url(r'^profile/remove_education/$', remove_education),
url(r'^profile/add_achievement/$', add_achievement),
url(r'^profile/remove_achievement/$', remove_achievement),
# Login system
url(r'^logout/$', logout_wrapper),
url(r'^login/$', login_wrapper),
url(r'^activate/', activation),
# Signup render
url(r'^signup/$', signup_page),
# Social backend
url(r'^accounts/', include('allauth.urls')),
# API
url(r'^api/get_needed_skills/', get_needed_skills),
url(r'^api/get_needed_cities/', get_needed_cities),
url(r'^api/get_needed_schools_names/', get_needed_schools_names),
url(r'^api/update_avatar/', update_avatar)
]
if settings.DEBUG:
from django.views.static import serve
urlpatterns.append(url(r'^favicon.ico', lambda r: serve(r, 'favicon.ico')))
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| apache-2.0 | Python |
a8855bd2045f965bdeb783cfa1bd51c311bf013c | Replace domain to localhost | tvorogme/gotosite,tvorogme/gotosite,tvorogme/gotosite | gotosite/urls.py | gotosite/urls.py | from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from main.views import *
domain = 'http://localhost:8000/new/'
urlpatterns = [
# Admin
url(r'^%sadmin/' % domain, admin.site.urls),
# Index
url(r'^%s$' % domain, index, name='index'),
# About
# url(r'^about_us/$', about_us, name='about_us'),
# Profile
url(r'^%sprofile/$' % domain, profile_page, name='SelfProfile'),
url(r'^%sprofile/(?P<_id>[0-9]+)/$' % domain, profile_page, name='Profile'),
url(r'^%sprofile/edit/$' % domain, update_profile),
url(r'^%sprofile/remove_education/$' % domain, remove_education),
url(r'^%sprofile/add_achievement/$' % domain, add_achievement),
url(r'^%sprofile/remove_achievement/$' % domain, remove_achievement),
# Login system
url(r'^%slogout/$' % domain, logout_wrapper),
url(r'^%slogin/$' % domain, login_wrapper),
url(r'^%sactivate/' % domain, activation),
# Signup render
url(r'^%ssignup/$' % domain, signup_page),
# Social backend
url(r'^%saccounts/' % domain, include('allauth.urls')),
# API
url(r'^%sapi/get_needed_skills/' % domain, get_needed_skills),
url(r'^%sapi/get_needed_cities/' % domain, get_needed_cities),
url(r'^%sapi/get_needed_schools_names/' % domain, get_needed_schools_names),
url(r'^%sapi/update_avatar/' % domain, update_avatar)
]
if settings.DEBUG:
from django.views.static import serve
urlpatterns.append(url(r'^favicon.ico', lambda r: serve(r, 'favicon.ico')))
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from main.views import *
domain = 'https://goto.msk.ru/new/'
urlpatterns = [
# Admin
url(r'^%sadmin/' % domain, admin.site.urls),
# Index
url(r'^%s$' % domain, index, name='index'),
# About
# url(r'^about_us/$', about_us, name='about_us'),
# Profile
url(r'^%sprofile/$' % domain, profile_page, name='SelfProfile'),
url(r'^%sprofile/(?P<_id>[0-9]+)/$' % domain, profile_page, name='Profile'),
url(r'^%sprofile/edit/$' % domain, update_profile),
url(r'^%sprofile/remove_education/$' % domain, remove_education),
url(r'^%sprofile/add_achievement/$' % domain, add_achievement),
url(r'^%sprofile/remove_achievement/$' % domain, remove_achievement),
# Login system
url(r'^%slogout/$' % domain, logout_wrapper),
url(r'^%slogin/$' % domain, login_wrapper),
url(r'^%sactivate/' % domain, activation),
# Signup render
url(r'^%ssignup/$' % domain, signup_page),
# Social backend
url(r'^%saccounts/' % domain, include('allauth.urls')),
# API
url(r'^%sapi/get_needed_skills/' % domain, get_needed_skills),
url(r'^%sapi/get_needed_cities/' % domain, get_needed_cities),
url(r'^%sapi/get_needed_schools_names/' % domain, get_needed_schools_names),
url(r'^%sapi/update_avatar/' % domain, update_avatar)
]
if settings.DEBUG:
from django.views.static import serve
urlpatterns.append(url(r'^favicon.ico', lambda r: serve(r, 'favicon.ico')))
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| apache-2.0 | Python |
9056febc4c29984bb838dcaae3d209841ed27344 | add tests for apply_referrer method | Chris7/django-referral,byteweaver/django-referral | referral/tests/models_tests.py | referral/tests/models_tests.py | from django.http import HttpRequest
from django.test import TestCase
from referral.models import UserReferrer
from referral import settings
from referral.tests.factories import CampaignFactory, ReferrerFactory, UserReferrerFactory, UserFactory
class CampaignTestCase(TestCase):
def test_model(self):
obj = CampaignFactory()
self.assertTrue(obj.pk)
def test_count_users(self):
obj = CampaignFactory()
self.assertEqual(obj.count_users(),0)
ReferrerFactory(campaign=obj)
self.assertEqual(obj.count_users(),0)
ref = ReferrerFactory(campaign=obj)
UserReferrerFactory(referrer=ref)
self.assertEqual(obj.count_users(),1)
class ReferrerTestCase(TestCase):
def test_model(self):
obj = ReferrerFactory()
self.assertTrue(obj.pk)
def test_match_campaign(self):
obj = ReferrerFactory()
obj.match_campaign()
CampaignFactory()
self.assertIsNone(obj.campaign)
campaign = CampaignFactory(pattern="Test Referrer")
obj.match_campaign()
self.assertEqual(obj.campaign, campaign)
class UserReferrerTestCase(TestCase):
def test_model(self):
obj = UserReferrerFactory()
self.assertTrue(obj.pk)
def test_manager_apply_referrer_no_ref(self):
user = UserFactory()
request = HttpRequest()
request.session = {}
UserReferrer.objects.apply_referrer(user, request)
try:
user.user_referrer
except UserReferrer.DoesNotExist:
pass
else:
assert False, "Referrer should not exist!"
def test_manager_apply_referrer(self):
referrer = ReferrerFactory()
user = UserFactory()
request = HttpRequest()
request.session = {settings.SESSION_KEY: referrer}
UserReferrer.objects.apply_referrer(user, request)
self.assertEqual(user.user_referrer.referrer, referrer)
| from django.test import TestCase
from referral.tests.factories import CampaignFactory, ReferrerFactory, UserReferrerFactory
class CampaignTestCase(TestCase):
def test_model(self):
obj = CampaignFactory()
self.assertTrue(obj.pk)
def test_count_users(self):
obj = CampaignFactory()
self.assertEqual(obj.count_users(),0)
ReferrerFactory(campaign=obj)
self.assertEqual(obj.count_users(),0)
ref = ReferrerFactory(campaign=obj)
UserReferrerFactory(referrer=ref)
self.assertEqual(obj.count_users(),1)
class ReferrerTestCase(TestCase):
def test_model(self):
obj = ReferrerFactory()
self.assertTrue(obj.pk)
def test_match_campaign(self):
obj = ReferrerFactory()
obj.match_campaign()
CampaignFactory()
self.assertIsNone(obj.campaign)
campaign = CampaignFactory(pattern="Test Referrer")
obj.match_campaign()
self.assertEqual(obj.campaign, campaign)
class UserReferrerTestCase(TestCase):
def test_model(self):
obj = UserReferrerFactory()
self.assertTrue(obj.pk)
| mit | Python |
5ac248bcbe1e2c0a97754fad185f8a9b2eff2280 | disable weird test. | abadger/Bento,cournape/Bento,abadger/Bento,abadger/Bento,cournape/Bento,cournape/Bento,abadger/Bento,cournape/Bento | tests/test_parsing.py | tests/test_parsing.py | import os
import tempfile
import shutil
import unittest
import sys
from os.path import \
join, dirname
from nose.tools import \
assert_equal, raises
try:
from cStringIO import StringIO
finally:
from StringIO import StringIO
from bento.core.pkg_objects import \
PathOption, FlagOption, Executable, DataFiles
from bento.core.parse_utils import \
CommaListLexer, comma_list_split
from bento.core.options import \
PackageOptions
from bento \
import PackageDescription, static_representation
#old = sys.path[:]
#try:
# sys.path.insert(0, join(dirname(__file__), "pkgdescr"))
# from simple_package import PKG, DESCR
#finally:
# sys.path = old
class TestDataFiles(unittest.TestCase):
def test_simple(self):
text = """\
Name: foo
DataFiles: data
TargetDir: $datadir
Files:
foo.data
"""
r_data = DataFiles("data", files=["foo.data"], target_dir="$datadir")
pkg = PackageDescription.from_string(text)
self.failUnless("data" in pkg.data_files)
assert_equal(pkg.data_files["data"].__dict__, r_data.__dict__)
class TestOptions(unittest.TestCase):
simple_text = """\
Name: foo
Flag: flag1
Description: flag1 description
Default: false
Path: foo
Description: foo description
Default: /usr/lib
"""
def _test_simple(self, opts):
self.failUnless(opts.name, "foo")
flag = FlagOption("flag1", "false", "flag1 description")
self.failUnless(opts.flag_options.keys(), ["flags"])
self.failUnless(opts.flag_options["flag1"], flag.__dict__)
path = PathOption("foo", "/usr/lib", "foo description")
self.failUnless(opts.path_options.keys(), ["foo"])
self.failUnless(opts.path_options["foo"], path.__dict__)
def test_simple_from_string(self):
s = self.simple_text
opts = PackageOptions.from_string(s)
self._test_simple(opts)
def test_simple_from_file(self):
fid, filename = tempfile.mkstemp(suffix=".info")
try:
os.write(fid, self.simple_text)
opts = PackageOptions.from_file(filename)
self._test_simple(opts)
finally:
os.close(fid)
os.remove(filename)
| import os
import tempfile
import shutil
import unittest
import sys
from os.path import \
join, dirname
from nose.tools import \
assert_equal, raises
try:
from cStringIO import StringIO
finally:
from StringIO import StringIO
from bento.core.pkg_objects import \
PathOption, FlagOption, Executable, DataFiles
from bento.core.parse_utils import \
CommaListLexer, comma_list_split
from bento.core.options import \
PackageOptions
from bento \
import PackageDescription, static_representation
old = sys.path[:]
try:
sys.path.insert(0, join(dirname(__file__), "pkgdescr"))
from simple_package import PKG, DESCR
finally:
sys.path = old
class TestDataFiles(unittest.TestCase):
def test_simple(self):
text = """\
Name: foo
DataFiles: data
TargetDir: $datadir
Files:
foo.data
"""
r_data = DataFiles("data", files=["foo.data"], target_dir="$datadir")
pkg = PackageDescription.from_string(text)
self.failUnless("data" in pkg.data_files)
assert_equal(pkg.data_files["data"].__dict__, r_data.__dict__)
class TestOptions(unittest.TestCase):
simple_text = """\
Name: foo
Flag: flag1
Description: flag1 description
Default: false
Path: foo
Description: foo description
Default: /usr/lib
"""
def _test_simple(self, opts):
self.failUnless(opts.name, "foo")
flag = FlagOption("flag1", "false", "flag1 description")
self.failUnless(opts.flag_options.keys(), ["flags"])
self.failUnless(opts.flag_options["flag1"], flag.__dict__)
path = PathOption("foo", "/usr/lib", "foo description")
self.failUnless(opts.path_options.keys(), ["foo"])
self.failUnless(opts.path_options["foo"], path.__dict__)
def test_simple_from_string(self):
s = self.simple_text
opts = PackageOptions.from_string(s)
self._test_simple(opts)
def test_simple_from_file(self):
fid, filename = tempfile.mkstemp(suffix=".info")
try:
os.write(fid, self.simple_text)
opts = PackageOptions.from_file(filename)
self._test_simple(opts)
finally:
os.close(fid)
os.remove(filename)
| bsd-3-clause | Python |
3340367519181ef933847e655cc08a63474b7d06 | Remove the tests for metanl_word_frequency too. Doh. | LuminosoInsight/wordfreq | tests/test_queries.py | tests/test_queries.py | from __future__ import unicode_literals
from nose.tools import eq_, assert_almost_equal, assert_greater
from wordfreq.query import (word_frequency, average_frequency, wordlist_size,
wordlist_info)
def test_freq_examples():
assert_almost_equal(
word_frequency('normalization', 'en', 'google-books'),
1.767e-6, places=9
)
assert_almost_equal(
word_frequency('normalization', 'en', 'google-books', 1e-6),
2.767e-6, places=9
)
assert_almost_equal(
word_frequency('normalisation', 'fr', 'leeds-internet'),
4.162e-6, places=9
)
assert_greater(
word_frequency('lol', 'xx', 'twitter'),
word_frequency('lol', 'en', 'google-books')
)
eq_(
word_frequency('totallyfakeword', 'en', 'multi', .5),
.5
)
def _check_normalized_frequencies(wordlist, lang):
assert_almost_equal(
average_frequency(wordlist, lang) * wordlist_size(wordlist, lang),
1.0, places=6
)
def test_normalized_frequencies():
for list_info in wordlist_info():
wordlist = list_info['wordlist']
lang = list_info['lang']
yield _check_normalized_frequencies, wordlist, lang
| from __future__ import unicode_literals
from nose.tools import eq_, assert_almost_equal, assert_greater
from wordfreq.query import (word_frequency, average_frequency, wordlist_size,
wordlist_info, metanl_word_frequency)
def test_freq_examples():
assert_almost_equal(
word_frequency('normalization', 'en', 'google-books'),
1.767e-6, places=9
)
assert_almost_equal(
word_frequency('normalization', 'en', 'google-books', 1e-6),
2.767e-6, places=9
)
assert_almost_equal(
word_frequency('normalisation', 'fr', 'leeds-internet'),
4.162e-6, places=9
)
assert_greater(
word_frequency('lol', 'xx', 'twitter'),
word_frequency('lol', 'en', 'google-books')
)
eq_(
word_frequency('totallyfakeword', 'en', 'multi', .5),
.5
)
def test_compatibility():
assert_almost_equal(metanl_word_frequency('the|en'), 1e9, places=3)
assert_almost_equal(metanl_word_frequency('the|en', offset=1e9), 2e9, places=3)
def _check_normalized_frequencies(wordlist, lang):
assert_almost_equal(
average_frequency(wordlist, lang) * wordlist_size(wordlist, lang),
1.0, places=6
)
def test_normalized_frequencies():
for list_info in wordlist_info():
wordlist = list_info['wordlist']
lang = list_info['lang']
yield _check_normalized_frequencies, wordlist, lang
| mit | Python |
8fedd77849b320140dbf8588a4c91afeba2b674d | Add test for full Bucket | leonkoens/dht | tests/test_routing.py | tests/test_routing.py | import unittest
from dht.node import Node, SelfNode
from dht.routing import BucketTree
from dht.utils import hash_string
from dht.bucket import NodeAlreadyAddedException, BucketIsFullException
from dht import settings
class BucketTreeTest(unittest.TestCase):
"""
BucketTree.find_node(key)
BucketTree.add_node(node)
"""
def get_new_tree(self):
tree = BucketTree(SelfNode(key='0'))
assert len(tree.bucket_node_list) == 1
return tree
def test_add_neighbour(self):
""" Test add_node on BucketTree with the neighbour of SelfNode. This should
create settings.KEY_SIZE + 1 nodes. """
tree = self.get_new_tree()
# This node is right next to the SelfNode, this creates lots of Buckets.
node = Node(key='1')
tree.add_node(node)
assert len(tree.bucket_node_list) == settings.KEY_SIZE + 1
def test_add_node_twice(self):
""" Add a Node twice. This should raise a NodeAlreadyAddedException. """
tree = self.get_new_tree()
key = hash_string('test')
node1 = Node(key=key)
node2 = Node(key=key)
tree.add_node(node1)
# Adding a Node with a key that is already in the tree should raise an Exception.
with self.assertRaises(NodeAlreadyAddedException):
tree.add_node(node2)
def test_find_node(self):
""" Find a Node in the BucketTree. """
tree = self.get_new_tree()
key = hash_string('test')
node1 = Node(key=key)
tree.add_node(node1)
# Looking for the key again should return the same Node.
node2 = tree.find_node(key)
self.assertEquals(node1, node2)
def test_full_bucket(self):
tree = self.get_new_tree()
# This begins with a 1 in binary.
key = hash_string('test')
dec_key = int(key, 16)
# Add keys in the same Bucket.
for i in range(0, settings.BUCKET_SIZE + settings.BUCKET_REPLACEMENT_CACHE_SIZE):
key = hex(dec_key + i)[2:]
node = Node(key=key)
tree.add_node(node)
i += 1
key = hex(dec_key + i)[2:]
node = Node(key=key)
# Bucket should be full by now.
with self.assertRaises(BucketIsFullException):
tree.add_node(node)
| import unittest
from dht.node import Node, SelfNode
from dht.routing import BucketTree
from dht.utils import hash_string
from dht.bucket import NodeAlreadyAddedException
from dht import settings
class BucketTreeTest(unittest.TestCase):
"""
BucketTree.find_node(key)
BucketTree.add_node(node)
"""
def get_new_tree(self):
tree = BucketTree(SelfNode(key='0'))
assert len(tree.bucket_node_list) == 1
return tree
def test_add_neighbour(self):
""" Test add_node on BucketTree with the neighbour of SelfNode. This should
create settings.KEY_SIZE + 1 nodes. """
tree = self.get_new_tree()
# This node is right next to the SelfNode, this creates lots of Buckets.
node = Node(key='1')
tree.add_node(node)
assert len(tree.bucket_node_list) == settings.KEY_SIZE + 1
def test_add_node_twice(self):
""" Add a Node twice. This should raise a NodeAlreadyAddedException. """
tree = self.get_new_tree()
key = hash_string('test')
node1 = Node(key=key)
node2 = Node(key=key)
tree.add_node(node1)
# Adding a Node with a key that is already in the tree should raise an Exception.
with self.assertRaises(NodeAlreadyAddedException):
tree.add_node(node2)
def test_find_node(self):
""" Find a Node in the BucketTree. """
tree = self.get_new_tree()
key = hash_string('test')
node1 = Node(key=key)
tree.add_node(node1)
# Looking for the key again should return the same Node.
node2 = tree.find_node(key)
self.assertEquals(node1, node2)
| mit | Python |
7d165a1855339ee06c2982176ab33c2f02f75b35 | return the most 8 colors | LanceGin/haishoku | haishoku/main.py | haishoku/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-15 15:10
# @Author : Gin (gin.lance.inside@hotmail.com)
# @Link :
# @Disc : haishoku main function
import sys
import haishoku
import alg
def main():
# get image_path from system args
image_path = sys.argv[1]
# get colors tuple with haishoku module
image_colors = haishoku.get_colors(image_path)
# sort the image colors tuple
sorted_image_colors = alg.sort_by_rgb(image_colors)
# group the colors by the accuaracy
grouped_image_colors = alg.group_by_accuracy(sorted_image_colors)
# get the weighted mean of all colors
colors_mean = []
for i in range(3):
for j in range(3):
for k in range(3):
grouped_image_color = grouped_image_colors[i][j][k]
if 0 != len(grouped_image_color):
color_mean = alg.get_weighted_mean(grouped_image_color)
colors_mean.append(color_mean)
# return the most 8 colors
temp_sorted_colors_mean = sorted(colors_mean)
if 8 < len(temp_sorted_colors_mean):
colors_mean = temp_sorted_colors_mean[len(temp_sorted_colors_mean)-8 : len(temp_sorted_colors_mean)]
else:
colors_mean = temp_sorted_colors_mean
for color_mean in colors_mean:
print(color_mean)
if __name__ == "__main__":
print("hello haishoku.")
main() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-15 15:10
# @Author : Gin (gin.lance.inside@hotmail.com)
# @Link :
# @Disc : haishoku main function
import sys
import haishoku
import alg
def main():
# get image_path from system args
image_path = sys.argv[1]
# get colors tuple with haishoku module
image_colors = haishoku.get_colors(image_path)
# sort the image colors tuple
sorted_image_colors = alg.sort_by_rgb(image_colors)
# group the colors by the accuaracy
grouped_image_colors = alg.group_by_accuracy(sorted_image_colors)
# get the weighted mean of all colors
colors_mean = []
for i in range(3):
for j in range(3):
for k in range(3):
grouped_image_color = grouped_image_colors[i][j][k]
if 0 != len(grouped_image_color):
color_mean = alg.get_weighted_mean(grouped_image_color)
colors_mean.append(color_mean)
for color_mean in sorted(colors_mean):
print(color_mean)
if __name__ == "__main__":
print("hello haishoku.")
main() | mit | Python |
90a19e1987903456eb890164d558e57c6ebf2ae0 | Set up media for deploy. | flavoi/diventi,flavoi/diventi | config/settings/production.py | config/settings/production.py | import dj_database_url
from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": dj_database_url.config(),
}
ALLOWED_HOSTS = [
'diventi.herokuapp.com',
'localhost',
]
# Host static and media on Amazon S3 support
# http://aws.amazon.com/
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = get_env_variable('AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
AWS_LOCATION = 'static'
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATIC_URL = '//%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
MEDIA_LOCATION = 'media'
MEDIA_ROOT = '/%s/' % MEDIA_LOCATION
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = '//%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
| import dj_database_url
from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": dj_database_url.config(),
}
ALLOWED_HOSTS = [
'diventi.herokuapp.com',
'localhost',
]
# Host static and media on Amazon S3 support
# http://aws.amazon.com/
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = get_env_variable('AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
AWS_LOCATION = 'static'
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATIC_URL = '//%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
MEDIA_LOCATION = 'media'
MEDIA_ROOT = '/%s/' % MEDIA_LOCATION
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3BotoStorage'
MEDIA_URL = '//%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
| apache-2.0 | Python |
b7c7391fb6f7ec8863d60cb448e24fc0cd18f3cb | Optimize timer.sleep and timer.timeout creation and execution. | abusesa/idiokit | idiokit/timer.py | idiokit/timer.py | from __future__ import absolute_import
from functools import partial
from . import idiokit
from ._selectloop import cancel as selectloop_cancel, sleep as selectloop_sleep
def _cancel(node, _):
selectloop_cancel(node)
def sleep(delay):
event = idiokit.Event()
node = selectloop_sleep(delay, event.succeed)
event.result().unsafe_listen(partial(_cancel, node))
return event
class Timeout(Exception):
pass
def timeout(timeout, stream=None, throw=Timeout()):
if stream is None:
stream = idiokit.Event()
node = selectloop_sleep(timeout, stream.throw, throw)
stream.result().unsafe_listen(partial(_cancel, node))
return stream
| from . import idiokit, _selectloop
@idiokit.stream
def sleep(delay):
event = idiokit.Event()
node = _selectloop.sleep(delay, event.succeed)
try:
yield event
except:
_selectloop.cancel(node)
raise
class Timeout(Exception):
pass
@idiokit.stream
def timeout(timeout, stream=None, throw=Timeout()):
if stream is None:
stream = idiokit.Event()
node = _selectloop.sleep(timeout, stream.throw, throw)
try:
result = yield stream
finally:
_selectloop.cancel(node)
idiokit.stop(result)
| mit | Python |
35a62db7ac313e87ae6a7ff6471e302678232466 | Bump version for pypi to 0.2018.01.05.0208 | oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb | ipwb/__init__.py | ipwb/__init__.py | __version__ = '0.2018.01.05.0208'
| __version__ = '0.2018.01.05.0029'
| mit | Python |
27bc3fc887092bd01b7ecbf69ce6b86e92287e20 | fix adding BGGClientLegacy to exports (prefer using extend with a list in case there could be more classes to add) | lcosmin/boardgamegeek | boardgamegeek/__init__.py | boardgamegeek/__init__.py | # coding: utf-8
"""
.. module:: boardgamegeek
:platform: Unix, Windows
:synopsis: interface to boardgamegeek.com
.. moduleauthor:: Cosmin Luță <q4break@gmail.com>
"""
from .api import BGGClient, BGGChoose, BGGRestrictDomainTo, BGGRestrictPlaysTo, BGGRestrictSearchResultsTo, BGGRestrictCollectionTo
from .legacy_api import BGGClientLegacy
from .exceptions import BGGError, BGGApiRetryError, BGGApiError, BGGApiTimeoutError, BGGValueError, BGGItemNotFoundError
from .cache import CacheBackendNone, CacheBackendMemory, CacheBackendSqlite
from .version import __version__
__all__ = ["BGGClient", "BGGChoose", "BGGRestrictSearchResultsTo", "BGGRestrictPlaysTo", "BGGRestrictDomainTo",
"BGGRestrictCollectionTo", "BGGError", "BGGValueError", "BGGApiRetryError", "BGGApiError",
"BGGApiTimeoutError", "BGGItemNotFoundError", "CacheBackendNone", "CacheBackendSqlite", "CacheBackendMemory"]
__all__.extend(["BGGClientLegacy"])
__import__('pkg_resources').declare_namespace(__name__)
| # coding: utf-8
"""
.. module:: boardgamegeek
:platform: Unix, Windows
:synopsis: interface to boardgamegeek.com
.. moduleauthor:: Cosmin Luță <q4break@gmail.com>
"""
from .api import BGGClient, BGGChoose, BGGRestrictDomainTo, BGGRestrictPlaysTo, BGGRestrictSearchResultsTo, BGGRestrictCollectionTo
from .legacy_api import BGGClientLegacy
from .exceptions import BGGError, BGGApiRetryError, BGGApiError, BGGApiTimeoutError, BGGValueError, BGGItemNotFoundError
from .cache import CacheBackendNone, CacheBackendMemory, CacheBackendSqlite
from .version import __version__
__all__ = ["BGGClient", "BGGChoose", "BGGRestrictSearchResultsTo", "BGGRestrictPlaysTo", "BGGRestrictDomainTo",
"BGGRestrictCollectionTo", "BGGError", "BGGValueError", "BGGApiRetryError", "BGGApiError",
"BGGApiTimeoutError", "BGGItemNotFoundError", "CacheBackendNone", "CacheBackendSqlite", "CacheBackendMemory"]
__all__.append(["BGGClientLegacy"])
__import__('pkg_resources').declare_namespace(__name__)
| bsd-3-clause | Python |
acf3819d433f3ebc3d3eed17c61f2542f7429f8e | Use __file__ instead of inspect, for compatibility with frozen environments | mikedh/trimesh,mikedh/trimesh,dajusc/trimesh,mikedh/trimesh,mikedh/trimesh,dajusc/trimesh | trimesh/resources/__init__.py | trimesh/resources/__init__.py | import os
# find the current absolute path to this directory
_pwd = os.path.dirname(__file__)
def get_resource(name, decode=True):
"""
Get a resource from the trimesh/resources folder.
Parameters
-------------
name : str
File path relative to `trimesh/resources`
decode : bool
Whether or not to decode result as UTF-8
Returns
-------------
resource : str or bytes
File data
"""
# get the resource using relative names
with open(os.path.join(_pwd, name), 'rb') as f:
resource = f.read()
# make sure we return it as a string if asked
if decode and hasattr(resource, 'decode'):
return resource.decode('utf-8')
return resource
| import os
import inspect
# find the current absolute path using inspect
_pwd = os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe())))
def get_resource(name, decode=True):
"""
Get a resource from the trimesh/resources folder.
Parameters
-------------
name : str
File path relative to `trimesh/resources`
decode : bool
Whether or not to decode result as UTF-8
Returns
-------------
resource : str or bytes
File data
"""
# get the resource using relative names
with open(os.path.join(_pwd, name), 'rb') as f:
resource = f.read()
# make sure we return it as a string if asked
if decode and hasattr(resource, 'decode'):
return resource.decode('utf-8')
return resource
| mit | Python |
c20d34b0f26c8b97e8ac44bcab241f74ee8bbccf | fix import | theno/fabsetup,theno/fabsetup | fabfile/setup/calibre.py | fabfile/setup/calibre.py | import os.path
from fabsetup.fabutils import task
from fabsetup.fabutils import subtask, run
from fabsetup.utils import flo
@task
def calibre():
'''Install or update calibre (ebook management tool).
More info:
https://calibre-ebook.com/
https://calibre-ebook.com/download_linux
https://github.com/kovidgoyal/calibre
'''
instdir = '~/bin/calibre-bin'
install_calibre(instdir)
@subtask
def install_calibre(instdir):
inst_parent = os.path.dirname(instdir)
run(flo('mkdir -p {inst_parent}'))
run(flo('wget -nv -O- '
'https://raw.githubusercontent.com/kovidgoyal/calibre/master/setup/'
'linux-installer.py | '
'python -c "import sys; '
'main=lambda x,y:sys.stderr.write(\'Download failed\\n\'); '
'exec(sys.stdin.read()); main(\'{inst_parent}\', True)"'))
# calibre-installer installs into {inst_parent}/calibre/; needs to be moved
run(flo('rm -rf {instdir}'))
run(flo('mv {inst_parent}/calibre {instdir}'))
run(flo('ln -snf {instdir}/calibre ~/bin/calibre'))
| import os.path
from fabsetup.fabutils import custom_task as task # here, every task is custom
from fabsetup.fabutils import subtask, run
from fabsetup.utils import flo
@task
def calibre():
'''Install or update calibre (ebook management tool).
More info:
https://calibre-ebook.com/
https://calibre-ebook.com/download_linux
https://github.com/kovidgoyal/calibre
'''
instdir = '~/bin/calibre-bin'
install_calibre(instdir)
@subtask
def install_calibre(instdir):
inst_parent = os.path.dirname(instdir)
run(flo('mkdir -p {inst_parent}'))
run(flo('wget -nv -O- '
'https://raw.githubusercontent.com/kovidgoyal/calibre/master/setup/'
'linux-installer.py | '
'python -c "import sys; '
'main=lambda x,y:sys.stderr.write(\'Download failed\\n\'); '
'exec(sys.stdin.read()); main(\'{inst_parent}\', True)"'))
# calibre-installer installs into {inst_parent}/calibre/; needs to be moved
run(flo('rm -rf {instdir}'))
run(flo('mv {inst_parent}/calibre {instdir}'))
run(flo('ln -snf {instdir}/calibre ~/bin/calibre'))
| mit | Python |
83dabc9fc1142e1575843d3a68c6241185543936 | Make the warning for SQLite not being supported a print instead of an exception. | duointeractive/django-fabtastic | fabtastic/db/__init__.py | fabtastic/db/__init__.py | from django.conf import settings
from fabtastic.db import util
db_engine = util.get_db_setting('ENGINE')
if 'postgresql_psycopg2' in db_engine:
from fabtastic.db.postgres import *
else:
print("Fabtastic WARNING: DB engine '%s' is not supported" % db_engine)
| from django.conf import settings
from fabtastic.db import util
db_engine = util.get_db_setting('ENGINE')
if 'postgresql_psycopg2' in db_engine:
from fabtastic.db.postgres import *
else:
raise NotImplementedError("Fabtastic: DB engine '%s' is not supported" % db_engine) | bsd-3-clause | Python |
5ea4f15c7228ab41fefea437a9aeca6f1791e55b | make sure the verb storage is initialised | smuser90/Stream-Framework,smuser90/Stream-Framework,izhan/Stream-Framework,turbolabtech/Stream-Framework,SergioChan/Stream-Framework,izhan/Stream-Framework,SergioChan/Stream-Framework,SergioChan/Stream-Framework,nikolay-saskovets/Feedly,Anislav/Stream-Framework,turbolabtech/Stream-Framework,turbolabtech/Stream-Framework,izhan/Stream-Framework,Anislav/Stream-Framework,nikolay-saskovets/Feedly,SergioChan/Stream-Framework,turbolabtech/Stream-Framework,Anislav/Stream-Framework,smuser90/Stream-Framework,nikolay-saskovets/Feedly,nikolay-saskovets/Feedly,smuser90/Stream-Framework,Anislav/Stream-Framework,izhan/Stream-Framework | feedly/verbs/__init__.py | feedly/verbs/__init__.py | from feedly.utils import get_class_from_string
VERB_DICT = dict()
def get_verb_storage():
from feedly import settings
if settings.FEEDLY_VERB_STORAGE == 'in-memory':
return VERB_DICT
else:
return get_class_from_string(settings.FEEDLY_VERB_STORAGE)()
def register(verb):
'''
Registers the given verb class
'''
from feedly.verbs.base import Verb
if not issubclass(verb, Verb):
raise ValueError('%s doesnt subclass Verb' % verb)
registered_verb = get_verb_storage().get(verb.id, verb)
if registered_verb != verb:
raise ValueError(
'cant register verb %r with id %s (clashing with verb %r)' %
(verb, verb.id, registered_verb))
get_verb_storage()[verb.id] = verb
def get_verb_by_id(verb_id):
if not isinstance(verb_id, int):
raise ValueError('please provide a verb id, got %r' % verb_id)
return get_verb_storage()[verb_id]
| from feedly.utils import get_class_from_string
VERB_DICT = dict()
def get_verb_storage():
from feedly import settings
if settings.FEEDLY_VERB_STORAGE == 'in-memory':
return VERB_DICT
else:
return get_class_from_string(settings.FEEDLY_VERB_STORAGE)
def register(verb):
'''
Registers the given verb class
'''
from feedly.verbs.base import Verb
if not issubclass(verb, Verb):
raise ValueError('%s doesnt subclass Verb' % verb)
registered_verb = get_verb_storage().get(verb.id, verb)
if registered_verb != verb:
raise ValueError(
'cant register verb %r with id %s (clashing with verb %r)' %
(verb, verb.id, registered_verb))
get_verb_storage()[verb.id] = verb
def get_verb_by_id(verb_id):
if not isinstance(verb_id, int):
raise ValueError('please provide a verb id, got %r' % verb_id)
return get_verb_storage()[verb_id]
| bsd-3-clause | Python |
7a1ae8e1a0c614ddb2e3f9e441ab2fb97ed5fc25 | Bump to 6.0.0 | cheekiatng/titanium_mobile,jhaynie/titanium_mobile,ashcoding/titanium_mobile,AngelkPetkov/titanium_mobile,mano-mykingdom/titanium_mobile,ashcoding/titanium_mobile,cheekiatng/titanium_mobile,jhaynie/titanium_mobile,ashcoding/titanium_mobile,jhaynie/titanium_mobile,cheekiatng/titanium_mobile,AngelkPetkov/titanium_mobile,AngelkPetkov/titanium_mobile,cheekiatng/titanium_mobile,ashcoding/titanium_mobile,cheekiatng/titanium_mobile,jhaynie/titanium_mobile,mano-mykingdom/titanium_mobile,benbahrenburg/titanium_mobile,falkolab/titanium_mobile,falkolab/titanium_mobile,collinprice/titanium_mobile,falkolab/titanium_mobile,cheekiatng/titanium_mobile,collinprice/titanium_mobile,AngelkPetkov/titanium_mobile,cheekiatng/titanium_mobile,collinprice/titanium_mobile,falkolab/titanium_mobile,mano-mykingdom/titanium_mobile,jhaynie/titanium_mobile,falkolab/titanium_mobile,ashcoding/titanium_mobile,mano-mykingdom/titanium_mobile,falkolab/titanium_mobile,ashcoding/titanium_mobile,ashcoding/titanium_mobile,mano-mykingdom/titanium_mobile,benbahrenburg/titanium_mobile,AngelkPetkov/titanium_mobile,collinprice/titanium_mobile,benbahrenburg/titanium_mobile,mano-mykingdom/titanium_mobile,jhaynie/titanium_mobile,jhaynie/titanium_mobile,collinprice/titanium_mobile,collinprice/titanium_mobile,AngelkPetkov/titanium_mobile,mano-mykingdom/titanium_mobile,AngelkPetkov/titanium_mobile,ashcoding/titanium_mobile,collinprice/titanium_mobile,cheekiatng/titanium_mobile,benbahrenburg/titanium_mobile,AngelkPetkov/titanium_mobile,jhaynie/titanium_mobile,benbahrenburg/titanium_mobile,benbahrenburg/titanium_mobile,collinprice/titanium_mobile,falkolab/titanium_mobile,mano-mykingdom/titanium_mobile,benbahrenburg/titanium_mobile,benbahrenburg/titanium_mobile,falkolab/titanium_mobile | build/titanium_version.py | build/titanium_version.py | version = '6.0.0'
module_apiversion = '2'
| version = '5.2.0'
module_apiversion = '2'
| apache-2.0 | Python |
8decbeb6a3928668581d0f671ae87a21f7d2c6b7 | add in sentinel support | ortoo/schooldata | update_school_data.py | update_school_data.py | import governorhub
import logging
import redis
import os
import loggly.handlers
from datetime import datetime
from similar_schools import update_similar_schools
from dfe_data import update_dfe_data
logging.basicConfig(level=logging.INFO)
# Turn off requests INFO level logging
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
SENTINEL_HOST = os.environ.get('SENTINEL_HOST', None)
SENTINEL_PORT = os.environ.get('SENTINEL_PORT', 26379)
SENTINEL_MASTER = os.environ.get('SENTINEL_MASTER', 'base')
LOGGLY_TOKEN = os.environ.get('LOGGLY_TOKEN', None)
UPDATE_CHAN = 'or2:school:updatedata:channel'
UPDATE_Q = 'or2:school:updatedataq'
if LOGGLY_TOKEN is not None:
handler = loggly.handlers.HTTPSHandler('https://logs-01.loggly.com/inputs/%s/tag/school-data' % LOGGLY_TOKEN)
logging.getLogger('').addHandler(handler)
governorhub.connect()
School = governorhub.ModelType('school')
def update_school(school):
if getattr(school, 'manualData', False):
logging.warning('School requested that has manual data: %s. Not processing' % school._id)
return
update_similar_schools(school)
update_dfe_data(school)
setattr(school, 'lastRefreshed', datetime.now())
school.save()
def clear_queue(client):
while True:
try:
schoolId = client.lpop(UPDATE_Q)
if schoolId is None:
break
schoolId = schoolId.decode('utf-8')
try:
logging.info('Updating ' + schoolId)
school = School.get(schoolId)
update_school(school)
logging.info('Updated ' + schoolId)
except Exception as ex:
logging.error('Error updating data for school: ' + schoolId)
logging.exception(ex)
except Exception as ex:
logging.exception(ex)
def listen_for_requests():
if SENTINEL_HOST is None:
client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
else:
sentinel = Sentinel([(SENTINEL_HOST, SENTINEL_PORT)])
client = sentinel.master_for(SENTINEL_MASTER)
clear_queue(client)
ps = client.pubsub()
ps.subscribe(UPDATE_CHAN)
# Hang until we get a message
try:
for message in ps.listen():
try:
if message['type'] == 'message':
data = message['data'].decode('utf-8')
if data == 'update':
clear_queue(client)
except Exception as ex:
logging.exception(ex)
finally:
ps.close()
if __name__ == '__main__':
listen_for_requests()
| import governorhub
import logging
import redis
import os
import loggly.handlers
from datetime import datetime
from similar_schools import update_similar_schools
from dfe_data import update_dfe_data
logging.basicConfig(level=logging.INFO)
# Turn off requests INFO level logging
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
LOGGLY_TOKEN = os.environ.get('LOGGLY_TOKEN', None)
UPDATE_CHAN = 'or2:school:updatedata:channel'
UPDATE_Q = 'or2:school:updatedataq'
if LOGGLY_TOKEN is not None:
handler = loggly.handlers.HTTPSHandler('https://logs-01.loggly.com/inputs/%s/tag/school-data' % LOGGLY_TOKEN)
logging.getLogger('').addHandler(handler)
governorhub.connect()
School = governorhub.ModelType('school')
def update_school(school):
if getattr(school, 'manualData', False):
logging.warning('School requested that has manual data: %s. Not processing' % school._id)
return
update_similar_schools(school)
update_dfe_data(school)
setattr(school, 'lastRefreshed', datetime.now())
school.save()
def clear_queue(client):
while True:
try:
schoolId = client.lpop(UPDATE_Q)
if schoolId is None:
break
schoolId = schoolId.decode('utf-8')
try:
logging.info('Updating ' + schoolId)
school = School.get(schoolId)
update_school(school)
logging.info('Updated ' + schoolId)
except Exception as ex:
logging.error('Error updating data for school: ' + schoolId)
logging.exception(ex)
except Exception as ex:
logging.exception(ex)
def listen_for_requests():
client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
clear_queue(client)
ps = client.pubsub()
ps.subscribe(UPDATE_CHAN)
# Hang until we get a message
try:
for message in ps.listen():
try:
if message['type'] == 'message':
data = message['data'].decode('utf-8')
if data == 'update':
clear_queue(client)
except Exception as ex:
logging.exception(ex)
finally:
ps.close()
if __name__ == '__main__':
listen_for_requests()
| mit | Python |
9348b7df383e7a9fa970b7ad0692f7be280f6b79 | fix scoping issue with exception handling | felliott/scrapi,fabianvf/scrapi,felliott/scrapi,erinspace/scrapi,erinspace/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi | scrapi/util.py | scrapi/util.py | from datetime import datetime
import six
import pytz
import time
import logging
logger = logging.getLogger()
xrange = six.moves.xrange
def timestamp():
return pytz.utc.localize(datetime.utcnow()).isoformat()
def copy_to_unicode(element):
""" used to transform the lxml version of unicode to a
standard version of unicode that can be pickalable -
necessary for linting """
if isinstance(element, dict):
return {
key: copy_to_unicode(val)
for key, val in element.items()
}
elif isinstance(element, list):
return list(map(copy_to_unicode, element))
else:
try:
# A dirty way to convert to unicode in python 2 + 3.3+
return u''.join(element)
except TypeError:
return element
def stamp_from_raw(raw_doc, **kwargs):
kwargs['normalizeFinished'] = timestamp()
stamps = raw_doc['timestamps']
stamps.update(kwargs)
return stamps
def format_date_with_slashes(date):
return date.strftime('%m/%d/%Y')
def json_without_bytes(jobj):
"""
An ugly hack.
Before we treat a structure as JSON, ensure that bytes are decoded to str.
"""
# Create a JSON-compatible copy of the attributes for validation
jobj = jobj.copy()
for k, v in jobj.items():
if isinstance(v, six.binary_type):
jobj[k] = v.decode('utf8')
return jobj
def try_n_times(n, action, *args, **kwargs):
exc = None
for _ in xrange(n):
try:
return action(*args, **kwargs)
except Exception as e:
exc = e
logger.exception(e)
time.sleep(15)
if exc:
raise exc
| from datetime import datetime
import six
import pytz
import time
import logging
logger = logging.getLogger()
xrange = six.moves.xrange
def timestamp():
return pytz.utc.localize(datetime.utcnow()).isoformat()
def copy_to_unicode(element):
""" used to transform the lxml version of unicode to a
standard version of unicode that can be pickalable -
necessary for linting """
if isinstance(element, dict):
return {
key: copy_to_unicode(val)
for key, val in element.items()
}
elif isinstance(element, list):
return list(map(copy_to_unicode, element))
else:
try:
# A dirty way to convert to unicode in python 2 + 3.3+
return u''.join(element)
except TypeError:
return element
def stamp_from_raw(raw_doc, **kwargs):
kwargs['normalizeFinished'] = timestamp()
stamps = raw_doc['timestamps']
stamps.update(kwargs)
return stamps
def format_date_with_slashes(date):
return date.strftime('%m/%d/%Y')
def json_without_bytes(jobj):
"""
An ugly hack.
Before we treat a structure as JSON, ensure that bytes are decoded to str.
"""
# Create a JSON-compatible copy of the attributes for validation
jobj = jobj.copy()
for k, v in jobj.items():
if isinstance(v, six.binary_type):
jobj[k] = v.decode('utf8')
return jobj
def try_n_times(n, action, *args, **kwargs):
for _ in xrange(n):
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(15)
if e:
raise e
| apache-2.0 | Python |
a3ac67a4174e01b893aa1c3ae72c251ee59fcec8 | format code | ponty/entrypoint2,ponty/entrypoint2 | doc/generate-doc.py | doc/generate-doc.py | import glob
import logging
import os
from easyprocess import EasyProcess
from entrypoint2 import entrypoint
commands = [
"python3 -m entrypoint2.examples.hello 1",
"python3 -m entrypoint2.examples.hello 1 --two 1",
"python3 -m entrypoint2.examples.hello 1 -t 1",
"python3 -m entrypoint2.examples.hello 1 --three",
"python3 -m entrypoint2.examples.hello 1 --debug",
"python3 -m entrypoint2.examples.hello",
"python3 -m entrypoint2.examples.hello --help",
"python3 -m entrypoint2.examples.hello --version",
"python3 -m entrypoint2.examples.repeating --help",
"python3 -m entrypoint2.examples.repeating -f input1.txt -f input2.txt",
]
def empty_dir(dir):
files = glob.glob(os.path.join(dir, "*"))
for f in files:
os.remove(f)
@entrypoint
def main():
    """Run every example command, capture its output into doc/gen/*.txt, then
    re-embed those files into README.md via the ``embedme`` npm tool."""
    gendir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gen")
    logging.info("gendir: %s", gendir)
    os.makedirs(gendir, exist_ok=True)
    # Start from a clean slate so stale captures do not linger.
    empty_dir(gendir)
    try:
        os.chdir("gen")
        for cmd in commands:
            logging.info("cmd: %s", cmd)
            # File name mirrors the command, with spaces made path-safe.
            fname_base = cmd.replace(" ", "_")
            fname = fname_base + ".txt"
            logging.info("cmd: %s", cmd)
            print("file name: %s" % fname)
            with open(fname, "w") as f:
                # First line echoes the command, shell-transcript style.
                f.write("$ " + cmd + "\n")
                p = EasyProcess(cmd).call()
                f.write(p.stderr)
                if p.stderr and p.stdout:
                    f.write("\n")
                f.write(p.stdout)
    finally:
        os.chdir("..")
    # Re-embed the generated snippets into the README; fail loudly if
    # embedme reports a missing file.
    embedme = EasyProcess(["npx", "embedme", "../README.md"])
    embedme.call()
    print(embedme.stdout)
    assert embedme.return_code == 0
    assert not "but file does not exist" in embedme.stdout
| import logging
import os
import glob
from entrypoint2 import entrypoint
from easyprocess import EasyProcess
commands = [
"python3 -m entrypoint2.examples.hello 1",
"python3 -m entrypoint2.examples.hello 1 --two 1",
"python3 -m entrypoint2.examples.hello 1 -t 1",
"python3 -m entrypoint2.examples.hello 1 --three",
"python3 -m entrypoint2.examples.hello 1 --debug",
"python3 -m entrypoint2.examples.hello",
"python3 -m entrypoint2.examples.hello --help",
"python3 -m entrypoint2.examples.hello --version",
"python3 -m entrypoint2.examples.repeating --help",
"python3 -m entrypoint2.examples.repeating -f input1.txt -f input2.txt",
]
def empty_dir(dir):
files = glob.glob(os.path.join(dir, "*"))
for f in files:
os.remove(f)
@entrypoint
def main():
gendir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gen")
logging.info("gendir: %s", gendir)
os.makedirs(gendir, exist_ok=True)
empty_dir(gendir)
pls = []
try:
os.chdir("gen")
for cmd in commands:
logging.info("cmd: %s", cmd)
fname_base = cmd.replace(" ", "_")
fname = fname_base + ".txt"
logging.info("cmd: %s", cmd)
print("file name: %s" % fname)
with open(fname, "w") as f:
f.write("$ " + cmd + "\n")
p = EasyProcess(cmd).call()
f.write(p.stderr)
if p.stderr and p.stdout:
f.write("\n")
f.write(p.stdout)
pls += [p]
finally:
os.chdir("..")
embedme = EasyProcess(["npx", "embedme", "../README.md"])
embedme.call()
print(embedme.stdout)
assert embedme.return_code == 0
assert not "but file does not exist" in embedme.stdout
| bsd-2-clause | Python |
b4e97efee571c51d686668de480a23abf3da4f7a | Move color selection of rows outside the addstr() method | leonkoens/utop | utop/views/content.py | utop/views/content.py | from utop import model
from utop.model import Model
from utop.view import View
class Content(View):
    """ Draw the content of utop. This is the list of users with the
    other selected columns.
    """

    def draw(self):
        """ Draw the whole pane. """
        self.window.erase()
        self.draw_headers()
        self.draw_columns()

    def draw_columns(self):
        """ Draw the values of the sorted columns, per line. """
        i = 1 # Depends on what line the headers are.
        # Never draw more rows than fit below the header line.
        list_max = max(0, self.model.maxy - i)
        for user in self.model.sorted_users[:list_max]:
            j = 0
            user_data = self.model.user_data[user]
            for key in self.model.sorted_columns:
                value = Model.columns[key]['format'].format(user_data[key])
                width = Model.columns[key]['width']
                # The currently selected row is drawn highlighted.
                color = model.COLOR_DEFAULT
                if i - 1 == self.model.selected_row:
                    color = model.COLOR_CYAN
                self.addstr(i, j, value[:width-2], color=color)
                j += width
            i += 1

    def draw_headers(self):
        """ Draw the headers of the content view. """
        i = 0
        for key in self.model.sorted_columns:
            # Highlight the column the list is currently sorted by; the
            # first letter of every title is underlined separately.
            color = model.COLOR_DEFAULT
            if self.model.sort_by == key:
                color = model.COLOR_CYAN
            self.addstr(0, i, self.model.columns[key]['title'][0], underline=True, color=color)
            self.addstr(0, i+1, self.model.columns[key]['title'][1:], color=color)
            i += self.model.columns[key]['width']
| from utop import model
from utop.model import Model
from utop.view import View
class Content(View):
""" Draw the content of utop. This is the list of users with the
other selected columns.
"""
def draw(self):
""" Draw the whole pane. """
self.window.erase()
self.draw_headers()
self.draw_columns()
def draw_columns(self):
""" Draw the values of the sorted columns, per line. """
i = 1 # Depends on what line the headers are.
list_max = max(0, self.model.maxy - i)
for user in self.model.sorted_users[:list_max]:
j = 0
user_data = self.model.user_data[user]
for key in self.model.sorted_columns:
value = Model.columns[key]['format'].format(user_data[key])
width = Model.columns[key]['width']
if i - 1 == self.model.selected_row:
self.addstr(i, j, value[:width-2], color=model.COLOR_CYAN)
else:
self.addstr(i, j, value[:width-2])
j += width
i += 1
def draw_headers(self):
""" Draw the headers of the content view. """
i = 0
for key in self.model.sorted_columns:
color = model.COLOR_DEFAULT
if self.model.sort_by == key:
color = model.COLOR_CYAN
self.addstr(0, i, self.model.columns[key]['title'][0], underline=True, color=color)
self.addstr(0, i+1, self.model.columns[key]['title'][1:], color=color)
i += self.model.columns[key]['width']
| bsd-3-clause | Python |
a92a611875f065a49f90897e8f113081d7882e4b | Optimize OpCompositeConstruct -> OpConstantComposite | kristerw/spirv-tools | constprop.py | constprop.py | """Change instructions having only constant operands to a constant.
This pass tends to leave dead instructions, so dead_code_elim should
be run after."""
import ir
def optimize_OpCompositeConstruct(module, inst):
    """Fold an OpCompositeConstruct whose operands are all constants into a
    new OpConstantComposite added to the module's global section."""
    new_inst = ir.Instruction(module, 'OpConstantComposite', module.new_id(),
                              inst.type_id, inst.operands[:])
    module.add_global_inst(new_inst)
    return new_inst
def optimize_OpCompositeExtract(module, inst):
    """Resolve an extract from a constant composite to the element itself."""
    extracted = inst.operands[0].inst
    # operands[1:] are literal indices; descend one composite level each.
    for idx in inst.operands[1:]:
        extracted = extracted.operands[idx].inst
    return extracted
def optimize_OpVectorShuffle(module, inst):
    """Fold an OpVectorShuffle of two constant vectors into an
    OpConstantComposite built from the selected components."""
    vec1_inst = inst.operands[0].inst
    vec1_len = len(vec1_inst.operands)
    vec2_inst = inst.operands[1].inst
    components = []
    for component in inst.operands[2:]:
        if component == 0xffffffff:
            # Undefined component, so we may choose any value, e.g. the
            # first element from vector 1.
            components.append(vec1_inst.operands[0])
        elif component < vec1_len:
            # Indices below vec1's length select from the first vector.
            components.append(vec1_inst.operands[component])
        else:
            # Remaining indices select from the second vector, rebased.
            components.append(vec2_inst.operands[component - vec1_len])
    new_inst = ir.Instruction(module, 'OpConstantComposite', module.new_id(),
                              inst.type_id, components)
    new_inst.insert_before(inst)
    return new_inst
def optimize_inst(module, inst):
    """Simplify one instruction"""
    # Only fold instructions whose Id operands are all constants; bail out
    # on the first non-constant operand.
    for operand in inst.operands:
        if isinstance(operand, ir.Id):
            if operand.inst.op_name not in ir.CONSTANT_INSTRUCTIONS:
                return inst
    # Dispatch to the per-opcode folding helper, if one exists.
    if inst.op_name == 'OpCompositeConstruct':
        inst = optimize_OpCompositeConstruct(module, inst)
    elif inst.op_name == 'OpCompositeExtract':
        inst = optimize_OpCompositeExtract(module, inst)
    elif inst.op_name == 'OpVectorShuffle':
        inst = optimize_OpVectorShuffle(module, inst)
    return inst
def optimize(module):
    """Simple constant propagation and merging"""
    for function in module.functions:
        for inst in function.instructions():
            optimized_inst = optimize_inst(module, inst)
            if optimized_inst != inst:
                # Redirect all uses to the folded constant; the now-dead
                # original is left for the dead_code_elim pass (see module
                # docstring).
                inst.replace_uses_with(optimized_inst)
    module.finalize()
| """Change instructions having only constant operands to a constant.
This pass tends to leave dead instructions, so dead_code_elim should
be run after."""
import ir
def optimize_OpCompositeExtract(module, inst):
result_inst = inst.operands[0].inst
for index in inst.operands[1:]:
result_inst = result_inst.operands[index].inst
return result_inst
def optimize_OpVectorShuffle(module, inst):
vec1_inst = inst.operands[0].inst
vec1_len = len(vec1_inst.operands)
vec2_inst = inst.operands[1].inst
components = []
for component in inst.operands[2:]:
if component == 0xffffffff:
# Undefined component, so we may choose any value, e.g. the
# first element from vector 1.
components.append(vec1_inst.operands[0])
elif component < vec1_len:
components.append(vec1_inst.operands[component])
else:
components.append(vec2_inst.operands[component - vec1_len])
new_inst = ir.Instruction(module, 'OpConstantComposite', module.new_id(),
inst.type_id, components)
new_inst.insert_before(inst)
return new_inst
def optimize_inst(module, inst):
"""Simplify one instruction"""
for operand in inst.operands:
if isinstance(operand, ir.Id):
if operand.inst.op_name not in ir.CONSTANT_INSTRUCTIONS:
return inst
if inst.op_name == 'OpCompositeExtract':
inst = optimize_OpCompositeExtract(module, inst)
elif inst.op_name == 'OpVectorShuffle':
inst = optimize_OpVectorShuffle(module, inst)
return inst
def optimize(module):
"""Simple constant propagation and merging"""
for function in module.functions:
for inst in function.instructions():
optimized_inst = optimize_inst(module, inst)
if optimized_inst != inst:
inst.replace_uses_with(optimized_inst)
module.finalize()
| mit | Python |
208f90497c7a6867f9aeece84b1161926ca1627b | Simplify nethack protocol to a single method. | ryansb/netHUD | nethud/nh_client.py | nethud/nh_client.py | """
An example client. Run simpleserv.py first before running this.
"""
import json
from twisted.internet import reactor, protocol
# a client protocol
class EchoClient(protocol.Protocol):
"""Once connected, send a message, then print the result."""
def connectionMade(self):
self.send_message('auth', username='Qalthos', password='password')
def dataReceived(self, data):
"As soon as any data is received, write it back."
print "Server said:", data
def connectionLost(self, reason):
print "Connection lost"
# Nethack Protocol Wrapper
def send_message(self, command, **kw):
data = json.dumps(dict(command=kw))
self.transport.write(data)
class EchoFactory(protocol.ClientFactory):
    """Builds EchoClient connections; stops the reactor when the connection
    ends either way, so the script exits."""
    protocol = EchoClient

    def clientConnectionFailed(self, connector, reason):
        print "Connection failed - goodbye!"
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print "Connection lost - goodbye!"
        reactor.stop()
# this connects the protocol to the server running on games-ng.csh.rit.edu:53421
def main():
    """Connect the client factory to the NetHack server and run the reactor
    until the connection ends."""
    f = EchoFactory()
    reactor.connectTCP("games-ng.csh.rit.edu", 53421, f)
    reactor.run()
# this only runs if the module was *not* imported
if __name__ == '__main__':
main()
| """
An example client. Run simpleserv.py first before running this.
"""
import json
from twisted.internet import reactor, protocol
# a client protocol
class EchoClient(protocol.Protocol):
"""Once connected, send a message, then print the result."""
def connectionMade(self):
data = '{"register": {"email": "Qalthos@gmail.com", ' + \
'"username": "Qalthos",' + \
'"password": "password"}}'
#~ data = '{"auth": {"username": "Qalthos", "password": "password"}}'
print data
self.transport.write(data)
def dataReceived(self, data):
"As soon as any data is received, write it back."
print "Server said:", data
def connectionLost(self, reason):
print "Connection lost"
class EchoFactory(protocol.ClientFactory):
protocol = EchoClient
def clientConnectionFailed(self, connector, reason):
print "Connection failed - goodbye!"
reactor.stop()
def clientConnectionLost(self, connector, reason):
print "Connection lost - goodbye!"
reactor.stop()
# this connects the protocol to a server runing on port 8000
def main():
f = EchoFactory()
reactor.connectTCP("games-ng.csh.rit.edu", 53421, f)
reactor.run()
# this only runs if the module was *not* imported
if __name__ == '__main__':
main()
| mit | Python |
de4842fd3c587e74dcaa7ea4df036e1e7ab93b1a | Update compass.py | kived/plyer,kivy/plyer,KeyWeeUsr/plyer,johnbolia/plyer,kostyll/plyer,kivy/plyer,cleett/plyer,johnbolia/plyer,kived/plyer,cleett/plyer,KeyWeeUsr/plyer,KeyWeeUsr/plyer,kivy/plyer,kostyll/plyer | plyer/platforms/ios/compass.py | plyer/platforms/ios/compass.py | '''
iOS Compass
---------------------
'''
from plyer.facades import Compass
from pyobjus import autoclass
class IosCompass(Compass):
    """iOS implementation of the plyer Compass facade, backed by a pyobjus
    'bridge' object exposing the magnetometer."""

    def __init__(self):
        super(IosCompass, self).__init__()
        self.bridge = autoclass('bridge').alloc().init()
        # Poll the magnetometer every 0.1 s.
        self.bridge.motionManager.setMagnetometerUpdateInterval_(0.1)

    def _enable(self):
        self.bridge.startMagnetometer()

    def _disable(self):
        self.bridge.stopMagnetometer()

    def _get_orientation(self):
        # Latest raw magnetic-field components as cached by the bridge.
        return (
            self.bridge.mg_x,
            self.bridge.mg_y,
            self.bridge.mg_z)
def instance():
    """Factory used by plyer to obtain the platform Compass implementation."""
    return IosCompass()
| '''
iOS Compass
---------------------
'''
from plyer.facades import Compass
from pyobjus import autoclass
Hardware = autoclass('org.renpy.Ios.Hardware')
class IosCompass(Compass):
def __init__(self):
super(IosCompass, self).__init__()
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setMagnetometerUpdateInterval_(0.1)
def _enable(self):
self.bridge.startMagnetometer()
def _disable(self):
self.bridge.stopMagnetometer()
def _get_orientation(self):
return (
self.bridge.mg_x,
self.bridge.mg_y,
self.bridge.mg_z)
def instance():
return IosCompass()
| mit | Python |
986a5815d890f6b52747319b477fca17742b19bf | fix ConnectionWithTrace not using its timeout parameter | jvf/scalaris,caijieming-baidu/scalaris,caijieming-baidu/scalaris,scalaris-team/scalaris,caijieming-baidu/scalaris,jvf/scalaris,jvf/scalaris,caijieming-baidu/scalaris,caijieming-baidu/scalaris,jvf/scalaris,caijieming-baidu/scalaris,caijieming-baidu/scalaris,scalaris-team/scalaris,jvf/scalaris,jvf/scalaris,scalaris-team/scalaris,scalaris-team/scalaris,scalaris-team/scalaris,scalaris-team/scalaris,scalaris-team/scalaris,scalaris-team/scalaris,jvf/scalaris | python-api/scalaris_userdevguide_jsontrace.py | python-api/scalaris_userdevguide_jsontrace.py | #!/usr/bin/python
# Copyright 2011 Zuse Institute Berlin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import scalaris
class ConnectionWithTrace(scalaris.JSONConnection):
def __init__(self, url = scalaris.DEFAULT_URL, timeout = scalaris.DEFAULT_TIMEOUT):
scalaris.JSONConnection.__init__(self, url = scalaris.DEFAULT_URL, timeout = timeout)
def call(self, function, params):
params = {'jsonrpc': '2.0',
'method': function,
'params': params,
'id': 0}
# use compact JSON encoding:
params_json = json.dumps(params, separators=(',',':'))
print ''
print 'request:'
print json.dumps(params, indent=1)
headers = {"Content-type": "application/json"}
try:
self._conn.request("POST", scalaris.DEFAULT_PATH, params_json, headers)
response = self._conn.getresponse()
if (response.status < 200 or response.status >= 300):
raise scalaris.ConnectionError(response)
data = response.read()
except Exception as instance:
raise scalaris.ConnectionError(instance)
print ''
print 'response:'
print json.dumps(json.loads(data), indent=1)
response_json = json.loads(data)
return response_json['result']
if __name__ == "__main__":
    # Demo: write two keys in one transaction, then read them back and
    # overwrite one, tracing every JSON-RPC exchange on stdout.
    sc1 = scalaris.Transaction(conn = ConnectionWithTrace())
    sc1.req_list(sc1.new_req_list().add_write("keyA", "valueA").add_write("keyB", "valueB").add_commit())
    sc1.close_connection()
    sc2 = scalaris.Transaction(conn = ConnectionWithTrace())
    sc2.req_list(sc2.new_req_list().add_read("keyA").add_read("keyB"))
    sc2.req_list(sc2.new_req_list().add_write("keyA", "valueA2").add_commit())
| #!/usr/bin/python
# Copyright 2011 Zuse Institute Berlin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import scalaris
class ConnectionWithTrace(scalaris.JSONConnection):
def __init__(self, url = scalaris.DEFAULT_URL, timeout = scalaris.DEFAULT_TIMEOUT):
scalaris.JSONConnection.__init__(self, url = scalaris.DEFAULT_URL, timeout = scalaris.DEFAULT_TIMEOUT)
def call(self, function, params):
params = {'jsonrpc': '2.0',
'method': function,
'params': params,
'id': 0}
# use compact JSON encoding:
params_json = json.dumps(params, separators=(',',':'))
print ''
print 'request:'
print json.dumps(params, indent=1)
headers = {"Content-type": "application/json"}
try:
self._conn.request("POST", scalaris.DEFAULT_PATH, params_json, headers)
response = self._conn.getresponse()
if (response.status < 200 or response.status >= 300):
raise scalaris.ConnectionError(response)
data = response.read()
except Exception as instance:
raise scalaris.ConnectionError(instance)
print ''
print 'response:'
print json.dumps(json.loads(data), indent=1)
response_json = json.loads(data)
return response_json['result']
if __name__ == "__main__":
sc1 = scalaris.Transaction(conn = ConnectionWithTrace())
sc1.req_list(sc1.new_req_list().add_write("keyA", "valueA").add_write("keyB", "valueB").add_commit())
sc1.close_connection()
sc2 = scalaris.Transaction(conn = ConnectionWithTrace())
sc2.req_list(sc2.new_req_list().add_read("keyA").add_read("keyB"))
sc2.req_list(sc2.new_req_list().add_write("keyA", "valueA2").add_commit())
| apache-2.0 | Python |
a2ea05ee144dbcab22d17897df3ba01467b47497 | test of meanvT for cold population for power-law rotation curves | jobovy/galpy,jobovy/galpy,jobovy/galpy,followthesheep/galpy,followthesheep/galpy,jobovy/galpy,followthesheep/galpy,followthesheep/galpy | nose/test_diskdf.py | nose/test_diskdf.py | # Tests of the diskdf module: distribution functions from Dehnen (1999)
import numpy
from galpy.df import dehnendf, shudf
# Tests for cold population, flat rotation curve: <vt> =~ v_c
def test_dehnendf_cold_flat_vt():
    """A dynamically cold (sigma_R ~ 0.01) dehnendf in a flat (beta=0)
    rotation curve should have mean v_T ~ V_c = 1 at all radii."""
    df= dehnendf(profileParams=(0.3333333333333333,1.0, 0.01),
                 beta=0.,correct=False)
    assert numpy.fabs(df.meanvT(1.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=1'
    assert numpy.fabs(df.meanvT(0.5)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=0.5'
    assert numpy.fabs(df.meanvT(2.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=2'
    return None
# Tests for cold population, power-law rotation curve: <vt> =~ v_c
def test_dehnendf_cold_powerrise_vt():
    """A cold dehnendf in a rising power-law rotation curve (V_c = R^beta,
    beta=0.2) should have mean v_T ~ V_c(R) at all radii."""
    # Rising rotation curve
    beta= 0.2
    df= dehnendf(profileParams=(0.3333333333333333,1.0, 0.01),
                 beta=beta,correct=False)
    assert numpy.fabs(df.meanvT(1.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=1'
    assert numpy.fabs(df.meanvT(0.5)-(0.5)**beta) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=0.5'
    assert numpy.fabs(df.meanvT(2.)-(2.)**beta) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=2'
    # Consistency: the sibling tests all end with an explicit return None.
    return None
def test_dehnendf_cold_powerfall_vt():
    """A cold dehnendf in a falling power-law rotation curve (V_c = R^beta,
    beta=-0.2) should have mean v_T ~ V_c(R) at all radii."""
    # Falling rotation curve
    beta= -0.2
    df= dehnendf(profileParams=(0.3333333333333333,1.0, 0.01),
                 beta=beta,correct=False)
    assert numpy.fabs(df.meanvT(1.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=1'
    assert numpy.fabs(df.meanvT(0.5)-(0.5)**beta) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=0.5'
    assert numpy.fabs(df.meanvT(2.)-(2.)**beta) < 10.**-3., 'mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=2'
    return None
| # Tests of the diskdf module: distribution functions from Dehnen (1999)
import numpy
from galpy.df import dehnendf, shudf
# Tests for cold population, flat rotation curve: <vt> =~ v_c
def test_dehnendf_cold_flat_vt():
df= dehnendf(profileParams=(0.3333333333333333,1.0, 0.01),
beta=0.,correct=False)
assert numpy.fabs(df.meanvT(1.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=1'
assert numpy.fabs(df.meanvT(0.5)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=0.5'
assert numpy.fabs(df.meanvT(2.)-1.) < 10.**-3., 'mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=2'
return None
| bsd-3-clause | Python |
881e693d16d12109c3ececffda61336b020c172a | Test sqlite and mongoquery variations. | ericdill/databroker,ericdill/databroker | portable_mds/tests/conftest.py | portable_mds/tests/conftest.py | import os
import tempfile
import shutil
import tzlocal
import pytest
import portable_mds.mongoquery.mds
import portable_mds.sqlite.mds
variations = [portable_mds.mongoquery.mds,
portable_mds.sqlite.mds]
@pytest.fixture(params=variations, scope='function')
def mds_all(request):
    '''Provide a function level scoped FileStore instance talking to
    temporary database on localhost:27017 with both v0 and v1.
    '''
    # Parametrized over every backend variation (mongoquery, sqlite) so each
    # test runs once per backend module.
    tempdirname = tempfile.mkdtemp()
    mds = request.param.MDS({'directory': tempdirname,
                             'timezone': tzlocal.get_localzone().zone}, version=1)
    # Seed every collection file with an empty JSON array so reads succeed
    # before any inserts happen.
    filenames = ['run_starts.json', 'run_stops.json', 'event_descriptors.json',
                 'events.json']
    for fn in filenames:
        with open(os.path.join(tempdirname, fn), 'w') as f:
            f.write('[]')
    def delete_dm():
        shutil.rmtree(tempdirname)
    request.addfinalizer(delete_dm)
    return mds
| import os
import tempfile
import shutil
import tzlocal
import pytest
from ..mongoquery.mds import MDS
@pytest.fixture(params=[1], scope='function')
def mds_all(request):
'''Provide a function level scoped FileStore instance talking to
temporary database on localhost:27017 with both v0 and v1.
'''
ver = request.param
tempdirname = tempfile.mkdtemp()
mds = MDS({'directory': tempdirname,
'timezone': tzlocal.get_localzone().zone}, version=ver)
filenames = ['run_starts.json', 'run_stops.json', 'event_descriptors.json',
'events.json']
for fn in filenames:
with open(os.path.join(tempdirname, fn), 'w') as f:
f.write('[]')
def delete_dm():
shutil.rmtree(tempdirname)
request.addfinalizer(delete_dm)
return mds
| bsd-3-clause | Python |
d789c4c1a5ef3f2d2003a18aa58cbafe86d5c85e | update funmath | misssoft/Fan.Python | src/funmath.py | src/funmath.py | import sys
from math import factorial
from math import sqrt
def calculateFactorial(num):
    """Print index, factorial(index) and its digit count for 0 <= index < num.

    NOTE(review): the braced print arguments are one-element *set* literals,
    so values appear wrapped in braces (e.g. ``{5}``); an f-string was
    probably intended.  Kept as-is to preserve the script's output.
    """
    # Removed the unused ``x = range(0, num)`` and reuse the factorial list
    # for the digit counts instead of recomputing each factorial.
    f = [factorial(x) for x in range(num)]
    l = [len(str(value)) for value in f]
    for i in range(num):
        print({i}, "-", {f[i]}, "-", {l[i]})
def calculateFactorialinloop(num):
    """Print index, factorial(index) and its digit count, one line each."""
    for idx in range(num):
        fact = factorial(idx)
        print({idx}, "=", fact, "=", len(str(fact)))
def calculateFibonacci(num):
    """Recursively compute fib(num), printing each intermediate result.

    NOTE(review): the braced print arguments are one-element set literals,
    so values appear wrapped in braces; ``sum`` shadows the builtin; and
    the naive double recursion is O(2^n).
    """
    if num>1:
        sum = calculateFibonacci(num - 2) + calculateFibonacci(num -1)
        print('fab(',{num},') = ',{sum})
        return sum
    # Base cases: fib(0) = 0, fib(1) = 1.
    print('fab(',{num},') = ',{num})
    return num
def isPrime(x):
    """Return True if x is a prime number, False otherwise.

    Trial division up to sqrt(x); values below 2 are not prime.
    """
    if x < 2:
        return False
    for i in range(2, int(sqrt(x)) + 1):
        if x % i == 0:
            return False
    # Removed the stray debug print(x) so the predicate has no side
    # effects; printPrime() is responsible for output.
    return True
def printPrime(num):
    """Print every prime below *num*, one per line."""
    # Collect first, then print, to keep output ordering identical to the
    # original two-phase implementation.
    primes = [candidate for candidate in range(num) if isPrime(candidate)]
    for prime in primes:
        print(prime)
def main(x):
    """Entry point: print all primes below int(x).

    BUG FIX: the caller passes sys.argv[1] (a string), which previously
    crashed printPrime with an uncaught TypeError; converting with int()
    makes bad input raise the ValueError this handler already catches.
    """
    try:
        printPrime(int(x))
    except ValueError as e:
        print(e, file=sys.stderr)
        print("Continues")
if __name__ == '__main__':
main(sys.argv[1]) | import sys
from math import factorial
def calculateFactorial(num):
x = range(0, num)
f = [factorial(x) for x in range(num)]
l = [len(str(factorial(x))) for x in range(num)]
for i in range(num):
print({i},"-", {f[i]}, "-", {l[i]} )
def main(x):
try:
calculateFactorial((x))
except ValueError as e:
print(e, file=sys.stderr)
print("Continues")
if __name__ == '__main__':
main(sys.argv[1]) | mit | Python |
25e060157e11d68a6fcfe826af437f7ff105d161 | bump version | piotrjakimiak/cmsplugin-text-ng,360youlun/cmsplugin-text-ng,KristianOellegaard/cmsplugin-text-ng | cmsplugin_text_ng/__init__.py | cmsplugin_text_ng/__init__.py | __version__ = '0.5'
| __version__ = '0.4'
| bsd-3-clause | Python |
84a21b5ee1868454f53c33e28d3da18bd499d8a2 | update comments | devlights/try-python | trypython/stdlib/secrets01.py | trypython/stdlib/secrets01.py | # coding: utf-8
"""
secretsモジュールについてのサンプルです。
"""
import secrets
import string
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    """Demonstrates the ``secrets`` module (standard library since 3.6)."""

    def exec(self):
        # secrets is the stdlib module intended for security-sensitive
        # random values such as passwords and tokens.
        #
        # Reference:
        # https://www.blog.pythonlibrary.org/2017/02/16/pythons-new-secrets-module/
        pr('generate passwd', self.generate_password(32))
        pr('url_token', secrets.token_urlsafe(32))

    def generate_password(self, nbytes=8):
        """Generate a random alphanumeric password of *nbytes* characters."""
        alphabet = string.ascii_letters + string.digits
        return ''.join(secrets.choice(alphabet) for _ in range(nbytes))
def go():
    """Create the sample object and run its demonstration."""
    obj = Sample()
    obj.exec()
if __name__ == '__main__':
go()
| # coding: utf-8
"""
secretsモジュールについてのサンプルです。
"""
import secrets
import string
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
#
# secretsモジュールは、3.6から追加された標準モジュール
# 文字通りセキュアな値を管理することを目的としている
#
# パスワードやトークンの生成時に利用できる
#
pr('generate passwd', self.generate_password(32))
pr('url_token', secrets.token_urlsafe(32))
def generate_password(self, nbytes=8):
"""指定されたバイト数でパスワードを生成します。"""
characters = string.ascii_letters + string.digits
generate_chars = (secrets.choice(characters) for __ in range(nbytes))
return ''.join(generate_chars)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
| mit | Python |
f436e8f4fe774e73f59e0cda93be08dad9c1ce7d | convert & to &, this sucks | akrherz/pyWWA,akrherz/pyWWA | util/get_text_from_nwschat.py | util/get_text_from_nwschat.py | '''
Hit the NWSChat quasi web service to get the text IEM may have missed :(
https://nwschat.weather.gov/vtec/json-text.php?
year=2014&wfo=DDC&phenomena=SV&eventid=0020&significance=W
'''
import urllib2
import sys
import json
year = sys.argv[1]
wfo = sys.argv[2]
phenomena = sys.argv[3]
significance = sys.argv[4]
eventid = sys.argv[5]
def wrap(data):
    ''' convert data into more noaaportish '''
    # Unescape ampersands and use CR CR LF line endings, then frame the
    # product with SOH/ETX control characters.
    body = data.replace("&amp;", "&").replace("\n", "\r\r\n")
    return "\001" + body + "\003"
def process(j):
    ''' Process the json data j '''
    # j is the decoded web-service response; j['data'] holds at most one
    # matching VTEC product.
    if len(j['data']) == 0:
        print 'ERROR: No results found!'
        return
    # Write the initial report followed by each follow-up (SVS) statement,
    # each framed NOAAPort-style by wrap().
    out = open('/tmp/vtec_data.txt', 'w')
    out.write( wrap( j['data'][0]['report'] ) )
    for svs in j['data'][0]['svs']:
        out.write( wrap( svs ) )
    out.close()
if __name__ == '__main__':
    # Build the NWSChat VTEC text-service URL from the command-line values
    # parsed at module level, fetch it and process the decoded JSON.
    uri = ("https://nwschat.weather.gov/vtec/json-text.php?"+
        "year=%s&wfo=%s&phenomena=%s&eventid=%s&significance=%s") % (
        year, wfo, phenomena, eventid, significance)
    data = urllib2.urlopen(uri).read()
    j = json.loads(data)
    process(j)
Hit the NWSChat quasi web service to get the text IEM may have missed :(
https://nwschat.weather.gov/vtec/json-text.php?
year=2014&wfo=DDC&phenomena=SV&eventid=0020&significance=W
'''
import urllib2
import sys
import json
year = sys.argv[1]
wfo = sys.argv[2]
phenomena = sys.argv[3]
significance = sys.argv[4]
eventid = sys.argv[5]
def wrap(data):
''' convert data into more noaaportish '''
return "\001" + data.replace("\n", "\r\r\n") +"\003"
def process(j):
''' Process the json data j '''
if len(j['data']) == 0:
print 'ERROR: No results found!'
return
out = open('/tmp/vtec_data.txt', 'w')
out.write( wrap( j['data'][0]['report'] ) )
for svs in j['data'][0]['svs']:
out.write( wrap( svs ) )
out.close()
if __name__ == '__main__':
uri = ("https://nwschat.weather.gov/vtec/json-text.php?"+
"year=%s&wfo=%s&phenomena=%s&eventid=%s&significance=%s") % (
year, wfo, phenomena, eventid, significance)
data = urllib2.urlopen(uri).read()
j = json.loads(data)
process(j) | mit | Python |
64b85415f540efd64132c263c6de240bfc3b0bd8 | Fix unused import error. | synicalsyntax/zulip,timabbott/zulip,punchagan/zulip,showell/zulip,shubhamdhama/zulip,zulip/zulip,eeshangarg/zulip,showell/zulip,brainwane/zulip,zulip/zulip,kou/zulip,timabbott/zulip,andersk/zulip,synicalsyntax/zulip,andersk/zulip,punchagan/zulip,punchagan/zulip,hackerkid/zulip,eeshangarg/zulip,rht/zulip,synicalsyntax/zulip,zulip/zulip,andersk/zulip,rht/zulip,eeshangarg/zulip,brainwane/zulip,andersk/zulip,rht/zulip,eeshangarg/zulip,showell/zulip,hackerkid/zulip,rht/zulip,rht/zulip,andersk/zulip,eeshangarg/zulip,shubhamdhama/zulip,zulip/zulip,zulip/zulip,synicalsyntax/zulip,eeshangarg/zulip,punchagan/zulip,punchagan/zulip,brainwane/zulip,kou/zulip,hackerkid/zulip,eeshangarg/zulip,zulip/zulip,showell/zulip,kou/zulip,kou/zulip,timabbott/zulip,timabbott/zulip,hackerkid/zulip,hackerkid/zulip,brainwane/zulip,zulip/zulip,shubhamdhama/zulip,hackerkid/zulip,shubhamdhama/zulip,andersk/zulip,timabbott/zulip,timabbott/zulip,shubhamdhama/zulip,showell/zulip,rht/zulip,punchagan/zulip,kou/zulip,shubhamdhama/zulip,hackerkid/zulip,synicalsyntax/zulip,synicalsyntax/zulip,kou/zulip,rht/zulip,shubhamdhama/zulip,showell/zulip,timabbott/zulip,punchagan/zulip,showell/zulip,andersk/zulip,brainwane/zulip,brainwane/zulip,kou/zulip,brainwane/zulip,synicalsyntax/zulip | analytics/migrations/0015_clear_duplicate_counts.py | analytics/migrations/0015_clear_duplicate_counts.py | # -*- coding: utf-8 -*-
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Count, Sum
def clear_duplicate_counts(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """This is a preparatory migration for our Analytics tables.

    The backstory is that Django's unique_together indexes do not properly
    handle the subgroup=None corner case (allowing duplicate rows that have a
    subgroup of None), which meant that in race conditions, rather than updating
    an existing row for the property/realm/time with subgroup=None, Django would
    create a duplicate row.

    In the next migration, we'll add a proper constraint to fix this bug, but
    we need to fix any existing problematic rows before we can add that constraint.

    We fix this in an appropriate fashion for each type of CountStat object; mainly
    this means deleting the extra rows, but for LoggingCountStat objects, we need to
    additionally combine the sums.
    """
    RealmCount = apps.get_model('analytics', 'RealmCount')

    # Find (realm, property, end_time) triples that have more than one
    # subgroup=None row; Sum('value') pre-computes the combined total.
    realm_counts = RealmCount.objects.filter(subgroup=None).values(
        'realm_id', 'property', 'end_time').annotate(
        Count('id'), Sum('value')).filter(id__count__gt=1)

    for realm_count in realm_counts:
        # Strip the annotations so the remaining keys form a filter that
        # matches exactly the duplicate rows.
        realm_count.pop('id__count')
        total_value = realm_count.pop('value__sum')
        duplicate_counts = list(RealmCount.objects.filter(**realm_count))
        first_count = duplicate_counts[0]
        if realm_count['property'] in ["invites_sent::day", "active_users_log:is_bot:day"]:
            # For LoggingCountStat objects, the right fix is to combine the totals;
            # for other CountStat objects, we expect the duplicates to have the same value.
            # And so all we need to do is delete them.
            first_count.value = total_value
            first_count.save()
        # Keep the first row; remove every other duplicate.
        to_cleanup = duplicate_counts[1:]
        for duplicate_count in to_cleanup:
            duplicate_count.delete()
class Migration(migrations.Migration):
    # Pure data migration; reversing is a no-op because the deleted
    # duplicate rows cannot be reconstructed.

    dependencies = [
        ('analytics', '0014_remove_fillstate_last_modified'),
    ]

    operations = [
        migrations.RunPython(clear_duplicate_counts,
                             reverse_code=migrations.RunPython.noop),
    ]
| # -*- coding: utf-8 -*-
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Count, Sum, Q
def clear_duplicate_counts(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
"""This is a preparatory migration for our Analytics tables.
The backstory is that Django's unique_together indexes do not properly
handle the subgroup=None corner case (allowing duplicate rows that have a
subgroup of None), which meant that in race conditions, rather than updating
an existing row for the property/realm/time with subgroup=None, Django would
create a duplicate row.
In the next migration, we'll add a proper constraint to fix this bug, but
we need to fix any existing problematic rows before we can add that constraint.
We fix this in an appropriate fashion for each type of CountStat object; mainly
this means deleting the extra rows, but for LoggingCountStat objects, we need to
additionally combine the sums.
"""
RealmCount = apps.get_model('analytics', 'RealmCount')
realm_counts = RealmCount.objects.filter(subgroup=None).values(
'realm_id', 'property', 'end_time').annotate(
Count('id'), Sum('value')).filter(id__count__gt=1)
for realm_count in realm_counts:
realm_count.pop('id__count')
total_value = realm_count.pop('value__sum')
duplicate_counts = list(RealmCount.objects.filter(**realm_count))
first_count = duplicate_counts[0]
if realm_count['property'] in ["invites_sent::day", "active_users_log:is_bot:day"]:
# For LoggingCountStat objects, the right fix is to combine the totals;
# for other CountStat objects, we expect the duplicates to have the same value.
# And so all we need to do is delete them.
first_count.value = total_value
first_count.save()
to_cleanup = duplicate_counts[1:]
for duplicate_count in to_cleanup:
duplicate_count.delete()
class Migration(migrations.Migration):
dependencies = [
('analytics', '0014_remove_fillstate_last_modified'),
]
operations = [
migrations.RunPython(clear_duplicate_counts,
reverse_code=migrations.RunPython.noop),
]
| apache-2.0 | Python |
70b9de8e589776ba90c000addfa24dffe5915b33 | Set version to v3.0.0a32 | spacy-io/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy | spacy/about.py | spacy/about.py | # fmt: off
__title__ = "spacy-nightly"
__version__ = "3.0.0a32"
__release__ = True
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"
__projects_branch__ = "v3"
| # fmt: off
__title__ = "spacy-nightly"
__version__ = "3.0.0a31"
__release__ = True
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"
__projects_branch__ = "v3"
| mit | Python |
d90398643933999d734a09bc3637a8723d5de2c3 | Increment version | explosion/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy | spacy/about.py | spacy/about.py | # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.0.0a16'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = True
__docs_models__ = 'https://alpha.spacy.io/usage/models'
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json'
__model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/develop/templates/model/'
| # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.0.0a15'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False
__docs_models__ = 'https://alpha.spacy.io/usage/models'
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json'
__model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/develop/templates/model/'
| mit | Python |
9da597780ee7115effb9662c5d36d1b6553c1cfb | Fix typo | enram/data-repository,enram/infrastructure,enram/data-repository,enram/data-repository,enram/infrastructure,enram/data-repository | file_transfer/baltrad_to_s3.py | file_transfer/baltrad_to_s3.py | """
Baltrad to S3 porting
"""
import sys
from creds import URL, LOGIN, PASSWORD
import datamover as dm
def main():
"""Run data transfer from Baltrad to S3"""
# ------------------
# DATA TRANSFER
# ------------------
# Setup the connection of the Baltrad and S3
btos = dm.BaltradToS3(URL, LOGIN, PASSWORD, "lw-enram")
# Execute the transfer
btos.transfer(name_match="_vp_", overwrite=False, limit=None)
btos.report(reset_file=False, transfertype="Baltrad to S3")
# ------------------
# UPDATE COVERAGE
# ------------------
# Connect to S3 client
s3client = dm.S3EnramHandler("lw-enram")
# Rerun file list overview to extract the current coverage
coverage_count = s3client.count_enram_coverage(level='day')
with open("coverage.csv", 'w') as outfile:
dm.coverage_to_csv(outfile, coverage_count)
s3client.upload_file("coverage.csv", "coverage.csv")
# ----------------------------
# UPDATE ZIP FILE AVAILABILITY
# ----------------------------
# Rerun ZIP handling of S3 for the transferred files, given by report
s3client.create_zip_version(btos.transferred)
if __name__ == "__main__":
sys.exit(main())
| """
Baltrad to S3 porting
"""
import sys
from creds import URL, LOGIN, PASSWORD
import datamover as dm
def main():
"""Run data transfer from Baltrad to S3"""
# ------------------
# DATA TRANSFER
# ------------------
# Setup the connection of the Baltrad and S3
btos = dm.BaltradToS3(URL, LOGIN, PASSWORD, "lw-enram")
# Execute the transfer
btos.transfer(name_match="_vp_", overwrite=False, limit=None)
btos.report(reset_file=False, transfertype="Baltrad to S3")
# ------------------
# UPDATE COVERAGE
# ------------------
# Connecto to S3 client
s3client = dm.S3EnramHandler("lw-enram")
# Rerun file list overview to extract the current coverage
coverage_count = s3client.count_enram_coverage(level='day')
with open("coverage.csv", 'w') as outfile:
dm.coverage_to_csv(outfile, coverage_count)
s3client.upload_file("coverage.csv", "coverage.csv")
# ----------------------------
# UPDATE ZIP FILE AVAILABILITY
# ----------------------------
# Rerun ZIP handling of S3 for the transferred files, given by report
s3client.create_zip_version(btos.transferred)
if __name__ == "__main__":
sys.exit(main())
| mit | Python |
46a222127bcfc30f89f8f43bac45c00fb677adf9 | Add Interfaces column to plugins output. (#1667) | Danfocus/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,malkavi/Flexget,Danfocus/Flexget,poulpito/Flexget,crawln45/Flexget,malkavi/Flexget,tobinjt/Flexget,Danfocus/Flexget,sean797/Flexget,ianstalk/Flexget,jawilson/Flexget,gazpachoking/Flexget,jawilson/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,Danfocus/Flexget,sean797/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,poulpito/Flexget,jawilson/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,JorisDeRieck/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,malkavi/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,tobinjt/Flexget,qk4l/Flexget,qk4l/Flexget,poulpito/Flexget,gazpachoking/Flexget,ianstalk/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,jawilson/Flexget,sean797/Flexget | flexget/plugins/cli/plugins.py | flexget/plugins/cli/plugins.py | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from colorclass.toggles import disable_all_colors
from flexget import options
from flexget.event import event
from flexget.plugin import get_plugins
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize
log = logging.getLogger('plugins')
def plugins_summary(manager, options):
if options.table_type == 'porcelain':
disable_all_colors()
header = ['Keyword', 'Interfaces', 'Phases', 'Flags']
table_data = [header]
for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
if options.builtins and not plugin.builtin:
continue
flags = []
if plugin.instance.__doc__:
flags.append('doc')
if plugin.builtin:
flags.append('builtin')
if plugin.debug:
if not options.debug:
continue
flags.append('developers')
handlers = plugin.phase_handlers
roles = []
for phase in handlers:
priority = handlers[phase].priority
roles.append('{0}({1})'.format(phase, priority))
name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])
try:
table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
return
console(colorize('green', ' Built-in plugins'))
@event('options.register')
def register_parser_arguments():
parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',
parents=[table_parser])
parser.add_argument('--interface', help='Show plugins belonging to this interface')
parser.add_argument('--phase', help='Show plugins that act on this phase')
parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')
| from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from colorclass.toggles import disable_all_colors
from flexget import options
from flexget.event import event
from flexget.plugin import get_plugins
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize
log = logging.getLogger('plugins')
def plugins_summary(manager, options):
if options.table_type == 'porcelain':
disable_all_colors()
header = ['Keyword', 'Phases', 'Flags']
table_data = [header]
for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
if options.builtins and not plugin.builtin:
continue
flags = []
if plugin.instance.__doc__:
flags.append('doc')
if plugin.builtin:
flags.append('builtin')
if plugin.debug:
if not options.debug:
continue
flags.append('developers')
handlers = plugin.phase_handlers
roles = []
for phase in handlers:
priority = handlers[phase].priority
roles.append('{0}({1})'.format(phase, priority))
name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
table_data.append([name, ', '.join(roles), ', '.join(flags)])
try:
table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
return
console(colorize('green', ' Built-in plugins'))
@event('options.register')
def register_parser_arguments():
parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',
parents=[table_parser])
parser.add_argument('--interface', help='Show plugins belonging to this interface')
parser.add_argument('--phase', help='Show plugins that act on this phase')
parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')
| mit | Python |
bd5c215c1c481f3811753412bca6b509bb00591a | Improve the way that import middlewares | lord63/me-api | me_api/app.py | me_api/app.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import Flask
from werkzeug.utils import import_string
from me_api.middleware.me import me
from me_api.cache import cache
middlewares = {
'douban': 'me_api.middleware.douban:douban_api',
'github': 'me_api.middleware.github:github_api',
'instagram': 'me_api.middleware.instagram:instagram_api',
'keybase': 'me_api.middleware.keybase:keybase_api',
'medium': 'me_api.middleware.medium:medium_api',
'stackoverflow': 'me_api.middleware.stackoverflow:stackoverflow_api',
}
def create_app(config):
app = Flask(__name__)
app.config.from_object(config)
cache.init_app(app)
modules = config.modules['modules']
app.register_blueprint(me)
for module in modules.keys():
blueprint = import_string(middlewares[module])
app.register_blueprint(blueprint)
return app
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import Flask
from .middleware.me import me
from .cache import cache
def _register_module(app, module):
if module == 'douban':
from .middleware import douban
app.register_blueprint(douban.douban_api)
elif module == 'github':
from .middleware import github
app.register_blueprint(github.github_api)
elif module == 'instagram':
from .middleware import instagram
app.register_blueprint(instagram.instagram_api)
elif module == 'keybase':
from .middleware import keybase
app.register_blueprint(keybase.keybase_api)
elif module == 'medium':
from .middleware import medium
app.register_blueprint(medium.medium_api)
elif module == 'stackoverflow':
from .middleware import stackoverflow
app.register_blueprint(stackoverflow.stackoverflow_api)
def create_app(config):
app = Flask(__name__)
app.config.from_object(config)
cache.init_app(app)
modules = config.modules['modules']
app.register_blueprint(me)
for module in modules.keys():
_register_module(app, module)
return app
| mit | Python |
bbe83e43feef903d3ac4485280f7b6f01a50b369 | Fix bug: closes a batch if the data ends and the batch size is not reach | google/megalista,google/megalista | megalist_dataflow/utils/group_by_execution_dofn.py | megalist_dataflow/utils/group_by_execution_dofn.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from apache_beam import DoFn
from apache_beam.transforms import window
class GroupByExecutionDoFn(DoFn):
"""
Group elements received in batches.
Elements must by dictionaries with an Execution on key 'execution'.
When an Execution changes between elements, a batch is returned if it hasn't archived batch size
"""
def __init__(
self,
batch_size=5000 # type: int
):
super().__init__()
self._batch_size = batch_size
self._batch = None
self._last_execution = None
def start_bundle(self):
self._batch = []
def process(self, element, *args, **kwargs):
execution = element['execution']
# Finish a batch if the Execution changes from last element
if self._last_execution is not None and self._last_execution != execution:
yield self._batch
self._batch = []
self._last_execution = execution
self._batch.append(element)
if len(self._batch) >= self._batch_size:
yield self._batch
self._batch = []
def finish_bundle(self):
if len(self._batch) > 0:
yield window.GlobalWindows.windowed_value(self._batch)
self._batch = []
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from apache_beam import DoFn
class GroupByExecutionDoFn(DoFn):
"""
Group elements received in batches.
Elements must by dictionaries with an Execution on key 'execution'.
When an Execution changes between elements, a batch is returned if it hasn't archived batch size
"""
def __init__(self,
batch_size=5000 # type: int
):
super().__init__()
self._batch_size = batch_size
self._batch = None
self._last_execution = None
def start_bundle(self):
self._batch = []
def process(self, element, *args, **kwargs):
execution = element['execution']
if self._last_execution is not None and self._last_execution != execution:
yield self._batch
self._batch = []
self._last_execution = execution
self._batch.append(element)
if len(self._batch) >= self._batch_size:
yield self._batch
self._batch = []
| apache-2.0 | Python |
af6f4868f4329fec75e43fe0cdcd1a7665c5238a | Remove comment on attaching cloud debugger | DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation | contentcuration/manage.py | contentcuration/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
#import warnings
#warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| #!/usr/bin/env python
import os
import sys
# Attach Python Cloud Debugger
if __name__ == "__main__":
#import warnings
#warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| mit | Python |
256c321398daaad749905a466ba1a7dcd75711aa | Update to exception handling | TJKessler/ECNet | ecnet/error_utils.py | ecnet/error_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ecnet/error_utils.py
# v.1.3.0.dev1
# Developed in 2018 by Travis Kessler <Travis_Kessler@student.uml.edu>
#
# This program contains functions for error calculations
#
import numpy as np
from math import sqrt
# Calculates the root-mean-square error between two arguments of equal length
def calc_rmse(y_hat, y):
try:
return(np.sqrt(((y_hat-y)**2).mean()))
except:
try:
return(np.sqrt(((np.asarray(y_hat)-np.asarray(y))**2).mean()))
except:
raise ValueError("Error in calculating RMSE. Check input data format.")
# Calculates the mean average error between two arguments of equal length
def calc_mean_abs_error(y_hat, y):
try:
return(abs(y_hat-y).mean())
except:
try:
return(abs(np.asarray(y_hat)-np.asarray(y)).mean())
except:
raise ValueError("Error in calculating mean average error. Check input data format.")
# Calculates the median absoltute error between two arguments of equal length
def calc_med_abs_error(y_hat, y):
try:
return(np.median(np.absolute(y_hat-y)))
except:
try:
return(np.median(np.absolute(np.asarray(y_hat)-np.asarray(y))))
except:
raise ValueError("Error in calculating median absolute error. Check input data format.")
# Calculates the correlation of determination, or r-squared value, between two arguments of equal length
def calc_r2(y_hat, y):
try:
y_mean = y.mean()
except:
try:
y_form = []
for i in range(len(y)):
y_form.append(y[i][0])
y_mean = sum(y_form)/len(y_form)
except:
raise ValueError("Error in calculating r-squared. Check input data format.")
try:
s_res = np.sum((y_hat-y)**2)
s_tot = np.sum((y-y_mean)**2)
return(1 - (s_res/s_tot))
except:
try:
s_res = np.sum((np.asarray(y_hat)-np.asarray(y))**2)
s_tot = np.sum((np.asarray(y)-y_mean)**2)
return(1 - (s_res/s_tot))
except:
raise ValueError("Error in calculating r-squared. Check input data format.")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ecnet/error_utils.py
# v.1.3.0.dev1
# Developed in 2018 by Travis Kessler <Travis_Kessler@student.uml.edu>
#
# This program contains functions for error calculations
#
import numpy as np
from math import sqrt
# Calculates the root-mean-square error between two arguments of equal length
def calc_rmse(y_hat, y):
try:
return(np.sqrt(((y_hat-y)**2).mean()))
except:
try:
return(np.sqrt(((np.asarray(y_hat)-np.asarray(y))**2).mean()))
except:
print("Error in calculating RMSE. Check input data format.")
raise
sys.exit()
# Calculates the mean average error between two arguments of equal length
def calc_mean_abs_error(y_hat, y):
try:
return(abs(y_hat-y).mean())
except:
try:
return(abs(np.asarray(y_hat)-np.asarray(y)).mean())
except:
print("Error in calculating mean average error. Check input data format.")
raise
sys.exit()
# Calculates the median absoltute error between two arguments of equal length
def calc_med_abs_error(y_hat, y):
try:
return(np.median(np.absolute(y_hat-y)))
except:
try:
return(np.median(np.absolute(np.asarray(y_hat)-np.asarray(y))))
except:
return("Error in calculating median absolute error. Check input data format.")
raise
sys.exit()
# Calculates the correlation of determination, or r-squared value, between two arguments of equal length
def calc_r2(y_hat, y):
try:
y_mean = y.mean()
except:
try:
y_form = []
for i in range(len(y)):
y_form.append(y[i][0])
y_mean = sum(y_form)/len(y_form)
except:
print("Error in calculating r-squared. Check input data format.")
raise
sys.exit()
try:
s_res = np.sum((y_hat-y)**2)
s_tot = np.sum((y-y_mean)**2)
return(1 - (s_res/s_tot))
except:
try:
s_res = np.sum((np.asarray(y_hat)-np.asarray(y))**2)
s_tot = np.sum((np.asarray(y)-y_mean)**2)
return(1 - (s_res/s_tot))
except:
print("Error in calculating r-squared. Check input data format.")
raise
sys.exit()
| mit | Python |
948b9987afa95d7a69bd61f3d8f9fea822323b01 | Implement equality check for DraftText nodes | gasman/wagtaildraftail,gasman/wagtaildraftail,gasman/wagtaildraftail,springload/wagtaildraftail,gasman/wagtaildraftail,springload/wagtaildraftail,springload/wagtaildraftail,springload/wagtaildraftail | wagtaildraftail/draft_text.py | wagtaildraftail/draft_text.py | from __future__ import absolute_import, unicode_literals
import json
from django.utils.functional import cached_property
from draftjs_exporter.html import HTML
from wagtail.wagtailcore.rich_text import RichText
from wagtaildraftail.settings import get_exporter_config
class DraftText(RichText):
def __init__(self, value, **kwargs):
super(DraftText, self).__init__(value or '{}', **kwargs)
self.exporter = HTML(get_exporter_config())
def get_json(self):
return self.source
@cached_property
def _html(self):
return self.exporter.render(json.loads(self.source))
def __html__(self):
return self._html
def __eq__(self, other):
return self.__html__() == other.__html__()
| from __future__ import absolute_import, unicode_literals
import json
from draftjs_exporter.html import HTML
from wagtail.wagtailcore.rich_text import RichText
from wagtaildraftail.settings import get_exporter_config
class DraftText(RichText):
def __init__(self, value, **kwargs):
super(DraftText, self).__init__(value or '{}', **kwargs)
self.exporter = HTML(get_exporter_config())
def get_json(self):
return self.source
def __html__(self):
return self.exporter.render(json.loads(self.source))
| mit | Python |
fa06c892c163054e3420e756acaa0b3af7863b88 | add import test for OpenCV | rjw57/calibtools | setup.py | setup.py | import os
import sys
from setuptools import setup, find_packages
from calibtools import __version__ as version
try:
import cv2
HAVE_OPENCV=True
except ImportError:
HAVE_OPENCV=False
if not HAVE_OPENCV:
sys.stderr.write("OpenCV's Python binding is required for calibtools\n")
sys.exit(1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'calibtools',
version = version,
description = 'Camera calibration tools for Python',
long_description = read('README.rst'),
author = 'Rich Wareham',
author_email = 'rich.calibtools@richwareham.com',
url = 'https://github.com/rjw57/calibtools/',
packages = find_packages(),
entry_points = {
'console_scripts': [
'calibtools-calib = calibtools.calib:start',
'calibtools-undistort = calibtools.undistort:start',
]
},
licence = 'MIT',
install_requires = [
'moviepy', 'numpy', 'docopt',
],
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
],
)
| import os
from setuptools import setup, find_packages
from calibtools import __version__ as version
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'calibtools',
version = version,
description = 'Camera calibration tools for Python',
long_description = read('README.rst'),
author = 'Rich Wareham',
author_email = 'rich.calibtools@richwareham.com',
url = 'https://github.com/rjw57/calibtools/',
packages = find_packages(),
entry_points = {
'console_scripts': [
'calibtools-calib = calibtools.calib:start',
'calibtools-undistort = calibtools.undistort:start',
]
},
licence = 'MIT',
install_requires = [
'moviepy', 'numpy', 'docopt',
],
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
],
)
| mit | Python |
dcca2dcdba9698c4a354cdb3de68c37a07ba0b1e | Create 0.2 release | bridadan/pyOCD,wjzhang/pyOCD,NordicSemiconductor/pyOCD,matthewelse/pyOCD,devanlai/pyOCD,devanlai/pyOCD,geky/pyOCDgdb,geky/pyDAPLink,flit/pyOCD,adamgreen/pyOCD,0xc0170/pyOCD,mesheven/pyOCD,geky/pyOCD,tgarc/pyOCD,wjzhang/pyOCD,mbedmicro/pyOCD,tgarc/pyOCD,wjzhang/pyOCD,matthewelse/pyOCD,matthewelse/pyOCD,pyocd/pyOCD,oliviermartin/pyOCD,adamgreen/pyOCD,mbedmicro/pyOCD,c1728p9/pyOCD,0xc0170/pyOCD,adamgreen/pyOCD,molejar/pyOCD,tgarc/pyOCD,mesheven/pyOCD,0xc0170/pyOCD,mbedmicro/pyOCD,bridadan/pyOCD,bridadan/pyOCD,mesheven/pyOCD,oliviermartin/pyOCD,geky/pyOCD,c1728p9/pyOCD,molejar/pyOCD,pyocd/pyOCD,c1728p9/pyOCD,flit/pyOCD,oliviermartin/pyOCD,molejar/pyOCD,devanlai/pyOCD | setup.py | setup.py | """
mbed CMSIS-DAP debugger
Copyright (c) 2012-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
setup(
name="pyOCD",
version="0.2",
description="CMSIS-DAP debugger for python",
author="samux, emilmont",
author_email="Samuel.Mokrani@arm.com, Emilio.Monti@arm.com",
license="Apache 2.0",
classifiers = [
"Development Status :: 4 - Beta",
"License :: Apache 2.0",
"Programming Language :: Python",
],
packages=["pyOCD", "pyOCD.flash", "pyOCD.gdbserver", "pyOCD.interface", "pyOCD.target", "pyOCD.transport", "pyOCD.board"]
)
| """
mbed CMSIS-DAP debugger
Copyright (c) 2012-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
setup(
name="pyOCD",
version="0.1",
description="CMSIS-DAP debugger for python",
author="samux, emilmont",
author_email="Samuel.Mokrani@arm.com, Emilio.Monti@arm.com",
license="Apache 2.0",
classifiers = [
"Development Status :: 4 - Beta",
"License :: Apache 2.0",
"Programming Language :: Python",
],
packages=["pyOCD", "pyOCD.flash", "pyOCD.gdbserver", "pyOCD.interface", "pyOCD.target", "pyOCD.transport", "pyOCD.board"]
)
| apache-2.0 | Python |
cca77827bcd80064bbcf47d362e367c8fe8dbcbb | Bump version 1.0.10 | arteria/django-compat,arteria/django-compat | setup.py | setup.py | # -*- encoding: utf-8 -*-
import os, sys
from setuptools import setup
from setuptools import find_packages
# Make the open function accept encodings in python < 3.x
if sys.version_info[0] < 3:
import codecs
open = codecs.open # pylint: disable=redefined-builtin
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)
def read(fname):
return open(get_path(fname), 'r', encoding='utf8').read()
if sys.argv[-1] == 'genreadme':
try:
import pypandoc
long_description = pypandoc.convert(get_path('README.md'), 'rst')
long_description = long_description.split('<!---Illegal PyPi RST data -->')[0]
f = open(get_path('README.rst'), 'w')
f.write(long_description)
f.close()
print("Successfully converted README.md to README.rst")
except (IOError, ImportError):
pass
sys.exit()
try:
long_description=read('README.rst')
except (OSError, IOError):
try:
long_description=read('README.md')
except (OSError, IOError):
long_description = ""
install_requires = [
'django>=1.4,<1.10',
'six>=1.10.0',
]
setup(
name="django-compat",
version="1.0.10",
author_email="admin@arteria.ch",
packages=find_packages(),
include_package_data=True,
description="For- and backwards compatibility layer for Django 1.4, 1.7, 1.8, and 1.9",
long_description=long_description,
license='MIT',
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
| # -*- encoding: utf-8 -*-
import os, sys
from setuptools import setup
from setuptools import find_packages
# Make the open function accept encodings in python < 3.x
if sys.version_info[0] < 3:
import codecs
open = codecs.open # pylint: disable=redefined-builtin
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)
def read(fname):
return open(get_path(fname), 'r', encoding='utf8').read()
if sys.argv[-1] == 'genreadme':
try:
import pypandoc
long_description = pypandoc.convert(get_path('README.md'), 'rst')
long_description = long_description.split('<!---Illegal PyPi RST data -->')[0]
f = open(get_path('README.rst'), 'w')
f.write(long_description)
f.close()
print("Successfully converted README.md to README.rst")
except (IOError, ImportError):
pass
sys.exit()
try:
long_description=read('README.rst')
except (OSError, IOError):
try:
long_description=read('README.md')
except (OSError, IOError):
long_description = ""
install_requires = [
'django>=1.4,<1.10',
'six>=1.10.0',
]
setup(
name="django-compat",
version="1.0.9",
author_email="admin@arteria.ch",
packages=find_packages(),
include_package_data=True,
description="For- and backwards compatibility layer for Django 1.4, 1.7, 1.8, and 1.9",
long_description=long_description,
license='MIT',
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
| mit | Python |
920d1946c6188b822d4d0f318741fdc784dbf0b9 | change version # | kod3r/tracestack,danrobinson/tracestack | setup.py | setup.py | #!/usr/bin/env python
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
from setuptools import setup
setup(name='tracestack',
version='0.2.4',
description='Instantly search your Python error messages on the web.',
author='Dan Robinson',
author_email='danrobinson010@gmail.com',
url='https://www.github.com/danrobinson/tracestack',
download_url='https://github.com/danrobinson/tracestack/tarball/0.2.4',
long_description=long_description,
packages=['tracestack'],
test_suite="tests",
tests_require=["mock", "tox"],
entry_points = {'console_scripts': ['tracestack=tracestack.command_line:run'],}
) | #!/usr/bin/env python
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
from setuptools import setup
setup(name='tracestack',
version='0.2.3',
description='Instantly search your Python error messages on the web.',
author='Dan Robinson',
author_email='danrobinson010@gmail.com',
url='https://www.github.com/danrobinson/tracestack',
download_url='https://github.com/danrobinson/tracestack/tarball/0.2.3',
long_description=long_description,
packages=['tracestack'],
test_suite="tests",
tests_require=["mock", "tox"],
entry_points = {'console_scripts': ['tracestack=tracestack.command_line:run'],}
) | mit | Python |
a6c28c06523db2bdec94c426c9e345fc163a0bb9 | Update URLs to reflect new structure | Schevo/schevogtk | setup.py | setup.py | __version__ = '1.0a3'
from setuptools import setup, find_packages
import sys, os
import textwrap
setup(
name="SchevoGtk",
version=__version__,
description="Schevo tools for PyGTK",
long_description=textwrap.dedent("""
Provides integration between Schevo_ and PyGTK_.
.. _Schevo: http://schevo.org/
.. _PyGTK: http://pygtk.org/
The latest development version is available in a `Subversion
repository
<http://schevo.org/svn/trunk/Gtk#egg=SchevoGtk-dev>`__.
"""),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
keywords='',
author='Orbtech, L.L.C. and contributors',
author_email='schevo-devel@lists.schevo.org',
url='http://schevo.org/wiki/SchevoGtk',
license='LGPL',
platforms=['UNIX', 'Windows'],
packages=find_packages(exclude=['doc', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Schevo >= 3.0b2',
# XXX: The following don't yet work.
## 'kiwi==dev',
## 'gazpacho==dev',
],
tests_require=[
'nose >= 0.9.0',
],
test_suite='nose.collector',
extras_require={
},
dependency_links = [
'http://schevo.org/files/thirdparty/',
],
entry_points = """
[schevo.schevo_command]
gnav = schevogtk2.script:start
""",
)
| __version__ = '1.0a3'
from setuptools import setup, find_packages
import sys, os
import textwrap
setup(
name="SchevoGtk",
version=__version__,
description="Schevo tools for PyGTK",
long_description=textwrap.dedent("""
Provides integration between Schevo_ and PyGTK_.
.. _Schevo: http://schevo.org/
.. _PyGTK: http://pygtk.org/
The latest development version is available in a `Subversion
repository
<http://schevo.org/svn/trunk/Gtk#egg=SchevoGtk-dev>`__.
"""),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
keywords='',
author='Orbtech, L.L.C. and contributors',
author_email='schevo-devel@lists.schevo.org',
url='http://schevo.org/trac/wiki/SchevoGtk',
license='LGPL',
platforms=['UNIX', 'Windows'],
packages=find_packages(exclude=['doc', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Schevo >= 3.0b2',
# XXX: The following don't yet work.
## 'kiwi==dev',
## 'gazpacho==dev',
],
tests_require=[
'nose >= 0.9.0',
],
test_suite='nose.collector',
extras_require={
},
dependency_links = [
'http://schevo.org/files/thirdparty/',
],
entry_points = """
[schevo.schevo_command]
gnav = schevogtk2.script:start
""",
)
| mit | Python |
15f63ad1656c96aa05367efcaee45d30e74fbf6d | make pylint required for running tests so it installs on circle automatically | pantheon-systems/docker_iptables | setup.py | setup.py | from setuptools import setup
setup(
name='docker_iptables',
scripts=['docker_iptables.py'],
version='0.0.1',
tests_require=['pylint', 'mock'],
test_suite="tests",
)
| from setuptools import setup
setup(
name='docker_iptables',
scripts=['docker_iptables.py'],
version='0.0.1',
tests_require=['mock'],
test_suite="tests",
)
| mit | Python |
ad53baaa9af1e18068c0e4924c123ffa6cd5dde0 | Bump version number | AntoineGagne/cmus-notify | setup.py | setup.py | #! /usr/bin/env python3
import os
from setuptools import setup
def get_long_description(file_name: str) -> str:
"""Gets the long description from the specified file's name.
:param file_name: The file's name
:type file_name: str
:return: The content of the file
:rtype: str
"""
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
if __name__ == '__main__':
setup(
name='cmus-notify',
version='1.2.2',
description='A package for displaying Cmus current status in notifications',
author='Antoine Gagne',
keywords='utilities application cli hook',
author_email='antoine.gagne.2@ulaval.ca',
url='https://github.com/AntoineGagne/cmus-notify',
packages=['cmus_notify'],
entry_points={
'console_scripts': ['cmus_notify = cmus_notify.cmus_notify:main']
},
license='MIT',
data_files=[],
include_package_data=True,
long_description=get_long_description('README.rst'),
setup_requires=['pytest-runner', 'flake8'],
tests_require=['pytest'],
test_suite='tests',
scripts=[],
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Topic :: Multimedia :: Sound/Audio'
],
install_requires=['notify2>=0.3']
)
| #! /usr/bin/env python3
import os
from setuptools import setup
def get_long_description(file_name: str) -> str:
"""Gets the long description from the specified file's name.
:param file_name: The file's name
:type file_name: str
:return: The content of the file
:rtype: str
"""
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
if __name__ == '__main__':
setup(
name='cmus-notify',
version='1.2.1',
description='A package for displaying Cmus current status in notifications',
author='Antoine Gagne',
keywords='utilities application cli hook',
author_email='antoine.gagne.2@ulaval.ca',
url='https://github.com/AntoineGagne/cmus-notify',
packages=['cmus_notify'],
entry_points={
'console_scripts': ['cmus_notify = cmus_notify.cmus_notify:main']
},
license='MIT',
data_files=[],
include_package_data=True,
long_description=get_long_description('README.rst'),
setup_requires=['pytest-runner', 'flake8'],
tests_require=['pytest'],
test_suite='tests',
scripts=[],
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Topic :: Multimedia :: Sound/Audio'
],
install_requires=['notify2>=0.3']
)
| mit | Python |
d1fc96d56e0efcf89a4269d4c0db90ecc7e89ba7 | Add `south_migrations` to packages in setup.py | samuelcolvin/django-watson,arteria/django-watson,dbaxa/django-watson,etianen/django-watson,philippeowagner/django-watson,dbaxa/django-watson,etianen/django-watson,arteria/django-watson,samuelcolvin/django-watson,philippeowagner/django-watson | setup.py | setup.py | import os
from distutils.core import setup
setup(
name = "django-watson",
version = "1.1.6",
description = "Full-text multi-table search application for Django. Easy to install and use, with good performance.",
long_description = open(os.path.join(os.path.dirname(__file__), "README.markdown")).read(),
author = "Dave Hall",
author_email = "dave@etianen.com",
url = "http://github.com/etianen/django-watson",
zip_safe = False,
packages = [
"watson",
"watson.management",
"watson.management.commands",
"watson.migrations",
"watson.south_migrations",
"watson.templatetags",
],
package_dir = {
"": "src",
},
package_data = {
"watson": [
"locale/*/LC_MESSAGES/django.*",
"templates/watson/*.html",
"templates/watson/includes/*.html",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
"Framework :: Django",
],
)
| import os
from distutils.core import setup
setup(
name = "django-watson",
version = "1.1.6",
description = "Full-text multi-table search application for Django. Easy to install and use, with good performance.",
long_description = open(os.path.join(os.path.dirname(__file__), "README.markdown")).read(),
author = "Dave Hall",
author_email = "dave@etianen.com",
url = "http://github.com/etianen/django-watson",
zip_safe = False,
packages = [
"watson",
"watson.management",
"watson.management.commands",
"watson.migrations",
"watson.templatetags",
],
package_dir = {
"": "src",
},
package_data = {
"watson": [
"locale/*/LC_MESSAGES/django.*",
"templates/watson/*.html",
"templates/watson/includes/*.html",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
"Framework :: Django",
],
)
| bsd-3-clause | Python |
0e6e323c1f5952827382148662ec38bf0a076459 | Bump to 0.7.2 dev | hydralabs/pyamf,hydralabs/pyamf,thijstriemstra/pyamf,thijstriemstra/pyamf | setup.py | setup.py | #!/usr/bin/env python
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
# import ordering is important
import setupinfo
from setuptools import setup, find_packages
version = (0, 7, 2, 'dev')
name = "PyAMF"
description = "AMF support for Python"
long_description = setupinfo.read('README.rst')
url = "http://pyamf.org"
author = "The PyAMF Project"
author_email = "users@pyamf.org"
license = "MIT License"
classifiers = """
Framework :: Django
Framework :: Pylons
Framework :: Twisted
Intended Audience :: Developers
Intended Audience :: Information Technology
License :: OSI Approved :: MIT License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: C
Programming Language :: Python
Programming Language :: Cython
Programming Language :: Python :: 2.4
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Topic :: Internet :: WWW/HTTP :: WSGI :: Application
Topic :: Software Development :: Libraries :: Python Modules
"""
keywords = """
amf amf0 amf3 flex flash remoting rpc http flashplayer air bytearray
objectproxy arraycollection recordset actionscript decoder encoder gateway
remoteobject twisted pylons django sharedobject lso sol
"""
def setup_package():
setupinfo.set_version(version)
setupinfo.write_version_py()
setup(
name=name,
version=setupinfo.get_version(),
description=description,
long_description=long_description,
url=url,
author=author,
author_email=author_email,
keywords=keywords.strip(),
license=license,
packages=find_packages(),
ext_modules=setupinfo.get_extensions(),
install_requires=setupinfo.get_install_requirements(),
tests_require=setupinfo.get_test_requirements(),
test_suite="pyamf.tests.get_suite",
zip_safe=False,
extras_require=setupinfo.get_extras_require(),
classifiers=(
filter(None, classifiers.strip().split('\n')) +
setupinfo.get_trove_classifiers()
),
**setupinfo.extra_setup_args())
if __name__ == '__main__':
setup_package()
| #!/usr/bin/env python
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
# import ordering is important
import setupinfo
from setuptools import setup, find_packages
version = (0, 7, 2)
name = "PyAMF"
description = "AMF support for Python"
long_description = setupinfo.read('README.rst')
url = "http://pyamf.org"
author = "The PyAMF Project"
author_email = "users@pyamf.org"
license = "MIT License"
classifiers = """
Framework :: Django
Framework :: Pylons
Framework :: Twisted
Intended Audience :: Developers
Intended Audience :: Information Technology
License :: OSI Approved :: MIT License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: C
Programming Language :: Python
Programming Language :: Cython
Programming Language :: Python :: 2.4
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Topic :: Internet :: WWW/HTTP :: WSGI :: Application
Topic :: Software Development :: Libraries :: Python Modules
"""
keywords = """
amf amf0 amf3 flex flash remoting rpc http flashplayer air bytearray
objectproxy arraycollection recordset actionscript decoder encoder gateway
remoteobject twisted pylons django sharedobject lso sol
"""
def setup_package():
setupinfo.set_version(version)
setupinfo.write_version_py()
setup(
name=name,
version=setupinfo.get_version(),
description=description,
long_description=long_description,
url=url,
author=author,
author_email=author_email,
keywords=keywords.strip(),
license=license,
packages=find_packages(),
ext_modules=setupinfo.get_extensions(),
install_requires=setupinfo.get_install_requirements(),
tests_require=setupinfo.get_test_requirements(),
test_suite="pyamf.tests.get_suite",
zip_safe=False,
extras_require=setupinfo.get_extras_require(),
classifiers=(
filter(None, classifiers.strip().split('\n')) +
setupinfo.get_trove_classifiers()
),
**setupinfo.extra_setup_args())
if __name__ == '__main__':
setup_package()
| mit | Python |
aa8ca0d4e7a88d6d5f67289c36652c182092f81e | Update setup.py to work for new packages | cpcloud/numba,gmarkall/numba,sklam/numba,stuartarchibald/numba,shiquanwang/numba,jriehl/numba,cpcloud/numba,seibert/numba,sklam/numba,pitrou/numba,stuartarchibald/numba,stuartarchibald/numba,pombredanne/numba,seibert/numba,ssarangi/numba,stefanseefeld/numba,shiquanwang/numba,stuartarchibald/numba,shiquanwang/numba,GaZ3ll3/numba,IntelLabs/numba,IntelLabs/numba,pitrou/numba,GaZ3ll3/numba,gmarkall/numba,jriehl/numba,GaZ3ll3/numba,IntelLabs/numba,pombredanne/numba,stonebig/numba,stonebig/numba,numba/numba,ssarangi/numba,stefanseefeld/numba,cpcloud/numba,numba/numba,cpcloud/numba,jriehl/numba,sklam/numba,ssarangi/numba,numba/numba,gdementen/numba,cpcloud/numba,stonebig/numba,pitrou/numba,jriehl/numba,IntelLabs/numba,pombredanne/numba,seibert/numba,sklam/numba,pitrou/numba,seibert/numba,seibert/numba,pombredanne/numba,gdementen/numba,GaZ3ll3/numba,stuartarchibald/numba,stefanseefeld/numba,stonebig/numba,IntelLabs/numba,gdementen/numba,gmarkall/numba,numba/numba,pombredanne/numba,stonebig/numba,gmarkall/numba,jriehl/numba,numba/numba,sklam/numba,ssarangi/numba,GaZ3ll3/numba,ssarangi/numba,stefanseefeld/numba,pitrou/numba,gdementen/numba,stefanseefeld/numba,gmarkall/numba,gdementen/numba | setup.py | setup.py | import re
import sys
from os.path import join
from distutils.core import setup, Extension
import numpy
if sys.version_info[:2] < (2, 5):
raise Exception('numba requires Python 2.5 or greater.')
kwds = {}
kwds['long_description'] = open('README').read()
setup(
name = "numba",
author = "Travis Oliphant",
author_email = "travis@continuum.io",
url = "https://github.com/ContinuumIO/numba",
license = "BSD",
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Topic :: Utilities",
],
description = "compiling Python code for NumPy",
packages = ["numba", "numba.pymothoa", "numba.minivect"],
ext_modules = [Extension(name = "numba._ext",
sources = ["numba/_ext.c"],
include_dirs=[numpy.get_include()])],
version = '0.1'
)
| import re
import sys
from os.path import join
from distutils.core import setup, Extension
import numpy
if sys.version_info[:2] < (2, 5):
raise Exception('numba requires Python 2.5 or greater.')
kwds = {}
kwds['long_description'] = open('README').read()
setup(
name = "numba",
author = "Travis Oliphant",
author_email = "travis@continuum.io",
url = "https://github.com/ContinuumIO/numba",
license = "BSD",
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Topic :: Utilities",
],
description = "compiling Python code for NumPy",
packages = ["numba"],
ext_modules = [Extension(name = "numba._ext",
sources = ["numba/_ext.c"],
include_dirs=[numpy.get_include()])],
version = '0.1'
)
| bsd-2-clause | Python |
c93de3b854772e6792c2af5f132b762d9cedfb11 | Add missing dependency | tiagocoutinho/qredis | setup.py | setup.py | from setuptools import setup, find_packages
def get_readme(name="README.md"):
with open(name) as f:
return f.read()
requirements = ["redis", "qtpy", "PyQt5", "msgpack", "msgpack-numpy"]
setup(
name="qredis",
version="0.5.1",
description="Qt based Redis GUI",
long_description=get_readme(),
long_description_content_type="text/markdown",
author="Tiago Coutinho",
author_email="coutinhotiago@gmail.com",
url="https://github.com/tiagocoutinho/qredis",
packages=find_packages(),
package_data={"qredis.images": ["*.png"], "qredis.ui": ["*.ui"]},
entry_points={"console_scripts": ["qredis=qredis.window:main"]},
install_requires=requirements,
keywords="redis,GUI,Qt",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.5",
)
| from setuptools import setup, find_packages
def get_readme(name="README.md"):
with open(name) as f:
return f.read()
requirements = ["redis", "PyQt5", "msgpack", "msgpack-numpy"]
setup(
name="qredis",
version="0.5.1",
description="Qt based Redis GUI",
long_description=get_readme(),
long_description_content_type="text/markdown",
author="Tiago Coutinho",
author_email="coutinhotiago@gmail.com",
url="https://github.com/tiagocoutinho/qredis",
packages=find_packages(),
package_data={"qredis.images": ["*.png"], "qredis.ui": ["*.ui"]},
entry_points={"console_scripts": ["qredis=qredis.window:main"]},
install_requires=requirements,
keywords="redis,GUI,Qt",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
python_requires=">=3.5",
)
| mit | Python |
d03bf67ad8fec7b8c96b4f0c238cd423e2f577c0 | read version from landsat folder | developmentseed/landsat-util,DougFirErickson/landsat-util,njwilson23/landsat-util,ircwaves/landsat-util,mileswwatkins/landsat-util,simonemurzilli/landsat-util,RoboDonut/landsat-util,jalalomary/landsat-util,DougFirErickson/landsat-util,simonemurzilli/landsat-util,citterio/landsat-util,mileswwatkins/landsat-util | setup.py | setup.py | #!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from landsat import __version__
def readme():
with open("README.rst") as f:
return f.read()
test_requirements = [
'nose==1.3.3'
]
setup(
name="landsat",
version=__version__,
description="A utility to search, download and process Landsat 8" +
" satellite imagery",
long_description=readme(),
author="Scisco",
author_email="alireza@developmentseed.org",
scripts=["bin/landsat"],
url="https://github.com/developmentseed/landsat-util",
packages=["landsat"],
include_package_data=True,
license="CCO",
platforms="Posix; MacOS X; Windows",
install_requires=[
"requests==2.5.3",
"python-dateutil==2.2",
"numpy==1.9.1",
"termcolor==1.1.0",
"rasterio==0.18",
"six==1.9.0",
"scikit-image==0.10.1",
"homura==0.1.0"
],
test_suite='nose.collector',
test_require=test_requirements
)
| #!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def readme():
with open("README.rst") as f:
return f.read()
test_requirements = [
'nose==1.3.3'
]
setup(
name="landsat",
version='0.4.5',
description="A utility to search, download and process Landsat 8" +
" satellite imagery",
long_description=readme(),
author="Scisco",
author_email="alireza@developmentseed.org",
scripts=["bin/landsat"],
url="https://github.com/developmentseed/landsat-util",
packages=["landsat"],
include_package_data=True,
license="CCO",
platforms="Posix; MacOS X; Windows",
install_requires=[
"requests==2.5.3",
"python-dateutil==2.2",
"numpy==1.9.1",
"termcolor==1.1.0",
"rasterio==0.18",
"six==1.9.0",
"scikit-image==0.10.1",
"homura==0.1.0"
],
test_suite='nose.collector',
test_require=test_requirements
)
| cc0-1.0 | Python |
7a707cc66380482ef6fb5da93e42b898f7983dcc | add nosetests | knoguchi/acc | setup.py | setup.py | from setuptools import setup, find_packages
import sys, os
version = '0.0'
setup(name='acc',
version=version,
description="An implementation of Accounting Pattern by Martin Fowler",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='accounting',
author='Kenji Noguchi',
author_email='tokyo246@gmail.com',
url='http://kenix.org/acc',
license='Apache License Version 2.0',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
"python-money"
],
entry_points="""
# -*- Entry points: -*-
""",
setup_requires=['nose>=1.0']
)
| from setuptools import setup, find_packages
import sys, os
version = '0.0'
setup(name='acc',
version=version,
description="An implementation of Accounting Pattern by Martin Fowler",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='accounting',
author='Kenji Noguchi',
author_email='tokyo246@gmail.com',
url='http://kenix.org/acc',
license='Apache License Version 2.0',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
"python-money"
],
entry_points="""
# -*- Entry points: -*-
""",
)
| apache-2.0 | Python |
035dc6c0815089e728b5ee78738c4795367b1d94 | switch to pyannote.core 1.0 and pyannote.database 0.12 | hbredin/pyannote-generators,pyannote/pyannote-generators | setup.py | setup.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import versioneer
from setuptools import setup, find_packages
setup(
# package
namespace_packages=['pyannote'],
packages=find_packages(),
install_requires=[
'pyannote.core >= 1.0.4',
'pyannote.database >= 0.12'
],
# versioneer
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
# PyPI
name='pyannote.generators',
description=('Generators'),
author='Hervé Bredin',
author_email='bredin@limsi.fr',
url='http://herve.niderb.fr/',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering"
],
)
| #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import versioneer
from setuptools import setup, find_packages
setup(
# package
namespace_packages=['pyannote'],
packages=find_packages(),
install_requires=[
'pyannote.core >= 0.13.2',
'pyannote.database >= 0.11'
],
# versioneer
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
# PyPI
name='pyannote.generators',
description=('Generators'),
author='Hervé Bredin',
author_email='bredin@limsi.fr',
url='http://herve.niderb.fr/',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering"
],
)
| mit | Python |
f4b640783135ec93eb0bf0403c4be042144a0303 | add dependencies to `install_requires`. | thruflo/pyramid_weblayer | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import dirname, join as join_path
from setuptools import setup, find_packages
def _read(file_name):
sock = open(file_name)
text = sock.read()
sock.close()
return text
setup(
name = 'pyramid_weblayer',
version = '0.8.5',
description = 'A re-factor of some parts of weblayer for use within Pyramid.',
author = 'James Arthur',
author_email = 'username: thruflo, domain: gmail.com',
url = 'http://github.com/thruflo/pyramid_weblayer',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Framework :: Pylons',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license = _read('UNLICENSE').split('\n')[0],
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
zip_safe = False,
install_requires=[
'html2text',
'markdown2',
'pyga',
'pyramid',
'pyramid_basemodel',
'transaction',
'zope.interface'
],
tests_require = [
'coverage',
'nose',
'mock',
'WebTest>=1.3.1',
]
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import dirname, join as join_path
from setuptools import setup, find_packages
def _read(file_name):
sock = open(file_name)
text = sock.read()
sock.close()
return text
setup(
name = 'pyramid_weblayer',
version = '0.8.5',
description = 'A re-factor of some parts of weblayer for use within Pyramid.',
author = 'James Arthur',
author_email = 'username: thruflo, domain: gmail.com',
url = 'http://github.com/thruflo/pyramid_weblayer',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Framework :: Pylons',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license = _read('UNLICENSE').split('\n')[0],
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
zip_safe = False,
install_requires=[
'html2text',
'markdown2',
'pyga',
'pyramid',
'transaction'
],
tests_require = [
'coverage',
'nose',
'mock',
'WebTest>=1.3.1',
]
)
| unlicense | Python |
ba9bfad2d015b615e89e74957f6f3cc6567b27e8 | Use kfac_jax@HEAD due to API changes in the latest version of JAX. | deepmind/ferminet | setup.py | setup.py | # Copyright 2020 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
import unittest
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py',
'attrs',
'chex',
'h5py',
'jax',
'jaxlib',
# TODO(b/230487443) - use released version of kfac.
'kfac_jax @ git+https://github.com/deepmind/kfac-jax',
'ml-collections',
'optax',
'numpy',
'pandas',
'pyscf',
'pyblock',
'scipy',
'tables',
'typing_extensions',
]
def ferminet_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('ferminet/tests', pattern='*_test.py')
return test_suite
setup(
name='ferminet',
version='0.2',
description='A library to train networks to represent ground state wavefunctions of fermionic systems',
url='https://github.com/deepmind/ferminet',
author='DeepMind',
author_email='no-reply@google.com',
# Contained modules and scripts.
scripts=['bin/ferminet'],
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
extras_require={'testing': ['flake8', 'pylint', 'pytest', 'pytype']},
platforms=['any'],
license='Apache 2.0',
test_suite='setup.ferminet_test_suite',
)
| # Copyright 2020 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
import unittest
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py',
'attrs',
'chex',
'h5py',
'jax',
'jaxlib',
'kfac_jax',
'ml-collections',
'optax',
'numpy',
'pandas',
'pyscf',
'pyblock',
'scipy',
'tables',
'typing_extensions',
]
def ferminet_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('ferminet/tests', pattern='*_test.py')
return test_suite
setup(
name='ferminet',
version='0.2',
description='A library to train networks to represent ground state wavefunctions of fermionic systems',
url='https://github.com/deepmind/ferminet',
author='DeepMind',
author_email='no-reply@google.com',
# Contained modules and scripts.
scripts=['bin/ferminet'],
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
extras_require={'testing': ['flake8', 'pylint', 'pytest', 'pytype']},
platforms=['any'],
license='Apache 2.0',
test_suite='setup.ferminet_test_suite',
)
| apache-2.0 | Python |
8df792393f0efec6df074318a0f65cddf8eb2335 | change version to 0.0.11 | xclxxl414/rqalpha,xclxxl414/rqalpha | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
from pip.req import parse_requirements
setup(
name='rqbacktest',
version='0.0.11',
description='Python Distribution Utilities',
packages=find_packages(exclude=[]),
author='ricequant',
author_email='public@ricequant.com',
url='https://www.ricequant.com/',
install_requires=[str(ir.req) for ir in parse_requirements("requirements.txt", session=False)],
zip_safe=False,
entry_points={
"console_scripts": [
"rqbacktest = rqbacktest.__main__:entry_point",
]
},
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from pip.req import parse_requirements
setup(
name='rqbacktest',
version='0.0.10',
description='Python Distribution Utilities',
packages=find_packages(exclude=[]),
author='ricequant',
author_email='public@ricequant.com',
url='https://www.ricequant.com/',
install_requires=[str(ir.req) for ir in parse_requirements("requirements.txt", session=False)],
zip_safe=False,
entry_points={
"console_scripts": [
"rqbacktest = rqbacktest.__main__:entry_point",
]
},
)
| apache-2.0 | Python |
b48ec701926fe449540fb9f794da5c11808b825b | change my email address | kevinoid/py-gnupg | setup.py | setup.py | import GnuPGInterface
import distutils.core
long_description = """
GnuPGInterface is a Python module to interface with GnuPG.
It concentrates on interacting with GnuPG via filehandles,
providing access to control GnuPG via versatile and extensible means.
This module is based on GnuPG::Interface, a Perl module by the same author.
"""
distutils.core.setup( name = 'GnuPGInterface',
version = GnuPGInterface.__version__,
description = 'GnuPG interactions with file handles',
long_description = long_description,
author = 'Frank J. Tobin',
author_email = 'ftobin@neverending.org',
license = 'LGPL',
platforms = 'POSIX',
keywords = 'GnuPG gpg',
url = 'http://py-gnupg.sourceforge.net/',
py_modules = [ 'GnuPGInterface' ]
)
| import GnuPGInterface
import distutils.core
long_description = """
GnuPGInterface is a Python module to interface with GnuPG.
It concentrates on interacting with GnuPG via filehandles,
providing access to control GnuPG via versatile and extensible means.
This module is based on GnuPG::Interface, a Perl module by the same author.
"""
distutils.core.setup( name = 'GnuPGInterface',
version = GnuPGInterface.__version__,
description = 'GnuPG interactions with file handles',
long_description = long_description,
author = 'Frank J. Tobin',
author_email = 'ftobin@users.sourceforge.net',
license = 'LGPL',
platforms = 'POSIX',
keywords = 'GnuPG gpg',
url = 'http://py-gnupg.sourceforge.net/',
py_modules = [ 'GnuPGInterface' ]
)
| lgpl-2.1 | Python |
cbc94311219ca7edea6f86fcc429dd7f6e4bc894 | Remove old trove classifiers for Python 3.3 and 3.4 | Eyepea/aiosip,sangoma/aiosip | setup.py | setup.py | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'multidict>=2.0',
'pyquery',
'aiodns'
]
test_requirements = [
'pytest'
]
setup(
name='aiosip',
version='0.2.0',
description='SIP support for AsyncIO',
long_description=readme + '\n\n' + history,
author='Ludovic Gasc (GMLudo)',
author_email='gmludo@gmail.com',
url='https://github.com/Eyepea/aiosip',
packages=[
'aiosip',
],
package_dir={'aiosip':
'aiosip'},
include_package_data=True,
install_requires=requirements,
license="Apache 2",
zip_safe=False,
keywords=['asyncio', 'sip', 'telephony'],
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Communications',
'Topic :: Communications :: Internet Phone',
'Topic :: Communications :: Telephony',
],
test_suite='tests',
tests_require=test_requirements
)
| #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'multidict>=2.0',
'pyquery',
'aiodns'
]
test_requirements = [
'pytest'
]
setup(
name='aiosip',
version='0.2.0',
description='SIP support for AsyncIO',
long_description=readme + '\n\n' + history,
author='Ludovic Gasc (GMLudo)',
author_email='gmludo@gmail.com',
url='https://github.com/Eyepea/aiosip',
packages=[
'aiosip',
],
package_dir={'aiosip':
'aiosip'},
include_package_data=True,
install_requires=requirements,
license="Apache 2",
zip_safe=False,
keywords=['asyncio', 'sip', 'telephony'],
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Communications',
'Topic :: Communications :: Internet Phone',
'Topic :: Communications :: Telephony',
],
test_suite='tests',
tests_require=test_requirements
)
| apache-2.0 | Python |
347279a0e82769eac6430806f7adb9c907155465 | add qimvn to setup.py | dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild | setup.py | setup.py | ## Copyright (c) 2012, 2013 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
from distutils.core import setup
packages = [
"qisys",
"qisys.actions",
"qisrc",
"qisrc.actions",
"qibuild",
"qibuild.actions",
"qibuild.cmake",
"qilinguist",
"qilinguist.actions",
"qitoolchain",
"qitoolchain.actions",
"qitoolchain.binary_package",
"qimvn",
"qimvn.actions"
]
scripts = [
"python/bin/qidoc",
"python/bin/qilinguist",
"python/bin/qisrc",
"python/bin/qibuild",
"python/bin/qitoolchain",
"python/bin/qimvn",
]
package_data = {
"qisrc" : ["templates/project/CMakeLists.txt",
"templates/project/main.cpp",
"templates/project/test.cpp",
"templates/project/qiproject.xml"
],
}
def get_qibuild_cmake_files():
res = list()
cmake_dest = 'share/cmake'
for (root, directories, filenames) in os.walk('cmake'):
rel_root = os.path.relpath(root, 'cmake')
if rel_root == ".":
rel_root = ""
rel_filenames = [os.path.join('cmake', rel_root, x) for x in filenames]
rel_dest = os.path.join(cmake_dest, rel_root)
res.append((rel_dest, rel_filenames))
return res
data_files = get_qibuild_cmake_files()
setup(name="qibuild",
version="3.1",
description="Compilation of C++ projects made easy!",
author="Aldebaran Robotics",
author_email="dmerejkowsky@aldebaran-robotics.com",
py_modules=['qicd'],
packages=packages,
package_dir={'': 'python'},
package_data=package_data,
data_files=data_files,
license="BSD",
scripts=scripts
)
| ## Copyright (c) 2012, 2013 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
from distutils.core import setup
packages = [
"qisys",
"qisys.actions",
"qisrc",
"qisrc.actions",
"qibuild",
"qibuild.actions",
"qibuild.cmake",
"qilinguist",
"qilinguist.actions",
"qitoolchain",
"qitoolchain.actions",
"qitoolchain.binary_package",
"qimvn",
"qimvn.actions"
]
scripts = [
"python/bin/qidoc",
"python/bin/qilinguist",
"python/bin/qisrc",
"python/bin/qibuild",
"python/bin/qitoolchain",
]
package_data = {
"qisrc" : ["templates/project/CMakeLists.txt",
"templates/project/main.cpp",
"templates/project/test.cpp",
"templates/project/qiproject.xml"
],
}
def get_qibuild_cmake_files():
res = list()
cmake_dest = 'share/cmake'
for (root, directories, filenames) in os.walk('cmake'):
rel_root = os.path.relpath(root, 'cmake')
if rel_root == ".":
rel_root = ""
rel_filenames = [os.path.join('cmake', rel_root, x) for x in filenames]
rel_dest = os.path.join(cmake_dest, rel_root)
res.append((rel_dest, rel_filenames))
return res
data_files = get_qibuild_cmake_files()
setup(name="qibuild",
version="3.1",
description="Compilation of C++ projects made easy!",
author="Aldebaran Robotics",
author_email="dmerejkowsky@aldebaran-robotics.com",
py_modules=['qicd'],
packages=packages,
package_dir={'': 'python'},
package_data=package_data,
data_files=data_files,
license="BSD",
scripts=scripts
)
| bsd-3-clause | Python |
8312ac22c444b895bab9f2a3707e4d4a7ccc40b2 | Remove sphinx pinning since 1.7.6 has been released. | vertexproject/synapse,vertexproject/synapse,vertexproject/synapse | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='synapse',
version='0.1.0a1',
description='Synapse Distributed Key-Value Hypergraph Analysis Framework',
author='Invisigoth Kenshoto',
author_email='invisigoth.kenshoto@gmail.com',
url='https://github.com/vertexproject/synapse',
license='Apache License 2.0',
packages=find_packages(exclude=['scripts',
]),
include_package_data=True,
install_requires=[
'pyOpenSSL>=16.2.0,<18.0.0',
'msgpack==0.5.1',
'xxhash>=1.0.1,<2.0.0',
'lmdb>=0.94,<1.0.0',
'tornado>=5.1,<6.0.0',
'regex>=2017.9.23',
'PyYAML>=3.13,<4.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Software Distribution',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='synapse',
version='0.1.0a1',
description='Synapse Distributed Key-Value Hypergraph Analysis Framework',
author='Invisigoth Kenshoto',
author_email='invisigoth.kenshoto@gmail.com',
url='https://github.com/vertexproject/synapse',
license='Apache License 2.0',
packages=find_packages(exclude=['scripts',
]),
include_package_data=True,
install_requires=[
'pyOpenSSL>=16.2.0,<18.0.0',
'msgpack==0.5.1',
'xxhash>=1.0.1,<2.0.0',
'lmdb>=0.94,<1.0.0',
'tornado>=5.1,<6.0.0',
'regex>=2017.9.23',
'PyYAML>=3.13,<4.0',
'sphinx==1.7.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Software Distribution',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| apache-2.0 | Python |
9ebc0daaa86cb268f395670f6b38cd3fdc0dbc4e | Add packages to setup to run tests | annpaul89/datetime_utils | setup.py | setup.py | from setuptools import setup
setup(
name='datetime_utils',
version='0.1',
license='MIT',
packages=[
'datetime_utils',
],
url='https://github.com/annpaul89/datetime_utils',
author='Ann Paul',
author_email='ann.mpaul@gmail.com',
description='Python functions for common operations on datetime instances',
install_requires=[
'pytz>=2014.10',
],
classifiers = [
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords = 'python datetime pytz timezone timedelta arithmetic round floor period conversion',
test_suite = 'nose.collector',
tests_require = [
'coverage==3.7.1',
'nose>=1.3.0',
],
) | from distutils.core import setup
setup(
name='datetime_utils',
version='0.1',
packages=[
'datetime_utils',
],
url='https://github.com/annpaul89/datetime_utils',
description='Python functions for common operations on datetime instances',
install_requires=[
'pytz>=2016.4',
]
) | mit | Python |
5a3e0859fe7774fc8caee54ff498f324a89be9e3 | install command line scripts to bin/ when installing the package | williballenthin/python-evtx | setup.py | setup.py | #!/usr/bin/env python
import setuptools
long_description="""python-evtx is a pure Python parser for recent Windows Event Log files (those with the file extension ".evtx"). The module provides programmatic access to the File and Chunk headers, record templates, and event entries. For example, you can use python-evtx to review the event logs of Windows 7 systems from a Mac or Linux workstation. The structure definitions and parsing strategies were heavily inspired by the work of Andreas Schuster and his Perl implementation "Parse-Evtx"."""
setuptools.setup(name="python-evtx",
version="0.5.3",
description="Pure Python parser for recent Windows event log files (.evtx).",
long_description=long_description,
author="Willi Ballenthin",
author_email="willi.ballenthin@gmail.com",
url="https://github.com/williballenthin/python-evtx",
license="Apache 2.0 License",
packages=setuptools.find_packages(),
install_requires=['hexdump', 'six'],
scripts = ['scripts/evtx_dump',
'scripts/evtx_dump_chunk_slack',
'scripts/evtx_eid_record_numbers',
'scripts/evtx_extract_record',
'scripts/evtx_filter_records',
'scripts/evtx_find_bugs',
'scripts/evtx_get_pretty_record',
'scripts/evtx_info',
'scripts/evtx_record_structure',
'scripts/evtx_structure',
'scripts/evtx_templates']
)
| #!/usr/bin/env python
import setuptools
long_description="""python-evtx is a pure Python parser for recent Windows Event Log files (those with the file extension ".evtx"). The module provides programmatic access to the File and Chunk headers, record templates, and event entries. For example, you can use python-evtx to review the event logs of Windows 7 systems from a Mac or Linux workstation. The structure definitions and parsing strategies were heavily inspired by the work of Andreas Schuster and his Perl implementation "Parse-Evtx"."""
setuptools.setup(name="python-evtx",
version="0.5.3",
description="Pure Python parser for recent Windows event log files (.evtx).",
long_description=long_description,
author="Willi Ballenthin",
author_email="willi.ballenthin@gmail.com",
url="https://github.com/williballenthin/python-evtx",
license="Apache 2.0 License",
packages=setuptools.find_packages(),
install_requires=['hexdump', 'six'],
)
| apache-2.0 | Python |
c793fe512903fd381e7af0e3d85e375c6653e8fb | remove unique constraint in old migration which leads the migration to break if data is pressent in the database | TheMangalex/pyfeedback,TheMangalex/pyfeedback,TheMangalex/pyfeedback,d120/pyfeedback,TheMangalex/pyfeedback,TheMangalex/pyfeedback,d120/pyfeedback,d120/pyfeedback,d120/pyfeedback | src/feedback/migrations/0030_auto_20170108_1300.py | src/feedback/migrations/0030_auto_20170108_1300.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-08 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feedback', '0029_auto_20161227_1258'),
]
operations = [
migrations.AlterField(
model_name='barcodeallowedstate',
name='allow_state',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], null=True),
),
migrations.AlterField(
model_name='log',
name='status',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], default=100),
),
migrations.AlterField(
model_name='veranstaltung',
name='status',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], default=100),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-08 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feedback', '0029_auto_20161227_1258'),
]
operations = [
migrations.AlterField(
model_name='barcodeallowedstate',
name='allow_state',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], null=True, unique=True),
),
migrations.AlterField(
model_name='log',
name='status',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], default=100),
),
migrations.AlterField(
model_name='veranstaltung',
name='status',
field=models.IntegerField(choices=[(100, 'Angelegt'), (200, 'Bestellung ge\xf6ffnet'), (300, 'Keine Evaluation'), (500, 'Bestellung liegt vor'), (600, 'Gedruckt'), (700, 'Versandt'), (800, 'B\xf6gen eingegangen'), (900, 'B\xf6gen gescannt'), (1000, 'Ergebnisse versandt')], default=100),
),
]
| agpl-3.0 | Python |
c956c0e42657a13642f2bee989ca538ecd43691d | change license to BSD to resolve Twine issue | watsonpy/watson-di | setup.py | setup.py | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
import watson.di
name = 'watson-di'
description = 'Dependency Injection made simple.'
version = watson.di.__version__
def read(filename, as_list=False):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
contents = f.read()
if as_list:
return contents.splitlines()
return contents
setup(
name=name,
version=version,
url='http://github.com/watsonpy/' + name,
description=description,
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='Simon Coulton',
author_email='simon@bespohk.com',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=read('requirements.txt', as_list=True),
extras_require={
'test': read('requirements-test.txt', as_list=True)
},
)
| # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
import watson.di
name = 'watson-di'
description = 'Dependency Injection made simple.'
version = watson.di.__version__
def read(filename, as_list=False):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
contents = f.read()
if as_list:
return contents.splitlines()
return contents
setup(
name=name,
version=version,
url='http://github.com/watsonpy/' + name,
description=description,
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='Simon Coulton',
author_email='simon@bespohk.com',
license=read('LICENSE'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=read('requirements.txt', as_list=True),
extras_require={
'test': read('requirements-test.txt', as_list=True)
},
)
| bsd-3-clause | Python |
1c4d4806ada333f3939d84be0fcb0a8df4cd059b | Bump to 0.0.3-dev | cdunklau/fbemissary | setup.py | setup.py | import os
from setuptools import setup, find_packages
requires = [
'aiohttp',
'attrs',
]
classifiers = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Framework :: AsyncIO',
'Topic :: Internet',
'Topic :: Communications :: Chat',
'License :: OSI Approved :: Apache Software License',
'Development Status :: 2 - Pre-Alpha',
]
description = (
'A bot framework for the Facebook Messenger platform, '
'built on asyncio and aiohttp'
)
setup(
name='fbemissary',
version='0.0.3-dev',
description=description,
author='Colin Dunklau',
author_email='colin.dunklau@gmail.com',
url='https://github.com/cdunklau/fbemissary',
classifiers=classifiers,
packages=find_packages(include=['fbemissary', 'fbemissary.tests']),
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
| import os
from setuptools import setup, find_packages
requires = [
'aiohttp',
'attrs',
]
classifiers = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Framework :: AsyncIO',
'Topic :: Internet',
'Topic :: Communications :: Chat',
'License :: OSI Approved :: Apache Software License',
'Development Status :: 2 - Pre-Alpha',
]
description = (
'A bot framework for the Facebook Messenger platform, '
'built on asyncio and aiohttp'
)
setup(
name='fbemissary',
version='0.0.2',
description=description,
author='Colin Dunklau',
author_email='colin.dunklau@gmail.com',
url='https://github.com/cdunklau/fbemissary',
classifiers=classifiers,
packages=find_packages(include=['fbemissary', 'fbemissary.tests']),
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
| apache-2.0 | Python |
ae1aa047d7d25e6ea6e357ce31dfc085b341f4a7 | add bruker_test_data subdirs | kaustubhmote/nmrglue,jjhelmus/nmrglue,kaustubhmote/nmrglue,jjhelmus/nmrglue | setup.py | setup.py | #!/usr/bin/env python
# setup script for nmrglue
from distutils.core import setup
from codecs import open
from os import path, walk
here = path.abspath(path.dirname(__file__))
# get long description from README
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nmrglue',
version='0.7-dev', # change this in nmrglue/__init__.py also
description='A module for working with NMR data in Python',
long_description=long_description,
url='http://www.nmrglue.com',
author='Jonathan J. Helmus',
author_email='jjhelmus@gmail.com',
license='New BSD License',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux'],
requires=['numpy', 'scipy'],
packages=[
'nmrglue',
'nmrglue.analysis',
'nmrglue.analysis.tests',
'nmrglue.fileio',
'nmrglue.fileio.tests',
'nmrglue.process',
'nmrglue.process.nmrtxt',
'nmrglue.util'],
package_data={'nmrglue': [
'fileio/tests/data/*.f*',
'fileio/tests/data/*.dir/*',
'fileio/tests/bruker_test_data/*',
'fileio/tests/bruker_test_data/1/*',
'fileio/tests/bruker_test_data/1/pdata/*',
'fileio/tests/bruker_test_data/1/pdata/1/*',
'fileio/tests/data/test.tab']},
)
| #!/usr/bin/env python
# setup script for nmrglue
from distutils.core import setup
from codecs import open
from os import path, walk
here = path.abspath(path.dirname(__file__))
# get long description from README
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nmrglue',
version='0.7-dev', # change this in nmrglue/__init__.py also
description='A module for working with NMR data in Python',
long_description=long_description,
url='http://www.nmrglue.com',
author='Jonathan J. Helmus',
author_email='jjhelmus@gmail.com',
license='New BSD License',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux'],
requires=['numpy', 'scipy'],
packages=[
'nmrglue',
'nmrglue.analysis',
'nmrglue.analysis.tests',
'nmrglue.fileio',
'nmrglue.fileio.tests',
'nmrglue.process',
'nmrglue.process.nmrtxt',
'nmrglue.util'],
package_data={'nmrglue': [
'fileio/tests/data/*.f*',
'fileio/tests/data/*.dir/*',
'fileio/tests/bruker_test_data/*',
'fileio/tests/data/test.tab']},
)
| bsd-3-clause | Python |
c52b2fc3f4a8cb20b2a692e4ae94575fa849c251 | Update version to v0.5.0. | edelooff/sqlalchemy-json | setup.py | setup.py | import os
from setuptools import setup
def contents(filename):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, filename)) as fp:
return fp.read()
setup(
name="sqlalchemy-json",
version="0.5.0",
author="Elmer de Looff",
author_email="elmer.delooff@gmail.com",
description="JSON type with nested change tracking for SQLAlchemy",
long_description=contents("README.rst"),
keywords="sqlalchemy json mutable",
license="BSD",
url="https://github.com/edelooff/sqlalchemy-json",
packages=["sqlalchemy_json"],
install_requires=[
"six",
"sqlalchemy>=0.7"],
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database"])
| import os
from setuptools import setup
def contents(filename):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, filename)) as fp:
return fp.read()
setup(
name="sqlalchemy-json",
version="0.4.0",
author="Elmer de Looff",
author_email="elmer.delooff@gmail.com",
description="JSON type with nested change tracking for SQLAlchemy",
long_description=contents("README.rst"),
keywords="sqlalchemy json mutable",
license="BSD",
url="https://github.com/edelooff/sqlalchemy-json",
packages=["sqlalchemy_json"],
install_requires=[
"six",
"sqlalchemy>=0.7"],
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database"])
| bsd-2-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.