commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
4e57f73597f8d5dc3ccee9d815657a774dc52d62 | Handle package dirs correctly | src/setuptools_epydoc/__init__.py | src/setuptools_epydoc/__init__.py | import os
import sys
import re
from setuptools import Command
class EpydocCommand(Command):
    '''
    Setuptools command used to build an API documentation with epydoc.

    @author: jwienke
    '''

    user_options = [('format=', 'f',
                     'the output format to use (html and pdf)'),
                    ('config=', 'c',
                     'Epydoc configuration file'),
                    ('names=', None,
                     'Names of packages to document. Defaults to all '
                     'configured packages in the project. Comma-separated.'),
                    ('output-dir=', 'o',
                     'Folder for generated output. Default: docs'),
                    ('verbose', 'v', 'print verbose warnings')]
    description = 'Generates an API documentation using epydoc.'

    FORMAT_HTML = 'html'
    FORMAT_PDF = 'pdf'

    def initialize_options(self):
        # Option defaults; distutils overwrites these with user input
        # before finalize_options() runs.
        self.format = None
        self.verbose = False
        self.config = None
        self.names = ''
        self.output_dir = 'docs'

    def finalize_options(self):
        # Missing or unknown formats silently fall back to HTML.
        if self.format is None:
            self.format = self.FORMAT_HTML
        if self.format not in (self.FORMAT_HTML, self.FORMAT_PDF):
            self.format = self.FORMAT_HTML
        # Raw string: '[\s,]+' relied on '\s' not being a recognized escape
        # and triggers an invalid-escape warning on modern Pythons.
        self.names = [module.strip()
                      for module in re.split(r'[\s,]+', self.names)
                      if module.strip()]

    def run(self):
        '''Invoke epydoc for the configured packages.'''
        # ensure that everything that's needed is built
        self.run_command('build')

        outdir = os.path.join(self.output_dir, self.format)
        try:
            os.makedirs(outdir)
        except OSError:
            # Output directory already exists; nothing to do.
            pass

        # build the argument string
        cmdline = []
        cmdline.append('--' + self.format)
        cmdline.append('-o')
        cmdline.append(outdir)
        if self.verbose:
            cmdline.append('-v')
        if self.config is not None:
            cmdline.append('--config')
            cmdline.append(self.config)

        base = self.get_finalized_command('build_py')
        names = []
        if not self.names:
            # Default: document every configured package, resolved to its
            # on-disk directory so package_dir remappings are honored.
            for package, _, _ in base.find_all_modules():
                pdir = base.get_package_dir(package)
                names.append(pdir)
            cmdline = cmdline + list(set(names))
        else:
            cmdline = cmdline + self.names

        import copy
        import epydoc.cli as ep
        argv = copy.copy(sys.argv)
        try:
            # epydoc's CLI reads sys.argv directly; swap it in temporarily.
            sys.argv = cmdline
            ep.cli()
        finally:
            sys.argv = argv
| import os
import sys
import re
from setuptools import Command
class EpydocCommand(Command):
    '''
    Setuptools command used to build an API documentation with epydoc.

    @author: jwienke
    '''

    # (long option, short option, help) triples understood by distutils.
    user_options = [('format=', 'f',
                     'the output format to use (html and pdf)'),
                    ('config=', 'c',
                     'Epydoc configuration file'),
                    ('names=', None,
                     'Names of packages to document. Defaults to all '
                     'configured packages in the project. Comma-separated.'),
                    ('output-dir=', 'o',
                     'Folder for generated output. Default: docs'),
                    ('verbose', 'v', 'print verbose warnings')]
    description = 'Generates an API documentation using epydoc.'

    FORMAT_HTML = 'html'
    FORMAT_PDF = 'pdf'

    def initialize_options(self):
        # Option defaults; user-supplied values are filled in afterwards.
        self.format = None
        self.verbose = False
        self.config = None
        self.names = ''
        self.output_dir = 'docs'

    def finalize_options(self):
        # Missing or unknown formats silently fall back to HTML.
        if self.format is None:
            self.format = self.FORMAT_HTML
        if not self.format in [self.FORMAT_HTML, self.FORMAT_PDF]:
            self.format = self.FORMAT_HTML
        # Split the comma/whitespace separated --names value into a list.
        self.names = [module.strip()
                      for module in re.split('[\s,]+', self.names)
                      if len(module.strip()) > 0]

    def run(self):
        # ensure that everything that's needed is built
        self.run_command('build')

        outdir = os.path.join(self.output_dir, self.format)
        try:
            os.makedirs(outdir)
        except OSError:
            # Output directory already exists; nothing to do.
            pass

        # build the argument string
        # NOTE(review): 'foo' presumably acts as a dummy argv[0], since
        # epydoc's CLI parses sys.argv like a regular program -- confirm.
        cmdline = ['foo']
        cmdline.append('--' + self.format)
        cmdline.append('-o')
        cmdline.append(outdir)
        if self.verbose:
            cmdline.append('-v')
        if self.config is not None:
            cmdline.append('--config')
            cmdline.append(self.config)

        base = self.get_finalized_command('build_py')
        if self.names is None or len(self.names) == 0:
            # Default: document every package found by build_py.
            for package, _, _ in base.find_all_modules():
                cmdline.append(package)
        else:
            cmdline = cmdline + self.names

        import copy
        import epydoc.cli as ep
        argv = copy.copy(sys.argv)
        try:
            # epydoc reads sys.argv directly; swap it in temporarily.
            sys.argv = cmdline
            ep.cli()
        finally:
            sys.argv = argv
| Python | 0 |
48cd6af0e138dd28b18ca3a71f41976c71483445 | Add --forceuninstall option | Python/brewcaskupgrade.py | Python/brewcaskupgrade.py | #! /usr/bin/env python3
# -*- coding: utf8 -*-
import argparse
import shutil
from subprocess import check_output, run
# Command line interface: --pretend only prints what would be done;
# --forceuninstall removes a cask before reinstalling it.
parser = argparse.ArgumentParser(description='Update every entries found in cask folder.')
parser.add_argument('--pretend', dest='pretend', action='store_true',
                    help='Pretend to take action.')
parser.add_argument('--forceuninstall', dest='forceuninstall', action='store_true',
                    help='Force uninstall before install.')
parser.set_defaults(pretend=False, forceuninstall=False)
args = parser.parse_args()

brew_bin = 'brew'
if not shutil.which(brew_bin):
    # Homebrew is not on PATH; nothing we can do.
    raise FileExistsError(brew_bin + ' not exists')

list_command = [
    brew_bin,
    'cask',
    'list'
]

list_installed = str.split(check_output(list_command).decode(), '\n')
# Drop the empty entry produced by the trailing newline.  The original
# "i is not ''" identity test only worked because CPython interns the
# empty string; plain truthiness is the correct check.
list_installed = [i for i in list_installed if i]

print(str(len(list_installed)) + ' cask(s) installed')

updated_count = 0
for cask in list_installed:
    info_command = [
        brew_bin,
        'cask',
        'info',
        cask
    ]
    try:
        install_status = str.splitlines(check_output(info_command).decode())
    except Exception:
        # "brew cask info" failed: we cannot tell which version is
        # installed, so skip this cask.  The previous fallback string
        # made the parsing below crash with an IndexError.
        print('Could not query info for', cask)
        continue
    # First info line looks like "<cask>: <version>"; take the version part.
    version = str.strip(str.split(install_status[0], ':')[1])
    # Initialise the flag *before* scanning the info output; resetting it
    # after the loop (as before) discarded the scan result and forced a
    # reinstall of every cask.
    is_version_installed = False
    for line in install_status:
        # A non-header line mentioning both cask and version means the
        # current version is already installed.
        if not line.startswith(cask) and cask in line and version in line:
            is_version_installed = True
    if not is_version_installed:
        print('Installing', cask)
        install_command = [
            brew_bin,
            'cask',
            'install',
            '--force',
            cask
        ]
        if args.pretend:
            print(' '.join(install_command))
        else:
            if args.forceuninstall:
                uninstall_command = [
                    brew_bin,
                    'cask',
                    'uninstall',
                    '--force',
                    cask
                ]
                run(uninstall_command)
            run(install_command)
        updated_count += 1

print(str(updated_count) + ' cask(s) updated')
| #! /usr/bin/env python3
# -*- coding: utf8 -*-
import argparse
import shutil
from subprocess import check_output, run
# Command line interface: --pretend only prints what would be done.
parser = argparse.ArgumentParser(description='Update every entries found in cask folder.')
parser.add_argument('--pretend', dest='pretend', action='store_true',
                    help='Pretend to take action.')
parser.set_defaults(pretend=False)
args = parser.parse_args()

brew_bin = 'brew'
if not shutil.which(brew_bin):
    # Homebrew is not on PATH; nothing we can do.
    raise FileExistsError(brew_bin + ' not exists')

list_command = [
    brew_bin,
    'cask',
    'list'
]

list_installed = str.split(check_output(list_command).decode(), '\n')
# NOTE(review): "is not ''" is an identity test that only works because
# CPython interns the empty string; "if i" would be the safe form.
list_installed = [i for i in list_installed if i is not '']

print(str(len(list_installed)) + ' cask(s) installed')

updated_count = 0
for cask in list_installed:
    info_command = [
        brew_bin,
        'cask',
        'info',
        cask
    ]
    try:
        install_status = str.splitlines(check_output(info_command).decode())
    except:
        # NOTE(review): bare except, and the string fallback makes the
        # split below operate on the first character 'N' -- presumably
        # this path never fires in practice; confirm.
        install_status = 'Not installed'
    # First info line looks like "<cask>: <version>"; take the version part.
    version = str.strip(str.split(install_status[0], ':')[1])
    is_version_installed = False
    for line in install_status:
        # A non-header line mentioning both cask and version means the
        # current version is already installed.
        if not line.startswith(cask) and cask in line and version in line:
            is_version_installed = True
    if not is_version_installed:
        print('Installing', cask)
        install_command = [
            brew_bin,
            'cask',
            'install',
            '--force',
            cask
        ]
        if args.pretend:
            print(' '.join(install_command))
        else:
            run(install_command)
        updated_count += 1

print(str(updated_count) + ' cask(s) updated')
a4bc6c0c4d13629dbdfef30edcba262efce0eaff | fix up config for heroku | colorsearchtest/settings.py | colorsearchtest/settings.py | # -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
    """Base configuration shared by all environments."""

    SECRET_KEY = os.environ.get('COLORSEARCHTEST_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # On Heroku the database URL arrives via DATABASE_URL; everywhere else
    # fall back to the project-specific variable.  A conditional expression
    # replaces the fragile "cond and a or b" idiom, which silently picks
    # the wrong branch whenever the "and" value is falsy (e.g. an empty
    # DATABASE_URL).  os.environ is used consistently instead of mixing it
    # with the os_env alias.
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL', 'postgresql://localhost/example')
        if os.environ.get('HEROKU') is not None
        else os.environ.get('COLORSEARCHTEST_DATABASE_URI',
                            'postgresql://localhost/example'))  # TODO: Change me
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    IS_DELTA_E_COLORMATH_ENABLED = False
    IS_DELTA_E_DBQUERY_ENABLED = True
    MAX_COLORS = 100
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar


class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class TestConfig(Config):
    """Configuration used by the automated test suite."""
    TESTING = True
    DEBUG = True
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
| # -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
    """Base configuration shared by all environments."""
    SECRET_KEY = os_env.get('COLORSEARCHTEST_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # Database connection string; overridable via environment variable.
    SQLALCHEMY_DATABASE_URI = os_env.get(
        'COLORSEARCHTEST_DATABASE_URI',
        'postgresql://localhost/example')  # TODO: Change me
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    IS_DELTA_E_COLORMATH_ENABLED = False
    IS_DELTA_E_DBQUERY_ENABLED = True
    MAX_COLORS = 100
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar


class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class TestConfig(Config):
    """Configuration used by the automated test suite."""
    TESTING = True
    DEBUG = True
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
| Python | 0.000001 |
f20eb91dcf04bc8e33fbb48ebfbef1b56acbf02d | Make functions that pull a number of tweets and pics | web.py | web.py | """ Heroku/Python Quickstart: https://blog.heroku.com/archives/2011/9/28/python_and_django"""
import os
import random
import requests
from flask import Flask
import tweepy
import settings
app = Flask(__name__)
@app.route('/')
def home_page():
    # Landing page; doubles as a simple health check.
    return 'Hello from the SPARK learn-a-thon!'
def get_instagram_image():
    """Return a list of recent #spark Instagram image URLs.

    Picks ``choose_number_of_images()`` random entries (with possible
    repeats) from the most recent media tagged "spark".
    """
    instagram_api_url = 'https://api.instagram.com/v1/tags/spark/media/recent?client_id={}'.format(settings.CLIENT_ID)
    data = requests.get(instagram_api_url).json()['data']

    number_of_images = choose_number_of_images()
    images_returned = []
    # BUG FIX: the original iterated over the int itself
    # ("for image in number_of_images"), which raises TypeError.
    for _ in range(number_of_images):
        images_returned.append(random.choice(data)['images']['low_resolution']['url'])
    return images_returned
def get_tweets():
    """Return the text of recent tweets tagged #spark."""
    auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
    auth.set_access_token(settings.ACCESS_KEY, settings.ACCESS_SECRET)
    api = tweepy.API(auth)

    limit = choose_number_of_tweets()
    cursor = tweepy.Cursor(api.search, q='#spark')
    return [status.text for status in cursor.items(limit=limit)]
def choose_number_of_images():
    """How many Instagram images to fetch per request."""
    return 3
def choose_number_of_tweets():
    """How many tweets to fetch per request."""
    return 3
if __name__ == '__main__':
    # Heroku provides the listening port via $PORT; default for local runs.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
| """ Heroku/Python Quickstart: https://blog.heroku.com/archives/2011/9/28/python_and_django"""
import os
from flask import Flask
app = Flask(__name__)
@app.route('/')
def home_page():
    # Landing page; doubles as a simple health check.
    return 'Hello from the SPARK learn-a-thon!'
if __name__ == '__main__':
    # Heroku provides the listening port via $PORT; default for local runs.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
| Python | 0.000002 |
2a87ed1772a530b07c69e1d2086cd54160dd440a | fix sample test | samples/snippets/speech_adaptation_beta.py | samples/snippets/speech_adaptation_beta.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_adaptation_beta")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Speech Adaptation (Cloud Storage)
# description: Transcribe a short audio file with speech adaptation.
# usage: python3 samples/v1p1beta1/speech_adaptation_beta.py [--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"] [--phrase "Brooklyn Bridge"]
# [START speech_adaptation_beta]
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
def sample_recognize(storage_uri, phrase):
    """
    Transcribe a short audio file with speech adaptation.

    Args:
      storage_uri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
      phrase Phrase "hints" help recognize the specified phrases from your audio.
    """

    client = speech_v1p1beta1.SpeechClient()

    # storage_uri = 'gs://cloud-samples-data/speech/brooklyn_bridge.mp3'
    # phrase = 'Brooklyn Bridge'

    # Hint Boost. This value increases the probability that a specific
    # phrase will be recognized over other similar sounding phrases.
    # The higher the boost, the higher the chance of false positive
    # recognition as well. Can accept wide range of positive values.
    # Most use cases are best served with values between 0 and 20.
    # Using a binary search happroach may help you find the optimal value.
    speech_contexts = [{"phrases": [phrase], "boost": 20.0}]

    config = {
        "speech_contexts": speech_contexts,
        # Sample rate in Hertz of the audio data sent
        "sample_rate_hertz": 44100,
        # The language of the supplied audio
        "language_code": "en-US",
        # Encoding of audio data sent. This sample sets this explicitly.
        # This field is optional for FLAC and WAV audio formats.
        "encoding": enums.RecognitionConfig.AudioEncoding.MP3,
    }
    audio = {"uri": storage_uri}

    response = client.recognize(config, audio)
    for result in response.results:
        # First alternative is the most probable result
        alternative = result.alternatives[0]
        print(u"Transcript: {}".format(alternative.transcript))

    # [END speech_adaptation_beta]
    return response
def main():
    """Parse CLI flags and run the sample with (possibly default) values."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--storage_uri",
        type=str,
        default="gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
    )
    arg_parser.add_argument("--phrase", type=str, default="Brooklyn Bridge")
    parsed = arg_parser.parse_args()

    sample_recognize(parsed.storage_uri, parsed.phrase)


if __name__ == "__main__":
    main()
| # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_adaptation_beta")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Speech Adaptation (Cloud Storage)
# description: Transcribe a short audio file with speech adaptation.
# usage: python3 samples/v1p1beta1/speech_adaptation_beta.py [--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"] [--phrase "Brooklyn Bridge"]
# [START speech_adaptation_beta]
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
def sample_recognize(storage_uri, phrase):
    """
    Transcribe a short audio file with speech adaptation.

    Args:
      storage_uri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
      phrase Phrase "hints" help recognize the specified phrases from your audio.
    """

    client = speech_v1p1beta1.SpeechClient()

    # storage_uri = 'gs://cloud-samples-data/speech/brooklyn_bridge.mp3'
    # phrase = 'Brooklyn Bridge'
    phrases = [phrase]

    # Hint Boost. This value increases the probability that a specific
    # phrase will be recognized over other similar sounding phrases.
    # The higher the boost, the higher the chance of false positive
    # recognition as well. Can accept wide range of positive values.
    # Most use cases are best served with values between 0 and 20.
    # Using a binary search happroach may help you find the optimal value.
    boost = 20.0
    speech_contexts_element = {"phrases": phrases, "boost": boost}
    speech_contexts = [speech_contexts_element]

    # Sample rate in Hertz of the audio data sent
    sample_rate_hertz = 44100

    # The language of the supplied audio
    language_code = "en-US"

    # Encoding of audio data sent. This sample sets this explicitly.
    # This field is optional for FLAC and WAV audio formats.
    encoding = enums.RecognitionConfig.AudioEncoding.MP3
    config = {
        "speech_contexts": speech_contexts,
        "sample_rate_hertz": sample_rate_hertz,
        "language_code": language_code,
        "encoding": encoding,
    }
    audio = {"uri": storage_uri}

    # Transcripts are only printed; the response object itself is not
    # returned to the caller in this version.
    response = client.recognize(config, audio)
    for result in response.results:
        # First alternative is the most probable result
        alternative = result.alternatives[0]
        print(u"Transcript: {}".format(alternative.transcript))

    # [END speech_adaptation_beta]
def main():
    # Parse CLI flags and run the sample with (possibly default) values.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--storage_uri",
        type=str,
        default="gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
    )
    parser.add_argument("--phrase", type=str, default="Brooklyn Bridge")
    args = parser.parse_args()

    sample_recognize(args.storage_uri, args.phrase)


if __name__ == "__main__":
    main()
| Python | 0.000729 |
74a5836d41386a847d2e69e2335e0825fb64972f | Add CPU_ONLY tag for sparse_feature_hash layer | caffe2/python/layers/sparse_feature_hash.py | caffe2/python/layers/sparse_feature_hash.py | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class SparseFeatureHash(ModelLayer):
    """Layer that remaps sparse feature ids into a fixed-size id space,
    either by hashing (IndexHash) or by a plain modulo."""

    def __init__(self, model, input_record, seed=0, modulo=None,
                 use_hashing=True, name='sparse_feature_hash', **kwargs):
        # seed: hash seed forwarded to IndexHash.
        # modulo: target id-space size; derived from the input metadata
        #   when not given explicitly.
        # use_hashing: hash ids (True) or modulo them directly (False).
        super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
        self.seed = seed
        self.use_hashing = use_hashing
        if schema.equal_schemas(input_record, IdList):
            self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
            # Carry over the feature specs, but cap the categorical limit
            # at the new (hashed) id-space size.
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.List(
                values=hashed_indices,
                lengths_blob=input_record.lengths,
            )
        elif schema.equal_schemas(input_record, IdScoreList):
            self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.Map(
                keys=hashed_indices,
                values=input_record.values,
                lengths_blob=input_record.lengths,
            )
        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)

        # operators in this layer do not have CUDA implementation yet.
        # In addition, since the sparse feature keys that we are hashing are
        # typically on CPU originally, it makes sense to have this layer on CPU.
        self.tags.update([Tags.CPU_ONLY])

    def extract_hash_size(self, metadata):
        # Prefer an explicitly requested hash size, then fall back to the
        # categorical limit recorded in the metadata.
        if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
            return metadata.feature_specs.desired_hash_size
        elif metadata.categorical_limit is not None:
            return metadata.categorical_limit
        else:
            assert False, "desired_hash_size or categorical_limit must be set"

    def add_ops(self, net):
        # Pick the id blob to remap depending on the schema kind.
        if schema.equal_schemas(self.output_schema, IdList):
            input_blob = self.input_record.items()
            output_blob = self.output_schema.items()
        elif schema.equal_schemas(self.output_schema, IdScoreList):
            input_blob = self.input_record.keys()
            output_blob = self.output_schema.keys()
        else:
            raise NotImplementedError()

        if self.use_hashing:
            net.IndexHash(
                input_blob, output_blob, seed=self.seed, modulo=self.modulo
            )
        else:
            net.Mod(
                input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
            )
| # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
import numpy as np
class SparseFeatureHash(ModelLayer):
    """Layer that remaps sparse feature ids into a fixed-size id space,
    either by hashing (IndexHash) or by a plain modulo."""

    def __init__(self, model, input_record, seed=0, modulo=None,
                 use_hashing=True, name='sparse_feature_hash', **kwargs):
        # seed: hash seed forwarded to IndexHash.
        # modulo: target id-space size; derived from the input metadata
        #   when not given explicitly.
        # use_hashing: hash ids (True) or modulo them directly (False).
        super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
        self.seed = seed
        self.use_hashing = use_hashing
        if schema.equal_schemas(input_record, IdList):
            self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
            # Carry over the feature specs, but cap the categorical limit
            # at the new (hashed) id-space size.
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.List(
                values=hashed_indices,
                lengths_blob=input_record.lengths,
            )
        elif schema.equal_schemas(input_record, IdScoreList):
            self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.Map(
                keys=hashed_indices,
                values=input_record.values,
                lengths_blob=input_record.lengths,
            )
        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)

    def extract_hash_size(self, metadata):
        # Prefer an explicitly requested hash size, then fall back to the
        # categorical limit recorded in the metadata.
        if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
            return metadata.feature_specs.desired_hash_size
        elif metadata.categorical_limit is not None:
            return metadata.categorical_limit
        else:
            assert False, "desired_hash_size or categorical_limit must be set"

    def add_ops(self, net):
        # Pick the id blob to remap depending on the schema kind.
        if schema.equal_schemas(self.output_schema, IdList):
            input_blob = self.input_record.items()
            output_blob = self.output_schema.items()
        elif schema.equal_schemas(self.output_schema, IdScoreList):
            input_blob = self.input_record.keys()
            output_blob = self.output_schema.keys()
        else:
            raise NotImplementedError()

        if self.use_hashing:
            net.IndexHash(
                input_blob, output_blob, seed=self.seed, modulo=self.modulo
            )
        else:
            net.Mod(
                input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
            )
| Python | 0.000001 |
0d58c2ffc8ec6afc353a242f942f668b0b7f362c | Correct shipping repository method calls | sandbox/apps/shipping/repository.py | sandbox/apps/shipping/repository.py | from decimal import Decimal as D
from oscar.apps.shipping.methods import Free, FixedPrice
from oscar.apps.shipping.repository import Repository as CoreRepository
class Repository(CoreRepository):
    """
    This class is included so that there is a choice of shipping methods.
    Oscar's default behaviour is to only have one which means you can't test
    the shipping features of PayPal.
    """

    def get_shipping_methods(self, user, basket, shipping_addr=None, **kwargs):
        # Offer free shipping plus two fixed-price options.
        available = [Free()]
        for price in ('10.00', '20.00'):
            available.append(FixedPrice(D(price)))
        return self.prime_methods(basket, available)
| from decimal import Decimal as D
from oscar.apps.shipping.methods import Free, FixedPrice
from oscar.apps.shipping.repository import Repository as CoreRepository
class Repository(CoreRepository):
    """
    This class is included so that there is a choice of shipping methods.
    Oscar's default behaviour is to only have one which means you can't test the
    shipping features of PayPal.
    """

    def get_shipping_methods(self, user, basket, shipping_addr=None, **kwargs):
        # Offer free shipping plus two fixed-price options.
        methods = [Free(), FixedPrice(D('10.00')), FixedPrice(D('20.00'))]
        return self.add_basket_to_methods(basket, methods)
| Python | 0 |
edec18a82d6027c8a011fbef84c8aa3b80e18826 | Update forward_device1.py | Server/forward_device1.py | Server/forward_device1.py | import zmq
def main():
print "\nServer for ProBot is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| import zmq
def main():
    # Forwarder device: relays messages published to port 5559
    # out to subscribers connected on port 5560.  (Python 2 source.)
    print "\nServer for ProBot is running..."
    try:
        context = zmq.Context(1)
        # Socket facing clients
        frontend = context.socket(zmq.SUB)
        frontend.bind("tcp://*:5559")
        frontend.setsockopt(zmq.SUBSCRIBE, "")
        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind("tcp://*:5560")

        zmq.device(zmq.FORWARDER, frontend, backend)
    except Exception, e:
        print e
        print "bringing down zmq device"
    finally:
        pass
        # NOTE(review): if socket creation failed above, these names are
        # undefined and this cleanup raises NameError -- confirm.
        frontend.close()
        backend.close()
        context.term()

if __name__ == "__main__":
    main()
| Python | 0.000006 |
2100b512ffb188374e1d883cd2f359586182596b | ADD migration name | packages/grid/backend/alembic/versions/2021-09-20_916812f40fb4.py | packages/grid/backend/alembic/versions/2021-09-20_916812f40fb4.py | """ADD daa_document column at setup table
Revision ID: 916812f40fb4
Revises: 5796f6ceb314
Create Date: 2021-09-20 01:07:37.239186
"""
# third party
from alembic import op # type: ignore
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "916812f40fb4"
down_revision = "5796f6ceb314"
branch_labels = None
depends_on = None
def upgrade() -> None:
    # Add the column that stores the uploaded DAA document reference.
    # (Dead trailing "pass" removed.)
    op.add_column("setup", sa.Column("daa_document", sa.String(255), default=""))


def downgrade() -> None:
    # Reverse the upgrade so the migration is fully reversible; the
    # original left this as a no-op, stranding the column on downgrade.
    op.drop_column("setup", "daa_document")
| """empty message
Revision ID: 916812f40fb4
Revises: 5796f6ceb314
Create Date: 2021-09-20 01:07:37.239186
"""
# third party
from alembic import op # type: ignore
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "916812f40fb4"
down_revision = "5796f6ceb314"
branch_labels = None
depends_on = None
def upgrade() -> None:
    # Adds the daa_document column used to store the uploaded DAA file.
    op.add_column("setup", sa.Column("daa_document", sa.String(255), default=""))
    pass


def downgrade() -> None:
    # NOTE(review): no-op -- the added column is not dropped on downgrade.
    pass
| Python | 0.000003 |
80ede493f698395176d3c67dd1e4f3723b0d5859 | Add initial pass at writing the git commit hook | mothermayi/hook.py | mothermayi/hook.py | import logging
import os
LOGGER = logging.getLogger(__name__)
class NoRepoFoundError(Exception):
    """Raised when no .git directory exists at or above the working directory."""


class PreCommitExists(Exception):
    """Raised when a pre-commit hook is already installed."""
def find_git_repo():
    """Walk upwards from the current directory and return the nearest .git path.

    Raises:
        NoRepoFoundError: when the filesystem root is reached first.
    """
    current = os.path.abspath('.')
    while current != '/':
        candidate = os.path.join(current, '.git')
        if os.path.isdir(candidate):
            return candidate
        current = os.path.dirname(current)
    raise NoRepoFoundError("Could not find a git repository (.git) in {}".format(os.path.abspath('.')))
HOOK_CONTENT = """
mothermayi run
"""
def write_hook(pre_commit):
with open(pre_commit, 'w') as f:
f.write(HOOK_CONTENT)
def install():
    """Install the mothermayi pre-commit hook into the enclosing git repository."""
    repo = find_git_repo()
    LOGGER.debug("Found git repo at %s", repo)
    pre_commit = os.path.join(repo, 'hooks', 'pre-commit')
    if os.path.exists(pre_commit):
        raise PreCommitExists("A git hook already exists at {}. Refusing to overwrite. Please remove it manually".format(pre_commit))
    write_hook(pre_commit)
| import logging
import os
LOGGER = logging.getLogger(__name__)
class NoRepoFoundError(Exception):
    """Raised when no .git directory is found while walking upwards."""
    pass
def find_git_repo():
    """Return the path of the nearest .git directory at or above the CWD.

    Raises:
        NoRepoFoundError: when the filesystem root is reached without
            finding a repository.
    """
    location = os.path.abspath('.')
    while location != '/':
        check = os.path.join(location, '.git')
        if os.path.exists(check) and os.path.isdir(check):
            return check
        location = os.path.dirname(location)
    raise NoRepoFoundError("Could not find a git repository (.git) in {}".format(os.path.abspath('.')))
def install():
    """Locate the enclosing git repository (hook writing not yet implemented here)."""
    repo = find_git_repo()
    LOGGER.debug("Found git repo at %s", repo)
| Python | 0 |
5d5f73ac411873c0ec82e233b74ce70f4de4ab03 | Optimize migration process | openprocurement/planning/api/migration.py | openprocurement/planning/api/migration.py | # -*- coding: utf-8 -*-
import logging
from openprocurement.planning.api.traversal import Root
from openprocurement.planning.api.models import Plan
LOGGER = logging.getLogger(__name__)
SCHEMA_VERSION = 1
SCHEMA_DOC = 'openprocurement_plans_schema'
def get_db_schema_version(db):
    """Return the stored schema version, or SCHEMA_VERSION - 1 if unset."""
    doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
    return doc.get("version", SCHEMA_VERSION - 1)
def set_db_schema_version(db, version):
    """Persist *version* in the schema bookkeeping document."""
    doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
    doc["version"] = version
    db.save(doc)
def migrate_data(registry, destination=None):
    """Run pending schema migration steps up to *destination* (or latest).

    Steps are functions named ``from<N>to<N+1>`` looked up in this module's
    globals; each completed step bumps the stored schema version.
    """
    if registry.settings.get('plugins') and 'planning' not in registry.settings['plugins'].split(','):
        # Planning plugin disabled; nothing to migrate.
        return
    cur_version = get_db_schema_version(registry.db)
    if cur_version == SCHEMA_VERSION:
        # Already up to date.
        return cur_version
    for step in xrange(cur_version, destination or SCHEMA_VERSION):
        LOGGER.info("Migrate openprocurement plans schema from {} to {}".format(step, step + 1), extra={'MESSAGE_ID': 'migrate_data'})
        migration_func = globals().get('from{}to{}'.format(step, step + 1))
        if migration_func:
            migration_func(registry)
        set_db_schema_version(registry.db, step + 1)
def from0to1(registry):
    """Step 0 -> 1: re-serialize plans whose document urls are not docservice urls."""
    class Request(object):
        # Minimal stand-in that carries only the registry, as needed by the
        # model serialization machinery below.
        def __init__(self, registry):
            self.registry = registry

    # Touch the view once so the index is built before iterating.
    len(registry.db.view('plans/all', limit=1))
    results = registry.db.iterview('plans/all', 2 ** 10, include_docs=True, stale='update_after')
    docs = []
    request = Request(registry)
    root = Root(request)
    for i in results:
        doc = i.doc
        if not all([i.get('url', '').startswith(registry.docservice_url) for i in doc.get('documents', [])]):
            plan = Plan(doc)
            plan.__parent__ = root
            doc = plan.to_primitive()
            # NOTE(review): get_now is not imported in this module as shown
            # here -- confirm it is provided elsewhere or add the import.
            doc['dateModified'] = get_now().isoformat()
            docs.append(doc)
        if len(docs) >= 2 ** 7:
            # Flush in batches to bound memory usage.
            registry.db.update(docs)
            docs = []
    if docs:
        registry.db.update(docs)
| # -*- coding: utf-8 -*-
import logging
from openprocurement.planning.api.traversal import Root
from openprocurement.planning.api.models import Plan
LOGGER = logging.getLogger(__name__)
SCHEMA_VERSION = 1
SCHEMA_DOC = 'openprocurement_plans_schema'
def get_db_schema_version(db):
schema_doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
return schema_doc.get("version", SCHEMA_VERSION - 1)
def set_db_schema_version(db, version):
schema_doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
schema_doc["version"] = version
db.save(schema_doc)
def migrate_data(registry, destination=None):
if registry.settings.get('plugins') and 'planning' not in registry.settings['plugins'].split(','):
return
cur_version = get_db_schema_version(registry.db)
if cur_version == SCHEMA_VERSION:
return cur_version
for step in xrange(cur_version, destination or SCHEMA_VERSION):
LOGGER.info("Migrate openprocurement plans schema from {} to {}".format(step, step + 1), extra={'MESSAGE_ID': 'migrate_data'})
migration_func = globals().get('from{}to{}'.format(step, step + 1))
if migration_func:
migration_func(registry)
set_db_schema_version(registry.db, step + 1)
def from0to1(registry):
class Request(object):
def __init__(self, registry):
self.registry = registry
len(registry.db.view('plans/all', limit=1))
results = registry.db.iterview('plans/all', 2 ** 10, include_docs=True, stale='update_after')
docs = []
request = Request(registry)
root = Root(request)
for i in results:
doc = i.doc
if doc.get('documents'):
plan = Plan(doc)
plan.__parent__ = root
doc = plan.to_primitive()
docs.append(doc)
if len(docs) >= 2 ** 7:
registry.db.update(docs)
docs = []
if docs:
registry.db.update(docs)
| Python | 0.000004 |
47e2f60c8e10b6b2c87f2df40f362b70cb09fade | this should be a tuple | cyder/core/system/models.py | cyder/core/system/models.py | from django.db import models
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.cydhcp.keyvalue.models import KeyValue
class System(BaseModel, ObjectUrlMixin):
name = models.CharField(max_length=255, unique=False)
search_fields = ('name',)
display_fields = ('name', 'pk')
def __str__(self):
return "{0} : {1}".format(*(str(getattr(self, f))
for f in self.display_fields))
class Meta:
db_table = 'system'
def details(self):
"""For tables."""
data = super(System, self).details()
data['data'] = [
('Name', 'name', self),
]
return data
def eg_metadata(self):
"""EditableGrid metadata."""
return {'metadata': [
{'name': 'name', 'datatype': 'string', 'editable': True},
]}
class SystemKeyValue(KeyValue):
system = models.ForeignKey(System, null=False)
class Meta:
db_table = 'system_key_value'
unique_together = ('key', 'value', 'system')
| from django.db import models
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.cydhcp.keyvalue.models import KeyValue
class System(BaseModel, ObjectUrlMixin):
name = models.CharField(max_length=255, unique=False)
search_fields = ('name')
display_fields = ('name', 'pk')
def __str__(self):
return "{0} : {1}".format(*(str(getattr(self, f))
for f in self.display_fields))
class Meta:
db_table = 'system'
def details(self):
"""For tables."""
data = super(System, self).details()
data['data'] = [
('Name', 'name', self),
]
return data
def eg_metadata(self):
"""EditableGrid metadata."""
return {'metadata': [
{'name': 'name', 'datatype': 'string', 'editable': True},
]}
class SystemKeyValue(KeyValue):
system = models.ForeignKey(System, null=False)
class Meta:
db_table = 'system_key_value'
unique_together = ('key', 'value', 'system')
| Python | 1 |
310f1d32bf4edcd3046d6648d5133c8ef7a4a8d6 | Fix issue with system ctnr change not propagating correctly to its interfaces | cyder/core/system/models.py | cyder/core/system/models.py | from django.db import models
from django.db.models import Q
from django.db.models.loading import get_model
from cyder.base.eav.constants import ATTRIBUTE_INVENTORY
from cyder.base.eav.fields import EAVAttributeField
from cyder.base.eav.models import Attribute, EAVBase
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.base.utils import transaction_atomic
from cyder.core.system.validators import validate_no_spaces
class System(BaseModel, ObjectUrlMixin):
name = models.CharField(
max_length=255, unique=False, null=False, blank=False,
validators=[validate_no_spaces])
ctnr = models.ForeignKey("cyder.Ctnr", null=False,
verbose_name="Container")
search_fields = ('name',)
sort_fields = ('name',)
def __unicode__(self):
return self.name
class Meta:
app_label = 'cyder'
db_table = 'system'
@staticmethod
def filter_by_ctnr(ctnr, objects=None):
objects = objects if objects is not None else System.objects
return objects.filter(ctnr=ctnr)
def check_in_ctnr(self, ctnr):
return self.ctnr == ctnr
def details(self):
"""For tables."""
data = super(System, self).details()
data['data'] = [
('Name', 'name', self),
('Ctnr', 'ctnr', self.ctnr),
]
return data
@transaction_atomic
def delete(self, *args, **kwargs):
DynamicInterface = get_model('cyder', 'dynamicinterface')
for interface in DynamicInterface.objects.filter(system=self):
interface.delete(delete_system=False, commit=False)
StaticInterface = get_model('cyder', 'staticinterface')
for interface in StaticInterface.objects.filter(system=self):
interface.delete(delete_system=False, commit=False)
super(System, self).delete(*args, **kwargs)
@staticmethod
def eg_metadata():
"""EditableGrid metadata."""
return {'metadata': [
{'name': 'name', 'datatype': 'string', 'editable': True},
]}
@transaction_atomic
def save(self, *args, **kwargs):
self.full_clean()
super(System, self).save(*args, **kwargs)
for i in (list(self.staticinterface_set.all()) +
list(self.dynamicinterface_set.all())):
if self.ctnr != i.ctnr:
i.ctnr = self.ctnr
i.save()
class SystemAV(EAVBase):
class Meta(EAVBase.Meta):
app_label = 'cyder'
db_table = 'system_av'
entity = models.ForeignKey(System)
attribute = EAVAttributeField(Attribute,
type_choices=(ATTRIBUTE_INVENTORY,))
| from django.db import models
from django.db.models import Q
from django.db.models.loading import get_model
from cyder.base.eav.constants import ATTRIBUTE_INVENTORY
from cyder.base.eav.fields import EAVAttributeField
from cyder.base.eav.models import Attribute, EAVBase
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.base.utils import transaction_atomic
from cyder.core.system.validators import validate_no_spaces
class System(BaseModel, ObjectUrlMixin):
name = models.CharField(
max_length=255, unique=False, null=False, blank=False,
validators=[validate_no_spaces])
ctnr = models.ForeignKey("cyder.Ctnr", null=False,
verbose_name="Container")
search_fields = ('name',)
sort_fields = ('name',)
def __unicode__(self):
return self.name
class Meta:
app_label = 'cyder'
db_table = 'system'
@staticmethod
def filter_by_ctnr(ctnr, objects=None):
objects = objects if objects is not None else System.objects
return objects.filter(ctnr=ctnr)
def check_in_ctnr(self, ctnr):
return self.ctnr == ctnr
def details(self):
"""For tables."""
data = super(System, self).details()
data['data'] = [
('Name', 'name', self),
('Ctnr', 'ctnr', self.ctnr),
]
return data
@transaction_atomic
def delete(self, *args, **kwargs):
DynamicInterface = get_model('cyder', 'dynamicinterface')
for interface in DynamicInterface.objects.filter(system=self):
interface.delete(delete_system=False, commit=False)
StaticInterface = get_model('cyder', 'staticinterface')
for interface in StaticInterface.objects.filter(system=self):
interface.delete(delete_system=False, commit=False)
super(System, self).delete(*args, **kwargs)
@staticmethod
def eg_metadata():
"""EditableGrid metadata."""
return {'metadata': [
{'name': 'name', 'datatype': 'string', 'editable': True},
]}
@transaction_atomic
def save(self, *args, **kwargs):
self.full_clean()
super(System, self).save(*args, **kwargs)
def clean(self, *args, **kwargs):
for i in (list(self.staticinterface_set.all()) +
list(self.dynamicinterface_set.all())):
if self.ctnr != i.ctnr:
i.ctnr = self.ctnr
i.save()
super(System, self).clean(*args, **kwargs)
class SystemAV(EAVBase):
class Meta(EAVBase.Meta):
app_label = 'cyder'
db_table = 'system_av'
entity = models.ForeignKey(System)
attribute = EAVAttributeField(Attribute,
type_choices=(ATTRIBUTE_INVENTORY,))
| Python | 0 |
d0a9d10d0df25de670e8bf9a1e603ed1fbe5ca29 | use helpers | py3status/modules/taskwarrior.py | py3status/modules/taskwarrior.py | # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
"""
import json
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
def taskWarrior(self):
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
task_command = 'task start.before:tomorrow status:pending export'
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 5)
format: display format for taskwarrior (default '{task}')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
"""
# import your useful libs here
from subprocess import check_output
import json
import shlex
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
def taskWarrior(self):
command = 'task start.before:tomorrow status:pending export'
taskwarrior_output = check_output(shlex.split(command))
tasks_json = json.loads(taskwarrior_output.decode('utf-8'))
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
result = ', '.join(map(describeTask, tasks_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| Python | 0.001805 |
c9b7e886f9276079fc79fbe394f5b15595f04603 | Test fixes | ownblock/ownblock/apps/messaging/tests.py | ownblock/ownblock/apps/messaging/tests.py | from unittest.mock import Mock
from django.test import TestCase
from rest_framework import serializers
from apps.accounts.tests import ResidentFactory
from apps.buildings.tests import ApartmentFactory
from .serializers import MessageSerializer
class SerializerTests(TestCase):
def test_validate_recipient_if_same_as_sender(self):
apt = ApartmentFactory.create()
req = Mock()
req.user = ResidentFactory.create(apartment=apt)
serializer = MessageSerializer(context={'request': req})
attrs = {'recipient': req.user}
self.assertRaises(serializers.ValidationError,
serializer.validate_recipient, attrs, 'recipient')
def test_validate_recipient_if_does_not_exist(self):
apt = ApartmentFactory.create()
req = Mock()
req.user = ResidentFactory.create(apartment=apt)
recipient = ResidentFactory.create()
serializer = MessageSerializer(context={'request': req})
attrs = {'recipient': recipient}
self.assertRaises(serializers.ValidationError,
serializer.validate_recipient, attrs, 'recipient')
def test_validate_recipient_if_ok(self):
pass
| from unittest.mock import Mock
from django.test import TestCase
from rest_framework import serializers
from apps.accounts.tests import ResidentFactory
from apps.buildings.tests import ApartmentFactory
from .serializers import MessageSerializer
class SerializerTests(TestCase):
def test_validate_recipient_if_same_as_sender(self):
apt = ApartmentFactory.create()
req = Mock()
req.user = ResidentFactory.create(apartment=apt)
serializer = MessageSerializer(context={'request': req})
attrs = {'recipient': req.user}
self.assertRaises(serializers.ValidationError,
serializer.validate_recipient, attrs, 'recipient')
def test_validate_recipient_if_does_not_exist(self):
pass
def test_validate_recipient_if_ok(self):
pass
| Python | 0.000001 |
5e9eda407832d9b97e7f78219f20236e04306a32 | fix test, probably broken by a epydoc change this code is dead though so i don't much care | pydoctor/test/test_formatting.py | pydoctor/test/test_formatting.py | from pydoctor import html, model
from py import test
def test_signatures():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.getBetterThanArgspec(argspec) == (['a'], [('b', 1), ('c', 2)])
def test_strsig():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.signature(argspec) == "a, b=1, c=2"
def test_strsigvar():
argspec = [['a', 'b', 'c'], 'args', 'kk', (1,2)]
assert html.signature(argspec) == "a, *args, b=1, c=2, **kk"
def test_strsigargh():
argspec = [['a', ['b','c']], None, None, ()]
assert html.signature(argspec) == 'a, (b, c)'
def test_link():
doc0 = model.Documentable(None, 'twisted', None)
docco = model.Documentable(None, 'threadz', None, doc0)
assert html.link(docco) == 'twisted.threadz.html'
def test_summaryDoc():
docco = model.Documentable(None, 'threadz', 'Woot\nYeah')
assert html.summaryDoc(docco) == html.doc2html(docco, 'Woot')
def test_boringDocstring():
assert html.boringDocstring('Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_reallyBoringDocstring():
undocced = '<pre class="undocumented">Undocumented</pre>'
assert html.boringDocstring('') == undocced
assert html.boringDocstring(None) == undocced
def test_doc2htmlEpy():
if not html.EPYTEXT:
test.skip("Epytext not available")
assert html.doc2html(None, 'Woot\nYeah') == '<div><p>Woot Yeah</p>\n</div>'
class TestEpyHackers:
def setup_method(self, meth):
self.orig = html.EPYTEXT
def teardown_method(self, meth):
html.EPYTEXT = self.orig
def test_doc2htmlBoring(self):
if html.EPYTEXT:
html.EPYTEXT = False
assert html.doc2html(object(), 'Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_generateModuleIndex(self):
#This test is a bit un-unity
# And *damnit* how do I write teardowners
html.EPYTEXT = False
sysw = html.SystemWriter(None)
pack = model.Package(None, 'twisted', None)
mod = model.Module(None, 'threadz', 'Woot\nYeah', pack)
fun = model.Function(None, 'blat', 'HICKY HECK\nYEAH', mod)
fun.argspec = [(), None, None, ()]
out = sysw.getHTMLFor(fun)
assert 'blat()' in out
assert 'HICKY HECK\nYEAH' in out
| from pydoctor import html, model
from py import test
def test_signatures():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.getBetterThanArgspec(argspec) == (['a'], [('b', 1), ('c', 2)])
def test_strsig():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.signature(argspec) == "a, b=1, c=2"
def test_strsigvar():
argspec = [['a', 'b', 'c'], 'args', 'kk', (1,2)]
assert html.signature(argspec) == "a, *args, b=1, c=2, **kk"
def test_strsigargh():
argspec = [['a', ['b','c']], None, None, ()]
assert html.signature(argspec) == 'a, (b, c)'
def test_link():
doc0 = model.Documentable(None, 'twisted', None)
docco = model.Documentable(None, 'threadz', None, doc0)
assert html.link(docco) == 'twisted.threadz.html'
def test_summaryDoc():
docco = model.Documentable(None, 'threadz', 'Woot\nYeah')
assert html.summaryDoc(docco) == html.doc2html(docco, 'Woot')
def test_boringDocstring():
assert html.boringDocstring('Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_reallyBoringDocstring():
undocced = '<pre class="undocumented">Undocumented</pre>'
assert html.boringDocstring('') == undocced
assert html.boringDocstring(None) == undocced
def test_doc2htmlEpy():
if not html.EPYTEXT:
test.skip("Epytext not available")
assert html.doc2html(None, 'Woot\nYeah') == '<div>Woot Yeah\n</div>'
class TestEpyHackers:
def setup_method(self, meth):
self.orig = html.EPYTEXT
def teardown_method(self, meth):
html.EPYTEXT = self.orig
def test_doc2htmlBoring(self):
if html.EPYTEXT:
html.EPYTEXT = False
assert html.doc2html(object(), 'Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_generateModuleIndex(self):
#This test is a bit un-unity
# And *damnit* how do I write teardowners
html.EPYTEXT = False
sysw = html.SystemWriter(None)
pack = model.Package(None, 'twisted', None)
mod = model.Module(None, 'threadz', 'Woot\nYeah', pack)
fun = model.Function(None, 'blat', 'HICKY HECK\nYEAH', mod)
fun.argspec = [(), None, None, ()]
out = sysw.getHTMLFor(fun)
assert 'blat()' in out
assert 'HICKY HECK\nYEAH' in out
| Python | 0 |
d9189f91370abd1e20e5010bb70d9c47efd58215 | Change read_chrom_sizes to read from a FAIDX index if available | muver/reference.py | muver/reference.py | import os
from wrappers import bowtie2, picard, samtools
def create_reference_indices(ref_fn):
'''
For a given reference FASTA file, generate several indices.
'''
bowtie2.build(ref_fn)
samtools.faidx_index(ref_fn)
picard.create_sequence_dictionary(ref_fn)
def read_chrom_sizes(reference_assembly_fn):
'''
Iterate through a FASTA file to find the length of each chromosome. If a
FAIDX index is available, it will read the lengths from there.
'''
chrom_sizes = dict()
if os.path.exists(reference_assembly_fn + '.fai'):
with open(reference_assembly_fn + '.fai') as f:
for line in f:
chromosome, size = line.strip().split('\t')[:2]
chrom_sizes[chromosome] = int(size)
else:
last_chromosome = None
with open(reference_assembly_fn) as f:
for line in f:
if line.startswith('>'):
last_chromosome = line.split('>')[1].strip()
chrom_sizes[last_chromosome] = 0
else:
chrom_sizes[last_chromosome] += len(line.strip())
return chrom_sizes
def read_chrom_sizes_from_file(chrom_sizes_fn):
'''
Read chromosome sizes from a UCSC chrom_sizes file.
'''
chrom_sizes = dict()
with open(chrom_sizes_fn) as f:
for line in f:
chromosome, size = line.strip().split()
chrom_sizes[chromosome] = int(size)
return chrom_sizes
| from wrappers import bowtie2, picard, samtools
def create_reference_indices(ref_fn):
'''
For a given reference FASTA file, generate several indices.
'''
bowtie2.build(ref_fn)
samtools.faidx_index(ref_fn)
picard.create_sequence_dictionary(ref_fn)
def read_chrom_sizes(reference_assembly_fn):
'''
Iterate through a FASTA file to find the length of each chromosome.
'''
chrom_sizes = dict()
last_chromosome = None
with open(reference_assembly_fn) as f:
for line in f:
if line.startswith('>'):
last_chromosome = line.split('>')[1].strip()
chrom_sizes[last_chromosome] = 0
else:
chrom_sizes[last_chromosome] += len(line.strip())
return chrom_sizes
def read_chrom_sizes_from_file(chrom_sizes_fn):
'''
Read chromosome sizes from a UCSC chrom_sizes file.
'''
chrom_sizes = dict()
with open(chrom_sizes_fn) as f:
for line in f:
chromosome, size = line.strip().split()
chrom_sizes[chromosome] = int(size)
return chrom_sizes
| Python | 0 |
8e1610570a50282594a5516ee473cf13bec2ce71 | fix typo | core/drivers/count/count.py | core/drivers/count/count.py | keywords = ['SELECT', 'INSERT', 'UPDATE', 'DELETE']
def count_query(queries):
ret = {}
for keyword in keywords:
ret[keyword] = 0
for query in queries:
for keyword in keywords:
if query.startswith(keyword):
ret[keyword] += 1
break
return ret | keywords = ['SET', 'INSERT', 'UPDATE', 'DELETE']
def count_query(queries):
ret = {}
for keyword in keywords:
ret[keyword] = 0
for query in queries:
for keyword in keywords:
if query.startswith(keyword):
ret[keyword] += 1
break
return ret | Python | 0.999991 |
4657acf6408b2fb416e2c9577ac09d18d81f8a68 | Remove unused NHS database mockup | nameless/config.py | nameless/config.py | import os
_basedir = os.path.abspath(os.path.dirname(__file__))
# Plugin settings
DATABASE_NAMES = ['atc', 'sms']
# Using sqlite for local development, will be SQL on production.
SQLALCHEMY_BINDS = {
'atc': 'sqlite:///' + os.path.join(_basedir, 'db/atc.db'),
'sms': 'sqlite:///' + os.path.join(_basedir, 'db/sms.db')
}
# TxtLocal SMS settings
SENDER = '447786202240'
INBOX_ID = '498863'
API_KEY = 'Sap3A0EaE2k-xL6d4nLJuQdZriNxBByUjRhOCHM5X0'
API_URI = 'https://api.txtlocal.com/'
API_SEND_URI = API_URI + 'send/?'
API_RECEIVE_URI = API_URI + 'get_messages/?'
TEST_MODE = 1 # 1 (True) to enable test mode & 0 to disable.
| import os
_basedir = os.path.abspath(os.path.dirname(__file__))
# Plugin settings
DATABASE_NAMES = ['atc', 'nhs', 'sms']
# Using sqlite for local development, will be SQL on production.
SQLALCHEMY_BINDS = {
'atc': 'sqlite:///' + os.path.join(_basedir, 'db/atc.db'),
'nhs': 'sqlite:///' + os.path.join(_basedir, 'db/nhs.db'),
'sms': 'sqlite:///' + os.path.join(_basedir, 'db/sms.db')
}
# TxtLocal SMS settings
SENDER = '447786202240'
INBOX_ID = '498863'
API_KEY = 'Sap3A0EaE2k-xL6d4nLJuQdZriNxBByUjRhOCHM5X0'
API_URI = 'https://api.txtlocal.com/'
API_SEND_URI = API_URI + 'send/?'
API_RECEIVE_URI = API_URI + 'get_messages/?'
TEST_MODE = 1 # 1 (True) to enable test mode & 0 to disable.
| Python | 0 |
10801bca03c03d6b6bb7b6108733178dcf5a8b53 | Revert 87dbc5eb9665b5a145a3c2a190f64e2ce4c09fd4^..HEAD | shop/views.py | shop/views.py | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from shop.forms import OrderForm
from shop.models import EmailEntry, Order
from datetime import datetime
import urllib
from xml.dom import minidom
def index(request):
print request.META['HTTP_HOST']
if request.META['HTTP_HOST'] == 'localhost:8000':
return HttpResponseRedirect('/opkikker')
else:
return HttpResponseRedirect('/rustgever')
def opkikker(request):
if request.POST:
form = EmailEntry.Form(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
EmailEntry.objects.get_or_create(email=email, date_added=datetime.now())
form.clean()
return direct_to_template(request, 'opkikker.html', extra_context={'succes': True})
else:
return direct_to_template(request, 'opkikker.html', extra_context={'error': True, 'form': form,})
else:
form = EmailEntry.Form()
return direct_to_template(request, 'opkikker.html', extra_context={'form': form})
def rustgever(request):
return direct_to_template(request, 'rustgever.html')
def order(request):
if request.POST:
form = OrderForm(request.POST)
if form.is_valid():
order = form.save()
order.save()
total_amount = int(((order.product_price * order.product_amount) + order.product_shipment_cost) * 100)
return HttpResponseRedirect(get_payment_url(total_amount, order.id))
else:
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'error': True, 'form': form,})
else:
form = OrderForm()
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'form': form})
def get_payment_url(amount, id):
URL = "https://secure.mollie.nl/xml/ideal?a=create-link&partnerid=705747&amount="+str(amount)+"&description=Zen%20Garden%20Rustgever(tm)%20order_id%20"+str(id)+"&profile_key=e510805f"
print URL
result = urllib.urlopen(URL).read()
splits = result.split("<URL>")
return splits[1].split("</URL>")[0]
| from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from shop.forms import OrderForm
from shop.models import EmailEntry, Order
from datetime import datetime
import urllib
from xml.dom import minidom
def index(request):
print request.META['HTTP_HOST']
if request.META['HTTP_HOST'] == 'www.opkikker.nl':
return HttpResponseRedirect('/opkikker')
else:
return HttpResponseRedirect('/rustgever')
def opkikker(request):
if request.POST:
form = EmailEntry.Form(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
EmailEntry.objects.get_or_create(email=email, date_added=datetime.now())
form.clean()
return direct_to_template(request, 'opkikker.html', extra_context={'succes': True})
else:
return direct_to_template(request, 'opkikker.html', extra_context={'error': True, 'form': form,})
else:
form = EmailEntry.Form()
return direct_to_template(request, 'opkikker.html', extra_context={'form': form})
def rustgever(request):
return direct_to_template(request, 'rustgever.html')
def order(request):
if request.POST:
form = OrderForm(request.POST)
if form.is_valid():
order = form.save()
order.save()
total_amount = int(((order.product_price * order.product_amount) + order.product_shipment_cost) * 100)
return HttpResponseRedirect(get_payment_url(total_amount, order.id))
else:
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'error': True, 'form': form,})
else:
form = OrderForm()
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'form': form})
def get_payment_url(amount, id):
URL = "https://secure.mollie.nl/xml/ideal?a=create-link&partnerid=705747&amount="+str(amount)+"&description=Zen%20Garden%20Rustgever(tm)%20order_id%20"+str(id)+"&profile_key=e510805f"
print URL
result = urllib.urlopen(URL).read()
splits = result.split("<URL>")
return splits[1].split("</URL>")[0]
| Python | 0.000001 |
c81f4d0659366e1512a4b64f0cce65d50de25927 | update to 3.29.0 | packages/dependencies/sqlite3.py | packages/dependencies/sqlite3.py | {
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3290000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : '8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3290000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : '8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.29.0', 'fancy_name' : 'libsqlite3' },
} | {
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3280000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'd61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3280000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'd61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.28.0', 'fancy_name' : 'libsqlite3' },
} | Python | 0 |
0fee973ea7a4ca7b79c84ed55fa1d327c754beee | Add tests and some fixes for class extension pattern | readthedocs/core/utils/extend.py | readthedocs/core/utils/extend.py | """Patterns for extending Read the Docs"""
import inspect
from django.conf import settings
from django.utils.module_loading import import_by_path
from django.utils.functional import LazyObject
class SettingsOverrideObject(LazyObject):
"""Base class for creating class that can be overridden
This is used for extension points in the code, where we want to extend a
class without monkey patching it. This abstract class allows for lazy
inheritance, creating a class from the specified class or from a setting,
but only once the class is called.
Default to an instance of the class defined by :py:cvar:`_default_class`.
Next, look for an override setting class path in
``settings.CLASS_OVERRIDES``, which should be a dictionary of class paths.
The setting should be a dictionary keyed by the object path name::
CLASS_OVERRIDES = {
'readthedocs.core.resolver.Resolver': 'something.resolver.Resolver',
}
Lastly, if ``settings.CLASS_OVERRIDES`` is missing, or the key is not found,
attempt to pull the key :py:cvar:`_override_setting` from ``settings``.
"""
_default_class = None
_override_setting = None
def _setup(self):
"""Set up wrapped object
This is called when attributes are accessed on :py:cls:`LazyObject`
and the underlying wrapped object does not yet exist.
"""
cls = self._default_class
cls_path = (getattr(settings, 'CLASS_OVERRIDES', {})
.get(self._get_class_id()))
if cls_path is None and self._override_setting is not None:
cls_path = getattr(settings, self._override_setting, None)
if cls_path is not None:
cls = import_by_path(cls_path)
self._wrapped = cls()
def _get_class_id(self):
# type() here, because LazyObject overrides some attribute access
return '.'.join([inspect.getmodule(type(self)).__name__,
type(self).__name__])
| """Patterns for extending Read the Docs"""
from django.conf import settings
from django.utils.module_loading import import_by_path
from django.utils.functional import LazyObject
class SettingsOverrideObject(LazyObject):
"""Base class for creating class that can be overridden
This is used for extension points in the code, where we want to extend a
class without monkey patching it. This abstract class allows for lazy
inheritance, creating a class from the specified class or from a setting,
but only once the class is called.
Default to an instance of the class defined by :py:cvar:`_default_class`.
Next, look for an override setting class path in
``settings.CLASS_OVERRIDES``, which should be a dictionary of class paths.
The setting should be a dictionary keyed by the object path name::
CLASS_OVERRIDES = {
'readthedocs.core.resolver.Resolver': 'something.resolver.Resolver',
}
Lastly, if ``settings.CLASS_OVERRIDES`` is missing, or the key is not found,
attempt to pull the key :py:cvar:`_override_setting` from ``settings``.
"""
_default_class = None
_override_setting = None
def _setup(self):
"""Set up wrapped object
This is called when attributes are accessed on :py:cls:`LazyObject`
and the underlying wrapped object does not yet exist.
"""
cls = self._default_class
cls_path = (getattr(settings, 'CLASS_OVERRIDES', {})
.get(self.get_class_id()))
if cls_path is None:
cls_path = getattr(settings, self._override_setting, None)
if cls_path is not None:
cls = import_by_path(cls_path)
self._wrapped = cls()
def get_class_id(self):
# type() here, because LazyObject overrides some attribute access
return '.'.join([__name__, type(self).__name__])
| Python | 0 |
252da1473643916dd10e7a250d64c8bedb8ae5a9 | Use username as id too; #35 | judge/views/select2.py | judge/views/select2.py | from django.db.models import Q
from django.http import JsonResponse
from django.utils.encoding import smart_text
from django.views.generic.list import BaseListView
from judge.models import Profile, Organization, Problem, Comment, Contest
from judge.templatetags.gravatar import get_gravatar_url
class Select2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.object_list = self.get_queryset()
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': smart_text(self.get_name(obj)),
'id': obj.pk,
} for obj in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
class UserSelect2View(Select2View):
def get_queryset(self):
return Profile.objects.filter(Q(user__username__icontains=(self.term)) | Q(name__icontains=(self.term))) \
.select_related('user')
class OrganizationSelect2View(Select2View):
def get_queryset(self):
return Organization.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
class ProblemSelect2View(Select2View):
def get_queryset(self):
queryset = Problem.objects.filter(Q(code__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_problem'):
filter = Q(is_public=True)
if self.request.user.is_authenticated():
filter |= Q(authors=self.request.user.profile)
queryset = queryset.filter(filter)
return queryset
class ContestSelect2View(Select2View):
def get_queryset(self):
queryset = Contest.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_contest'):
queryset = queryset.filter(is_public=True)
if not self.request.user.has_perm('judge.edit_all_contest'):
q = Q(is_private=False)
if self.request.user.is_authenticated():
q |= Q(organizations__in=self.request.user.profile.organizations.all())
queryset = queryset.filter(q)
return queryset
class CommentSelect2View(Select2View):
def get_queryset(self):
return Comment.objects.filter(Q(title__icontains=self.term) | Q(page__icontains=self.term))
class UserSearchSelect2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.gravatar_size = request.GET.get('gravatar_size', 128)
self.gravatar_default = request.GET.get('gravatar_default', None)
self.object_list = (Profile.objects.filter(Q(user__username__icontains=(self.term)) |
Q(name__icontains=(self.term)))
.values_list('pk', 'user__username', 'user__email'))
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': username,
'id': username,
'gravatar_url': get_gravatar_url(email, self.gravatar_size, self.gravatar_default),
} for pk, username, email in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
| from django.db.models import Q
from django.http import JsonResponse
from django.utils.encoding import smart_text
from django.views.generic.list import BaseListView
from judge.models import Profile, Organization, Problem, Comment, Contest
from judge.templatetags.gravatar import get_gravatar_url
class Select2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.object_list = self.get_queryset()
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': smart_text(self.get_name(obj)),
'id': obj.pk,
} for obj in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
class UserSelect2View(Select2View):
def get_queryset(self):
return Profile.objects.filter(Q(user__username__icontains=(self.term)) | Q(name__icontains=(self.term))) \
.select_related('user')
class OrganizationSelect2View(Select2View):
def get_queryset(self):
return Organization.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
class ProblemSelect2View(Select2View):
def get_queryset(self):
queryset = Problem.objects.filter(Q(code__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_problem'):
filter = Q(is_public=True)
if self.request.user.is_authenticated():
filter |= Q(authors=self.request.user.profile)
queryset = queryset.filter(filter)
return queryset
class ContestSelect2View(Select2View):
def get_queryset(self):
queryset = Contest.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_contest'):
queryset = queryset.filter(is_public=True)
if not self.request.user.has_perm('judge.edit_all_contest'):
q = Q(is_private=False)
if self.request.user.is_authenticated():
q |= Q(organizations__in=self.request.user.profile.organizations.all())
queryset = queryset.filter(q)
return queryset
class CommentSelect2View(Select2View):
def get_queryset(self):
return Comment.objects.filter(Q(title__icontains=self.term) | Q(page__icontains=self.term))
class UserSearchSelect2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.gravatar_size = request.GET.get('gravatar_size', 128)
self.gravatar_default = request.GET.get('gravatar_default', None)
self.object_list = (Profile.objects.filter(Q(user__username__icontains=(self.term)) |
Q(name__icontains=(self.term)))
.values_list('pk', 'user__username', 'user__email'))
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': username, 'id': pk,
'gravatar_url': get_gravatar_url(email, self.gravatar_size, self.gravatar_default),
} for pk, username, email in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
| Python | 0.00001 |
f1c49d33c829c56f0dff12a20563ca7a1b3fbc41 | Print the other way around (makes more sense) | Toolkit/AimlessSurface.py | Toolkit/AimlessSurface.py | from __future__ import division
def work():
from scitbx import math
from scitbx.array_family import flex
N=15
lfg = math.log_factorial_generator(N)
nsssphe = math.nss_spherical_harmonics(6,50000,lfg)
l = 2
m = 1
t = 1
p = 1
print nsssphe.spherical_harmonic(2, 1, 1, 1)
def n_terms():
orders = {}
for j in range(10):
nterms = 0
for k in range(1, j+1):
for l in range(-k, k+1):
nterms += 1
orders[nterms] = j
print orders
def order_from_nterm(n):
return {0: 0, 80: 8, 3: 1, 8: 2, 15: 3, 48: 6, 99: 9, 35: 5, 24: 4, 63: 7}[n]
def evaluate_1degree(ClmList, png_filename):
from scitbx import math
from scitbx.array_family import flex
import math as pymath
import numpy
d2r = pymath.pi / 180.0
order = order_from_nterm(len(ClmList))
lfg = math.log_factorial_generator(2 * order + 1)
nsssphe = math.nss_spherical_harmonics(order,50000,lfg)
Clm = { }
idx = 0
for l in range(1, order+1):
for m in range(-l, l+1):
Clm[(l,m)] = ClmList[idx]
idx += 1
abscor = numpy.empty((1+180//1, 1+360//1), float, 'C')
sqrt2 = pymath.sqrt(2)
for t in range(0, 361, 1):
for p in range(0, 181, 1):
a = 1.0
for l in range(1, order+1):
for m in range(-l, l+1):
# Ylm = nsssphe.spherical_harmonic(l, m, t*d2r, p*d2r)
# Convert from complex to real according to
# http://en.wikipedia.org/wiki/Spherical_harmonics#Real_form
Ylm = nsssphe.spherical_harmonic(l, abs(m), t*d2r, p*d2r)
if m < 0:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.imag
elif m == 0:
assert(Ylm.imag == 0.0)
a += Clm[(l,m)] * Ylm.real
else:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.real
abscor[(p//1, t//1)] = a
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
plot = pyplot.imshow(abscor)
pyplot.colorbar()
pyplot.savefig(png_filename)
return
def scrape_coefficients(log_file_name):
Clm = { }
c = 0
l = 0
coefficients = []
for record in open(log_file_name):
if 'Coefficient(Sd)' in record:
for token in record.split()[1:]:
coefficients.append(float(token.split('(')[0]))
return coefficients
if __name__ == '__main__':
import sys
evaluate_1degree(scrape_coefficients(sys.argv[1]), sys.argv[2])
| from __future__ import division
def work():
from scitbx import math
from scitbx.array_family import flex
N=15
lfg = math.log_factorial_generator(N)
nsssphe = math.nss_spherical_harmonics(6,50000,lfg)
l = 2
m = 1
t = 1
p = 1
print nsssphe.spherical_harmonic(2, 1, 1, 1)
def n_terms():
orders = {}
for j in range(10):
nterms = 0
for k in range(1, j+1):
for l in range(-k, k+1):
nterms += 1
orders[nterms] = j
print orders
def order_from_nterm(n):
return {0: 0, 80: 8, 3: 1, 8: 2, 15: 3, 48: 6, 99: 9, 35: 5, 24: 4, 63: 7}[n]
def evaluate_1degree(ClmList, png_filename):
from scitbx import math
from scitbx.array_family import flex
import math as pymath
import numpy
d2r = pymath.pi / 180.0
order = order_from_nterm(len(ClmList))
lfg = math.log_factorial_generator(2 * order + 1)
nsssphe = math.nss_spherical_harmonics(order,50000,lfg)
Clm = { }
idx = 0
for l in range(1, order+1):
for m in range(-l, l+1):
Clm[(l,m)] = ClmList[idx]
idx += 1
abscor = numpy.empty((1+360//1, 1+180//1), float, 'C')
sqrt2 = pymath.sqrt(2)
for t in range(0, 361, 1):
for p in range(0, 181, 1):
a = 1.0
for l in range(1, order+1):
for m in range(-l, l+1):
# Ylm = nsssphe.spherical_harmonic(l, m, t*d2r, p*d2r)
# Convert from complex to real according to
# http://en.wikipedia.org/wiki/Spherical_harmonics#Real_form
Ylm = nsssphe.spherical_harmonic(l, abs(m), t*d2r, p*d2r)
if m < 0:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.imag
elif m == 0:
assert(Ylm.imag == 0.0)
a += Clm[(l,m)] * Ylm.real
else:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.real
abscor[(t//1, p//1)] = a
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
plot = pyplot.imshow(abscor)
pyplot.colorbar()
pyplot.savefig(png_filename)
return
def scrape_coefficients(log_file_name):
Clm = { }
c = 0
l = 0
coefficients = []
for record in open(log_file_name):
if 'Coefficient(Sd)' in record:
for token in record.split()[1:]:
coefficients.append(float(token.split('(')[0]))
return coefficients
if __name__ == '__main__':
import sys
evaluate_1degree(scrape_coefficients(sys.argv[1]), sys.argv[2])
| Python | 0.000078 |
f1e071957214e787521c7de887ca1fe369671bc7 | Add constants | UI/resources/constants.py | UI/resources/constants.py | # -*- coding: utf-8 -*-
SAVE_PASSWORD_HASHED = True
MAX_RETRIES_DOWNLOAD_FROM_SAME_FARMER = 3
MAX_RETRIES_UPLOAD_TO_SAME_FARMER = 3
MAX_RETRIES_NEGOTIATE_CONTRACT = 1000
MAX_RETRIES_GET_FILE_POINTERS = 100
GET_DEFAULT_TMP_PATH_FROM_ENV_VARIABLES = True
GET_HOME_PATH_FROM_ENV_VARIABLES = True
FILE_POINTERS_REQUEST_DELAY = 1
FILE_POINTERS_ITERATION_DELAY = 0.2
CONTRACT_NEGOTIATION_ITERATION_DELAY = 0.2
MAX_POINTERS_RESOLVED_IN_ONE_PART = 50
MINIMAL_ALLOWED_BRIDGE_REQUEST_TIMEOUT = 5
# int: file pointers request delay, file pointers iteration delay, max pointers resolved in one part,
# minimal bridge request timeout, in seconds.
MAX_DOWNLOAD_REQUEST_BLOCK_SIZE = 4 * 1024
MAX_UPLOAD_REQUEST_BLOCK_SIZE = 4096
MAX_UPLOAD_CONNECTIONS_AT_SAME_TIME = 4
MAX_DOWNLOAD_CONNECTIONS_AT_SAME_TIME = 4
CONCURRENT_UPLOADING = False
DEFAULT_MAX_BRIDGE_REQUEST_TIMEOUT = 90
DEFAULT_MAX_FARMER_CONNECTION_TIMEOUT = 7
DEFAULT_MAX_FARMER_DOWNLOAD_READ_TIMEOUT = 17
# int: maximum bridge request timeout, in seconds.
MAX_ALLOWED_UPLOAD_CONCURRENCY = 9999
MAX_ALLOWED_DOWNLOAD_CONCURRENCY = 9999
DEFAULT_BRIDGE_API_URL = 'api.storj.io'
CONFIG_FILE_NAME = 'storj_client_config.xml'
# DESIGN
DISPLAY_FILE_CREATION_DATE_IN_MAIN = True
DISPLAY_FILE_ID_IN_MAIN = True
FILE_LIST_SORTING_MAIN_ENABLED = True
AUTO_SCROLL_UPLOAD_DOWNLOAD_QUEUE = True
SHOW_TRAY_ICON = False
BUCKETS_LIST_SORTING_ENABLED = True
MIRRORS_TREE_SORTING_ENABLED = True
FIXED_WINDOWS_SIZE = True
ALLOW_DOWNLOAD_FARMER_POINTER_CANCEL_BY_USER = True
ALLOW_UPLOAD_FARMER_CANCEL_BY_USER = True
DATA_TABLE_EDIT_ENABLED = False
# BLACKLISTING
FARMER_NODES_EXCLUSION_FOR_UPLOAD_ENABLED = True
FARMER_NODES_EXCLUSION_FOR_DOWNLOAD_ENABLED = True
BLACKLIST_MAX_LENGTH = 300
BLACKLISTING_MODE = 2
# 1 - blacklist all farmers to which shard have been recently uploaded
# 2 - blacklist only farmers to which transfer failed
# PATHS
USE_USER_ENV_PATH_FOR_TEMP = False
DEFAULT_ENCRYPTION_KEYS_DIRECTORY = ""
# SHARDING
DEFAULT_MAX_SHARD_SIZE = 4294967296 # 4Gb
DEFAULT_SHARD_SIZE = 2 * (1024 * 1024) # 8Mb
# UPLOAD
REED_SOLOMON_ENCODING_ENABLED = True
| # -*- coding: utf-8 -*-
SAVE_PASSWORD_HASHED = True
MAX_RETRIES_DOWNLOAD_FROM_SAME_FARMER = 3
MAX_RETRIES_UPLOAD_TO_SAME_FARMER = 3
MAX_RETRIES_NEGOTIATE_CONTRACT = 1000
MAX_RETRIES_GET_FILE_POINTERS = 100
GET_DEFAULT_TMP_PATH_FROM_ENV_VARIABLES = True
GET_HOME_PATH_FROM_ENV_VARIABLES = True
FILE_POINTERS_REQUEST_DELAY = 1
FILE_POINTERS_ITERATION_DELAY = 0.2
CONTRACT_NEGOTIATION_ITERATION_DELAY = 0.2
MAX_POINTERS_RESOLVED_IN_ONE_PART = 50
MINIMAL_ALLOWED_BRIDGE_REQUEST_TIMEOUT = 5
# int: file pointers request delay, file pointers iteration delay, max pointers resolved in one part,
# minimal bridge request timeout, in seconds.
MAX_DOWNLOAD_REQUEST_BLOCK_SIZE = 4 * 1024
MAX_UPLOAD_REQUEST_BLOCK_SIZE = 4096
MAX_UPLOAD_CONNECTIONS_AT_SAME_TIME = 4
MAX_DOWNLOAD_CONNECTIONS_AT_SAME_TIME = 4
CONCURRENT_UPLOADING = False
DEFAULT_MAX_BRIDGE_REQUEST_TIMEOUT = 90
DEFAULT_MAX_FARMER_CONNECTION_TIMEOUT = 7
DEFAULT_MAX_FARMER_DOWNLOAD_READ_TIMEOUT = 17
# int: maximum bridge request timeout, in seconds.
MAX_ALLOWED_UPLOAD_CONCURRENCY = 9999
MAX_ALLOWED_DOWNLOAD_CONCURRENCY = 9999
DEFAULT_BRIDGE_API_URL = 'api.storj.io'
# DESIGN
DISPLAY_FILE_CREATION_DATE_IN_MAIN = True
DISPLAY_FILE_ID_IN_MAIN = True
FILE_LIST_SORTING_MAIN_ENABLED = True
AUTO_SCROLL_UPLOAD_DOWNLOAD_QUEUE = True
SHOW_TRAY_ICON = False
BUCKETS_LIST_SORTING_ENABLED = True
MIRRORS_TREE_SORTING_ENABLED = True
FIXED_WINDOWS_SIZE = True
ALLOW_DOWNLOAD_FARMER_POINTER_CANCEL_BY_USER = True
ALLOW_UPLOAD_FARMER_CANCEL_BY_USER = True
DATA_TABLE_EDIT_ENABLED = False
# BLACKLISTING
FARMER_NODES_EXCLUSION_FOR_UPLOAD_ENABLED = True
FARMER_NODES_EXCLUSION_FOR_DOWNLOAD_ENABLED = True
BLACKLIST_MAX_LENGTH = 300
BLACKLISTING_MODE = 2
# 1 - blacklist all farmers to which shard have been recently uploaded
# 2 - blacklist only farmers to which transfer failed
# PATHS
USE_USER_ENV_PATH_FOR_TEMP = False
DEFAULT_ENCRYPTION_KEYS_DIRECTORY = ""
# SHARDING
DEFAULT_MAX_SHARD_SIZE = 4294967296 # 4Gb
DEFAULT_SHARD_SIZE = 2 * (1024 * 1024) # 8Mb
# UPLOAD
REED_SOLOMON_ENCODING_ENABLED = True
| Python | 0.000228 |
f2a0bbee61a144bf0d1de77dd4b41393fe7428bf | fix Ntests in simuNtests | simuNtests.py | simuNtests.py | # lance simulations pour different nombre d'electeurs
import multiprocessing
import os, sys
import shutil
import time
import numpy as np
from randomSets import *
def worker(((Ncandidats,q, Nwinners))):
"""worker function"""
sys.stdout.write('\nSTART -- %i candidats -- \n' % Ncandidats)
sys.stdout.flush()
time.sleep(0.01) # being sure that simulation are differently initialized
minNvoters = simulate(Ncandidats, q =q, Nwinners = Nwinners)
with open('nmin-candidates-%i' % Ncandidats,'a') as f_handle:
np.savetxt(f_handle,minNvoters)
return
if __name__ == '__main__':
print "Cette fois, c'est la bonne !"
print (time.strftime("%H:%M:%S"))
root = "simulations/"
try:
os.mkdir(root)
except OSError:
pass
Ncandidates = int(sys.argv[1])
Ntests = [sys.argv[1] if len(sys.argv) == 3 else 1000]
Nwinners = 1
args = []
print Ncandidates
for i in range(Ntests):
arg = [Ncandidates,100,1]
args.append(arg)
if args == []:
print "Rien a faire!"
pool = multiprocessing.Pool(processes=20)
pool.map(worker, args)
print "Alors, ca marche ? :)"
| # lance simulations pour different nombre d'electeurs
import multiprocessing
import os, sys
import shutil
import time
import numpy as np
from randomSets import *
def worker(((Ncandidats,q, Nwinners))):
"""worker function"""
sys.stdout.write('\nSTART -- %i candidats -- \n' % Ncandidats)
sys.stdout.flush()
time.sleep(0.01) # being sure that simulation are differently initialized
minNvoters = simulate(Ncandidats, q =q, Nwinners = Nwinners)
with open('nmin-candidates-%i' % Ncandidats,'a') as f_handle:
np.savetxt(f_handle,minNvoters)
return
if __name__ == '__main__':
print "Cette fois, c'est la bonne !"
print (time.strftime("%H:%M:%S"))
root = "simulations/"
try:
os.mkdir(root)
except OSError:
pass
Ncandidates = int(sys.argv[1])
Ntests = [sys.argv[1] if len(sys.argv) == 3 else 1000]
Nwinners = 1
args = []
print Ncandidates
for i in range(Ncandidates):
arg = [Ncandidates,100,1]
args.append(arg)
if args == []:
print "Rien a faire!"
pool = multiprocessing.Pool(processes=1)
pool.map(worker, args)
print "Alors, ca marche ? :)"
| Python | 0.000006 |
3bf9ab0da4b06b8b0383fb6db64947886742899c | Add newline in log of builds after successful rebuild of website. | site/build.py | site/build.py | #!/usr/bin/env python
# -*- coding: ascii -*-
"""
This script can be used to build the website.
It is also run on each commit to github.
Example: ./build public_html
"""
from __future__ import print_function
import datetime
import os
import shutil
import subprocess
import sys
import time
BUILD_DIR = 'build'
def get_build_dir():
try:
build_dir = sys.argv[1]
except IndexError:
build_dir = BUILD_DIR
basedir = os.path.abspath(os.path.curdir)
build_dir = os.path.join(basedir, build_dir)
return build_dir
def build(dest):
source = os.path.split(os.path.abspath(__file__))[0]
source = os.path.join(source, 'src')
# We aren't doing anything fancy yet.
shutil.copytree(source, dest)
def update_gitrepo():
source = os.path.split(os.path.abspath(__file__))[0]
initial = os.getcwd()
try:
os.chdir(source)
subprocess.call(['git', 'pull'])
finally:
os.chdir(initial)
def main():
try:
min_delay = int(sys.argv[2]) * 60
except:
min_delay = 0
# Build only if enough time has passed.
build_dir = get_build_dir()
if os.path.exists(build_dir):
elapsed = time.time() - os.path.getmtime(build_dir)
if elapsed < min_delay:
print("Not enough time has elapsed since last build.")
sys.exit(0)
else:
# Delete it all!
if os.path.islink(build_dir):
os.unlink(build_dir)
else:
shutil.rmtree(build_dir)
elif os.path.islink(build_dir):
# Then its a bad symlink.
os.unlink(build_dir)
#update_gitrepo()
build(build_dir)
subprocess.call(['touch', build_dir])
print("Done.\n")
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: ascii -*-
"""
This script can be used to build the website.
It is also run on each commit to github.
Example: ./build public_html
"""
import datetime
import os
import shutil
import subprocess
import sys
import time
BUILD_DIR = 'build'
def get_build_dir():
try:
build_dir = sys.argv[1]
except IndexError:
build_dir = BUILD_DIR
basedir = os.path.abspath(os.path.curdir)
build_dir = os.path.join(basedir, build_dir)
return build_dir
def build(dest):
source = os.path.split(os.path.abspath(__file__))[0]
source = os.path.join(source, 'src')
# We aren't doing anything fancy yet.
shutil.copytree(source, dest)
def update_gitrepo():
source = os.path.split(os.path.abspath(__file__))[0]
initial = os.getcwd()
try:
os.chdir(source)
subprocess.call(['git', 'pull'])
finally:
os.chdir(initial)
def main():
try:
min_delay = int(sys.argv[2]) * 60
except:
min_delay = 0
# Build only if enough time has passed.
build_dir = get_build_dir()
if os.path.exists(build_dir):
elapsed = time.time() - os.path.getmtime(build_dir)
if elapsed < min_delay:
print "Not enough time has elapsed since last build."
sys.exit(0)
else:
# Delete it all!
if os.path.islink(build_dir):
os.unlink(build_dir)
else:
shutil.rmtree(build_dir)
elif os.path.islink(build_dir):
# Then its a bad symlink.
os.unlink(build_dir)
#update_gitrepo()
build(build_dir)
subprocess.call(['touch', build_dir])
print "Done."
if __name__ == '__main__':
main()
| Python | 0 |
50a6ac219a3ff9f9b6ed6614c8a54ab5e93b525a | set phid_valid to yes since received from phi [skip ci] | custom/icds/repeaters/generators/phi.py | custom/icds/repeaters/generators/phi.py | import json
from django.core.serializers.json import DjangoJSONEncoder
from corehq import toggles
from corehq.apps.hqcase.utils import update_case
from corehq.motech.repeaters.repeater_generators import (
CaseRepeaterJsonPayloadGenerator,
)
class BasePayloadGenerator(CaseRepeaterJsonPayloadGenerator):
@staticmethod
def enabled_for_domain(domain):
return toggles.PHI_CAS_INTEGRATION.enabled(domain)
class SearchByParamsPayloadGenerator(BasePayloadGenerator):
@staticmethod
def _gender(gender):
if gender:
if gender == 'male':
return 'M'
elif gender == 'female':
return 'F'
return ""
def get_payload(self, repeat_record, case):
data = self._setup_names(case)
data.update({
"gender": self._gender(case.get_case_property('gender')),
"villagecode": 442639,
"subdistrictcode": 3318,
"districtcode": 378,
"statecode": 22,
"dateofbirth": case.get_case_property('date_of_birth') or "",
"mobileno": case.get_case_property('mobile_number') or "",
"email": "",
"govt_id_name": "",
"govt_id_number": ""
})
return json.dumps(data, cls=DjangoJSONEncoder)
def _setup_names(self, case):
data = {}
self._setup_name(case.name, 'beneficaryname', 'namelocal', data)
self._setup_name(case.get_case_property('mothers_name'), 'mothername', 'mothernamelocal', data)
self._setup_name(case.get_case_property('fathers_name'), 'fathername', 'fathernamelocal', data)
self._setup_name(case.get_case_property('husbands_name'), 'husbandname', 'husbandnamelocal', data)
return data
def _setup_name(self, name, key_name, key_name_local, data):
data[key_name] = ""
data[key_name_local] = ""
if self._has_special_chars(name):
data[key_name_local] = name
else:
data[key_name] = name
@staticmethod
def _has_special_chars(value):
try:
value.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return True
return False
def handle_success(self, response, case, repeat_record):
phi_id = response.json().get('result', [{}])[0].get('phi_id', None)
if phi_id:
update_case(case.domain, case.case_id,
{'phid_for_beneficiary': phi_id, 'phid_valid': 'yes'},
device_id=__name__ + ".search")
class ValidatePHIDPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
data = {'phi_id': payload_doc.get_case_property('phid_for_beneficiary')}
return json.dumps(data, cls=DjangoJSONEncoder)
def handle_success(self, response, case, repeat_record):
case_update = {'phid_validated': 'yes'}
if response.json()['result'] == 'true':
case_update['phid_valid'] = 'yes'
else:
case_update['phid_valid'] = 'no'
update_case(case.domain, case.case_id, case_update,
device_id=__name__ + ".validate")
| import json
from django.core.serializers.json import DjangoJSONEncoder
from corehq import toggles
from corehq.apps.hqcase.utils import update_case
from corehq.motech.repeaters.repeater_generators import (
CaseRepeaterJsonPayloadGenerator,
)
class BasePayloadGenerator(CaseRepeaterJsonPayloadGenerator):
@staticmethod
def enabled_for_domain(domain):
return toggles.PHI_CAS_INTEGRATION.enabled(domain)
class SearchByParamsPayloadGenerator(BasePayloadGenerator):
@staticmethod
def _gender(gender):
if gender:
if gender == 'male':
return 'M'
elif gender == 'female':
return 'F'
return ""
def get_payload(self, repeat_record, case):
data = self._setup_names(case)
data.update({
"gender": self._gender(case.get_case_property('gender')),
"villagecode": 442639,
"subdistrictcode": 3318,
"districtcode": 378,
"statecode": 22,
"dateofbirth": case.get_case_property('date_of_birth') or "",
"mobileno": case.get_case_property('mobile_number') or "",
"email": "",
"govt_id_name": "",
"govt_id_number": ""
})
return json.dumps(data, cls=DjangoJSONEncoder)
def _setup_names(self, case):
data = {}
self._setup_name(case.name, 'beneficaryname', 'namelocal', data)
self._setup_name(case.get_case_property('mothers_name'), 'mothername', 'mothernamelocal', data)
self._setup_name(case.get_case_property('fathers_name'), 'fathername', 'fathernamelocal', data)
self._setup_name(case.get_case_property('husbands_name'), 'husbandname', 'husbandnamelocal', data)
return data
def _setup_name(self, name, key_name, key_name_local, data):
data[key_name] = ""
data[key_name_local] = ""
if self._has_special_chars(name):
data[key_name_local] = name
else:
data[key_name] = name
@staticmethod
def _has_special_chars(value):
try:
value.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return True
return False
def handle_success(self, response, case, repeat_record):
phi_id = response.json().get('result', [{}])[0].get('phi_id', None)
if phi_id:
update_case(case.domain, case.case_id, {'phid_for_beneficiary': phi_id},
device_id=__name__ + ".search")
class ValidatePHIDPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
data = {'phi_id': payload_doc.get_case_property('phid_for_beneficiary')}
return json.dumps(data, cls=DjangoJSONEncoder)
def handle_success(self, response, case, repeat_record):
case_update = {'phid_validated': 'yes'}
if response.json()['result'] == 'true':
case_update['phid_valid'] = 'yes'
else:
case_update['phid_valid'] = 'no'
update_case(case.domain, case.case_id, case_update,
device_id=__name__ + ".validate")
| Python | 0 |
798f80c3efe06869194adf7073af574cc94481b9 | add to init | km3modules/__init__.py | km3modules/__init__.py | # coding=utf-8
# Filename: __init__.py
# pylint: disable=locally-disabled
"""
A collection of commonly used modules.
"""
from km3modules.common import (Dump, Delete, HitCounter, BlobIndexer, Keep,
StatusBar, MemoryObserver, Wrap, Cut)
from km3modules.reco import SvdFit as PrimFit
from km3modules.reco import SvdFit
| # coding=utf-8
# Filename: __init__.py
# pylint: disable=locally-disabled
"""
A collection of commonly used modules.
"""
from km3modules.common import (Dump, Delete, HitCounter, BlobIndexer, Keep,
StatusBar, MemoryObserver, Wrap)
from km3modules.reco import SvdFit as PrimFit
from km3modules.reco import SvdFit
| Python | 0.000006 |
b5f8e3f8dd8d2d99494be83bdddbc1a6078c3161 | Test cleanup connectivity test added. | package/tests/test_connectivity/test_cleanup_connectivity.py | package/tests/test_connectivity/test_cleanup_connectivity.py | from unittest import TestCase
from mock import Mock
from cloudshell.cp.azure.domain.services.network_service import NetworkService
from cloudshell.cp.azure.domain.services.tags import TagService
from cloudshell.cp.azure.domain.services.virtual_machine_service import VirtualMachineService
from cloudshell.cp.azure.domain.vm_management.operations.delete_operation import DeleteAzureVMOperation
from tests.helpers.test_helper import TestHelper
class TestCleanupConnectivity(TestCase):
def setUp(self):
self.logger = Mock()
self.vm_service = VirtualMachineService()
self.network_service = NetworkService()
self.tags_service = TagService()
self.delete_operation = DeleteAzureVMOperation(logger=self.logger,
vm_service=self.vm_service,
network_service=self.network_service,
tags_service=self.tags_service)
def test_cleanup(self):
"""
:return:
"""
# Arrange
self.vm_service.delete_resource_group = Mock()
self.vm_service.delete_sandbox_subnet = Mock()
tested_group_name = "test_group"
resource_client = Mock()
network_client = Mock()
cloud_provider_model = Mock()
vnet = Mock()
subnet = Mock()
subnet.name = tested_group_name
vnet.subnets = [subnet]
reservation = Mock()
reservation.reservation_id = tested_group_name
self.network_service.get_sandbox_virtual_network = Mock(return_value=vnet)
# Act
self.delete_operation.delete_resource_group(resource_client=resource_client, group_name=tested_group_name)
self.delete_operation.delete_sandbox_subnet(network_client=network_client,
cloud_provider_model=cloud_provider_model,
resource_group_name=tested_group_name)
# Verify
self.assertTrue(TestHelper.CheckMethodCalledXTimes(self.vm_service.delete_resource_group))
self.assertTrue(TestHelper.CheckMethodCalledXTimes(self.network_service.get_sandbox_virtual_network))
self.vm_service.delete_resource_group.assert_called_with(resource_management_client=resource_client,
group_name=tested_group_name)
def test_delete_sandbox_subnet_on_error(self):
# Arrange
self.vm_service.delete_resource_group = Mock()
self.vm_service.delete_sandbox_subnet = Mock()
tested_group_name = "test_group"
vnet = Mock()
subnet = Mock()
subnet.name = "test_group_for_exception"
vnet.subnets = [subnet]
reservation = Mock()
reservation.reservation_id = tested_group_name
self.network_service.get_sandbox_virtual_network = Mock(return_value=vnet)
# Act
self.assertRaises(Exception,
self.delete_operation.delete_sandbox_subnet,
)
| from unittest import TestCase
from mock import Mock
from cloudshell.cp.azure.domain.services.network_service import NetworkService
from cloudshell.cp.azure.domain.services.tags import TagService
from cloudshell.cp.azure.domain.services.virtual_machine_service import VirtualMachineService
from cloudshell.cp.azure.domain.vm_management.operations.delete_operation import DeleteAzureVMOperation
from tests.helpers.test_helper import TestHelper
class TestCleanupConnectivity(TestCase):
def setUp(self):
self.logger = Mock()
self.vm_service = VirtualMachineService()
self.network_service = NetworkService()
self.tags_service = TagService()
self.delete_operation = DeleteAzureVMOperation(logger=self.logger,
vm_service=self.vm_service,
network_service=self.network_service,
tags_service=self.tags_service)
def test_cleanup(self):
"""
:return:
"""
# Arrange
self.vm_service.delete_resource_group = Mock()
tested_group_name = "test_group"
resource_client = Mock()
# Act
self.delete_operation.delete_resource_group(resource_client=resource_client, group_name=tested_group_name)
# Verify
self.assertTrue(TestHelper.CheckMethodCalledXTimes(self.vm_service.delete_resource_group))
self.vm_service.delete_resource_group.assert_called_with(resource_management_client=resource_client,
group_name=tested_group_name)
| Python | 0 |
e5ed0e4e6dea58a1412e3c596612e647bd22c619 | Update __init__.py | krempelair/__init__.py | krempelair/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import jinja2
import flask
import views
class Krempelair(flask.Flask):
jinja_options = {
'extensions': ['jinja2.ext.autoescape'],
'undefined': jinja2.StrictUndefined
}
def __init__(self):
"""(See `make_app` for parameter descriptions.)"""
flask.Flask.__init__(self, __name__)
self.setup_routes()
def create_jinja_environment(self):
"""Called by Flask.__init__"""
env = super(Krempelair, self).create_jinja_environment()
for func in [
'force_unicode',
'timesince',
'shorten_sha1',
'shorten_message',
'extract_author_name',
'formattimestamp',
]:
env.filters[func] = getattr(utils, func)
return env
def setup_routes(self):
for endpoint, rule in [
('air_get_status_betrieb', '/'),
('air_get_status_stoerung', '/stoerung'),
('air_set_status', '/<int:pin>/<int:state>'),
('air_set_level', '/lueftung/stufe/<int:level>'),
('air_set_timer', '/lueftung/timer/<int:time>'),
('air_set_temp', '/lueftung/temperatur/sollTemp/<int:temp>'),
('air_set_tempNAK', '/lueftung/temperatur/sollTempNAK/<int:temp>'),
('air_set_raucherraum_on', '/raucherraum/on'),
('air_set_raucherraum_off', '/raucherraum/off'),
('air_get_temperaturen', '/lueftung/temperatur'),
]:
self.add_url_rule(rule, view_func=getattr(views, endpoint))
if __name__ == "__main__":
app = Krempelair()
app.run(host="0.0.0.0", debug=True)
else:
application = Krempelair()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import jinja2
import flask
import views
class Krempelair(flask.Flask):
jinja_options = {
'extensions': ['jinja2.ext.autoescape'],
'undefined': jinja2.StrictUndefined
}
def __init__(self):
"""(See `make_app` for parameter descriptions.)"""
flask.Flask.__init__(self, __name__)
self.setup_routes()
def create_jinja_environment(self):
"""Called by Flask.__init__"""
env = super(Krempelair, self).create_jinja_environment()
for func in [
'force_unicode',
'timesince',
'shorten_sha1',
'shorten_message',
'extract_author_name',
'formattimestamp',
]:
env.filters[func] = getattr(utils, func)
return env
def setup_routes(self):
for endpoint, rule in [
('air_get_status_betrieb', '/'),
('air_get_status_stoerung', '/stoerung'),
('air_set_status', '/<int:pin>/<int:state>'),
('air_set_level', '/lueftung/stufe/<int:level>'),
('air_set_timer', '/lueftung/timer/<int:time>'),
('air_set_temp', '/lueftung/temperatur/<int:temp>'),
('air_set_raucherraum_on', '/raucherraum/on'),
('air_set_raucherraum_off', '/raucherraum/off'),
('air_get_temperaturen', '/lueftung/temperatur'),
]:
self.add_url_rule(rule, view_func=getattr(views, endpoint))
if __name__ == "__main__":
app = Krempelair()
app.run(host="0.0.0.0", debug=True)
else:
application = Krempelair()
| Python | 0.000072 |
3f725f25b0896237b71f68993d9ffa24329f47fe | Keep the same format with other usage: capitalize the head letter | kuryr/common/config.py | kuryr/common/config.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Kuryr
"""
import os
from oslo_config import cfg
from kuryr import i18n
_ = i18n._
core_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../')),
help=_('Directory where Kuryr python module is installed.')),
cfg.StrOpt('bindir',
default='$pybasedir/usr/libexec/kuryr',
help=_('Directory for Kuryr vif binding executables.')),
cfg.StrOpt('kuryr_uri',
default='http://127.0.0.1:2377',
help=_('Kuryr URL for accessing Kuryr through json rpc.')),
cfg.StrOpt('capability_scope',
default='global',
choices=['local', 'global'],
help=_('Kuryr plugin scope reported to libnetwork.')),
]
neutron_opts = [
cfg.StrOpt('neutron_uri',
default=os.environ.get('OS_URL', 'http://127.0.0.1:9696'),
help=_('Neutron URL for accessing the network service.')),
cfg.StrOpt('enable_dhcp',
default='False',
help=_('Enable or Disable dhcp for neutron subnets.')),
]
keystone_opts = [
cfg.StrOpt('auth_uri',
default=os.environ.get('IDENTITY_URL',
'http://127.0.0.1:35357'),
help=_('The URL for accessing the identity service.')),
cfg.StrOpt('admin_user',
default=os.environ.get('SERVICE_USER'),
help=_('The admin username.')),
cfg.StrOpt('admin_tenant_name',
default=os.environ.get('SERVICE_TENANT_NAME'),
help=_('The admin username.')),
cfg.StrOpt('admin_password',
default=os.environ.get('SERVICE_PASSWORD'),
help=_('The admin password.')),
cfg.StrOpt('admin_token',
default=os.environ.get('SERVICE_TOKEN'),
help=_('The admin token.')),
]
binding_opts = [
cfg.StrOpt('veth_dst_prefix',
default='eth',
help=('The name prefix of the veth endpoint put inside the '
'container.'))
]
CONF = cfg.CONF
CONF.register_opts(core_opts)
CONF.register_opts(neutron_opts, group='neutron_client')
CONF.register_opts(keystone_opts, group='keystone_client')
CONF.register_opts(binding_opts, 'binding')
def init(args, **kwargs):
    """Parse command-line args and config files into the global ``cfg.CONF``.

    :param args: command-line arguments forwarded to oslo.config.
    :param kwargs: extra keyword arguments for ``cfg.CONF`` (e.g.
        ``default_config_files``).
    """
    cfg.CONF(args=args, project='kuryr',
             version='0.1', **kwargs)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Kuryr
"""
import os
from oslo_config import cfg
from kuryr import i18n
_ = i18n._
core_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../')),
help=_('Directory where kuryr python module is installed.')),
cfg.StrOpt('bindir',
default='$pybasedir/usr/libexec/kuryr',
help=_('Directory for kuryr vif binding executables.')),
cfg.StrOpt('kuryr_uri',
default='http://127.0.0.1:2377',
help=_('Kuryr URL for accessing Kuryr through json rpc.')),
cfg.StrOpt('capability_scope',
default='global',
choices=['local', 'global'],
help=_('Kuryr plugin scope reported to libnetwork.')),
]
neutron_opts = [
cfg.StrOpt('neutron_uri',
default=os.environ.get('OS_URL', 'http://127.0.0.1:9696'),
help=_('Neutron URL for accessing the network service.')),
cfg.StrOpt('enable_dhcp',
default='False',
help=_('Enable or Disable dhcp for neutron subnets.')),
]
keystone_opts = [
cfg.StrOpt('auth_uri',
default=os.environ.get('IDENTITY_URL',
'http://127.0.0.1:35357'),
help=_('The URL for accessing the identity service.')),
cfg.StrOpt('admin_user',
default=os.environ.get('SERVICE_USER'),
help=_('The admin username.')),
cfg.StrOpt('admin_tenant_name',
default=os.environ.get('SERVICE_TENANT_NAME'),
help=_('The admin username.')),
cfg.StrOpt('admin_password',
default=os.environ.get('SERVICE_PASSWORD'),
help=_('The admin password.')),
cfg.StrOpt('admin_token',
default=os.environ.get('SERVICE_TOKEN'),
help=_('The admin token.')),
]
binding_opts = [
cfg.StrOpt('veth_dst_prefix',
default='eth',
help=('The name prefix of the veth endpoint put inside the '
'container.'))
]
CONF = cfg.CONF
CONF.register_opts(core_opts)
CONF.register_opts(neutron_opts, group='neutron_client')
CONF.register_opts(keystone_opts, group='keystone_client')
CONF.register_opts(binding_opts, 'binding')
def init(args, **kwargs):
cfg.CONF(args=args, project='kuryr',
version='0.1', **kwargs)
| Python | 0.999999 |
f751f0bc4ea5466052fdd11a674ddb9a0a3303a4 | Fix pylint | dataset/models/tf/layers/core.py | dataset/models/tf/layers/core.py | """ Contains common layers """
import numpy as np
import tensorflow as tf
def flatten2d(inputs, name=None):
    """ Flatten tensor to two dimensions (batch_size, item_vector_size) """
    tensor = tf.convert_to_tensor(inputs)
    # Dynamic product of all non-batch dimensions.
    item_size = tf.reduce_prod(tf.shape(tensor)[1:])
    return tf.reshape(tensor, [-1, item_size], name=name)
def flatten(inputs, name=None):
    """ Flatten tensor to two dimensions (batch_size, item_vector_size) using inferred shape and numpy """
    tensor = tf.convert_to_tensor(inputs)
    # Static (inferred) shape; numpy computes the non-batch element count.
    static_shape = tensor.get_shape().as_list()
    item_size = np.prod(static_shape[1:])
    return tf.reshape(tensor, [-1, item_size], name=name)
def maxout(inputs, depth, axis=-1, name='max'):
    """ Shrink last dimension by making max pooling every ``depth`` channels """
    # Reshape ``axis`` into (axis_size // depth, depth) and reduce-max over
    # the trailing ``depth`` slot, shrinking that dimension by ``depth``.
    with tf.name_scope(name):
        x = tf.convert_to_tensor(inputs)
        shape = x.get_shape().as_list()
        shape[axis] = -1
        shape += [depth]
        # Replace unknown (None) static dims with their dynamic sizes so the
        # reshape target is fully specified at run time.
        for i, _ in enumerate(shape):
            if shape[i] is None:
                shape[i] = tf.shape(x)[i]
        out = tf.reduce_max(tf.reshape(x, shape), axis=-1, keep_dims=False)
        return out
def mip(inputs, depth, data_format='channels_last', name='mip'):
    """ Maximum intensity projection by shrinking the channels dimension with max pooling every ``depth`` channels """
    with tf.name_scope(name):
        x = tf.convert_to_tensor(inputs)
        # Channels axis position depends on the data format.
        axis = -1 if data_format == 'channels_last' else 1
        num_layers = x.get_shape().as_list()[axis]
        # Split channels into groups of ``depth``; the final group keeps the
        # remainder when num_layers is not a multiple of depth.
        split_sizes = [depth] * (num_layers // depth)
        if num_layers % depth:
            split_sizes += [num_layers % depth]
        splits = tf.split(x, split_sizes, axis=axis)
        mips = []
        for split in splits:
            # Max over each channel group is the intensity projection.
            amip = tf.reduce_max(split, axis=axis)
            mips.append(amip)
        mips = tf.stack(mips, axis=axis)
        return mips
def alpha_dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):
    """ Alpha dropout layer

    Alpha Dropout is a dropout that maintains the self-normalizing property.
    For an input with zero mean and unit standard deviation, the output of Alpha Dropout maintains
    the original mean and standard deviation of the input.

    Klambauer G. et al "`Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_"

    Parameters
    ----------
    inputs : tf.Tensor
    rate : float
        Fraction of units to drop; ``tf.contrib.nn.alpha_dropout`` receives
        the keep probability ``1 - rate``.
    noise_shape, seed
        Forwarded to ``tf.contrib.nn.alpha_dropout``.
    training : tf.Tensor of bool
        When true apply dropout, otherwise pass inputs through unchanged.
    name : str, optional
    """
    def _dropped_inputs():
        return tf.contrib.nn.alpha_dropout(inputs, 1-rate, noise_shape=noise_shape, seed=seed)
    # tf.cond picks the branch at run time, so one graph serves both modes.
    return tf.cond(training, _dropped_inputs, lambda: tf.identity(inputs), name=name)
| """ Contains common layers """
import numpy as np
import tensorflow as tf
def flatten2d(inputs, name=None):
""" Flatten tensor to two dimensions (batch_size, item_vector_size) """
x = tf.convert_to_tensor(inputs)
dims = tf.reduce_prod(tf.shape(x)[1:])
x = tf.reshape(x, [-1, dims], name=name)
return x
def flatten(inputs, name=None):
""" Flatten tensor to two dimensions (batch_size, item_vector_size) using inferred shape and numpy """
x = tf.convert_to_tensor(inputs)
shape = x.get_shape().as_list()
dim = np.prod(shape[1:])
x = tf.reshape(x, [-1, dim], name=name)
return x
def maxout(inputs, depth, axis=-1, name='max'):
""" Shrink last dimension by making max pooling every ``depth`` channels """
with tf.name_scope(name):
x = tf.convert_to_tensor(inputs)
shape = x.get_shape().as_list()
shape[axis] = -1
shape += [depth]
for i, _ in enumerate(shape):
if shape[i] is None:
shape[i] = tf.shape(x)[i]
out = tf.reduce_max(tf.reshape(x, shape), axis=-1, keep_dims=False)
return out
def mip(inputs, depth, data_format='channels_last', name='mip'):
""" Maximum intensity projection by shrinking the channels dimension with max pooling every ``depth`` channels """
with tf.name_scope(name):
x = tf.convert_to_tensor(inputs)
axis = -1 if data_format == 'channels_last' else 1
num_layers = x.get_shape().as_list()[axis]
split_sizes = [depth] * (num_layers // depth)
if num_layers % depth:
split_sizes += [num_layers % depth]
splits = tf.split(x, split_sizes, axis=axis)
mips = []
for split in splits:
amip = tf.reduce_max(split, axis=axis)
mips.append(amip)
mips = tf.stack(mips, axis=axis)
return mips
def alpha_dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):
""" Alpha dropout layer
Alpha Dropout is a dropout that maintains the self-normalizing property.
For an input with zero mean and unit standard deviation, the output of Alpha Dropout maintains
the original mean and standard deviation of the input.
Klambauer G. et al "`Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_"
"""
def dropped_inputs():
return tf.contrib.nn.alpha_dropout(inputs, 1-rate, noise_shape=noise_shape, seed=seed)
return tf.cond(training, dropped_inputs, lambda: tf.identity(inputs)) | Python | 0.000099 |
130663a47fe3c497aedd39acd12de70bab230dec | make things login free | src/datahub/browser/views.py | src/datahub/browser/views.py | import json, sys, re, hashlib, smtplib, base64, urllib, os
from auth import *
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
'''
@author: Anant Bhardwaj
@date: Mar 21, 2013
Datahub Web Handler
'''
@login_required
def home(request):
    """Redirect a logged-in user to his/her own user page (/<username>)."""
    try:
        user = request.session[kUsername]
        return HttpResponseRedirect('/%s' %(user))
    except KeyError:
        # No username in the session -- force a fresh login.
        return HttpResponseRedirect('/login')
    except Exception, e:
        # NOTE(review): the dict is handed to HttpResponse without
        # json.dumps, so the body is not valid JSON -- confirm.
        return HttpResponse(
            {'error': str(e)},
            mimetype="application/json")
def user(request, username):
    """Render the user page listing all repos owned by ``username``."""
    try:
        if(username):
            res = manager.list_repos(username)
            # ``tuples`` rows are one-column records; unpack the repo names.
            repos = [t[0] for t in res['tuples']]
            return render_to_response("user.html", {
                'username': username,
                'repos': repos})
        # NOTE(review): falls through to an implicit ``return None`` when
        # username is empty -- confirm that is intended.
    except Exception, e:
        # NOTE(review): dict is not JSON-serialized before the response.
        return HttpResponse(
            {'error': str(e)},
            mimetype="application/json")
def repo(request, username, repo):
    """Render the repo page listing all tables in ``username``'s ``repo``."""
    try:
        res = manager.list_tables(username, repo)
        # Each row of ``tuples`` holds a single table name.
        tables = [t[0] for t in res['tuples']]
        return render_to_response("repo.html", {
            'username': username,
            'repo': repo,
            'tables': tables})
    except Exception, e:
        # NOTE(review): dict is not JSON-serialized before the response.
        return HttpResponse(
            {'error': str(e)},
            mimetype="application/json")
def table(request, username, repo, table):
    """Render all rows of ``username.repo.table``.

    NOTE(review): the identifiers are interpolated straight into the SQL
    string, so this trusts the URL components completely -- confirm they
    are validated upstream (SQL-injection risk otherwise).
    """
    try:
        res = manager.execute_sql(
            username=username,
            query='SELECT * from %s.%s.%s' %(username, repo, table))
        column_names = res['column_names']
        tuples = res['tuples']
        return render_to_response("table.html", {
            'username': username,
            'repo': repo,
            'table': table,
            'column_names': column_names,
            'tuples': tuples})
    except Exception, e:
        # NOTE(review): dict is not JSON-serialized before the response.
        return HttpResponse(
            {'error': str(e)},
            mimetype="application/json")
| import json, sys, re, hashlib, smtplib, base64, urllib, os
from auth import *
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
'''
@author: Anant Bhardwaj
@date: Mar 21, 2013
Datahub Web Handler
'''
@login_required
def home(request):
try:
user = request.session[kUsername]
return HttpResponseRedirect(user)
except KeyError:
return HttpResponseRedirect('/login')
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def user(request, username):
try:
if(username):
res = manager.list_repos(username)
repos = [t[0] for t in res['tuples']]
return render_to_response("user.html", {
'username': username,
'repos': repos})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def repo(request, username, repo):
try:
res = manager.list_tables(username, repo)
tables = [t[0] for t in res['tuples']]
return render_to_response("repo.html", {
'username': username,
'repo': repo,
'tables': tables})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def table(request, username, repo, table):
try:
res = manager.execute_sql(
username=username,
query='SELECT * from %s.%s.%s' %(username, repo, table))
column_names = res['column_names']
tuples = res['tuples']
return render_to_response("table.html", {
'username': username,
'repo': repo,
'table': table,
'column_names': column_names,
'tuples': tuples})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
| Python | 0 |
8712b50048b3fe42fbeb725f92f20bda08cfcc28 | update output string formatting | sknano/structure_io/_xyz_structure_data.py | sknano/structure_io/_xyz_structure_data.py | # -*- coding: utf-8 -*-
"""
==============================================================================
XYZ format (:mod:`sknano.structure_io._xyz_structure_data`)
==============================================================================
.. currentmodule:: sknano.structure_io._xyz_structure_data
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
from pksci.chemistry import Atom
from pkshared.tools.fiofuncs import get_fpath
from ._structure_data import StructureReader, StructureReaderError, \
StructureWriter
__all__ = ['XYZDATA', 'XYZReader', 'XYZWriter']
class XYZReader(StructureReader):
    """Class for reading xyz chemical file format.

    Parameters
    ----------
    xyzfile : str
        xyz structure file

    """
    def __init__(self, fname=None):
        super(XYZReader, self).__init__(fname=fname)
        self._read()

    def _read(self):
        # xyz layout: line 1 = atom count, line 2 = free-form comment,
        # then one "<symbol> <x> <y> <z>" record per atom.
        with open(self._fname, 'r') as f:
            self._Natoms = int(f.readline().strip())
            self._comment_line = f.readline().strip()
            lines = f.readlines()
            for line in lines:
                s = line.strip().split()
                # Skip blank lines; anything else must be an atom record.
                if len(s) != 0:
                    atom = \
                        Atom(s[0], x=float(s[1]), y=float(s[2]), z=float(s[3]))
                    self._atoms.append(atom)
class XYZWriter(StructureWriter):
    """Class for writing xyz chemical file format."""

    @classmethod
    def write(cls, fname=None, atoms=None, comment_line=None):
        """Write structure data to file.

        Parameters
        ----------
        fname : str
        atoms : Atoms
            :py:class:`~pksci.chemistry.Atoms` instance.
        comment_line : str, optional
            Defaults to the resolved output file name.

        """
        if fname is None:
            raise TypeError('fname argument must be a string!')
        elif atoms is None:
            raise TypeError('atoms argument must be an Atoms object')
        else:
            fname = get_fpath(fname=fname, ext='xyz', overwrite=True)
            if comment_line is None:
                comment_line = fname
            # Normalize "-0.00000" coordinates before formatting.
            atoms.fix_minus_zero_coords()
            with open(fname, 'w') as f:
                f.write('{:d}\n'.format(atoms.Natoms))
                f.write('{}\n'.format(comment_line))
                for atom in atoms:
                    # Fixed-width record: right-aligned 3-char symbol plus
                    # three 15-wide coordinates with 5 decimal places.
                    f.write('{:>3s}{:15.5f}{:15.5f}{:15.5f}\n'.format(
                        atom.symbol, atom.x, atom.y, atom.z))
class XYZDATA(XYZReader):
    """Class for reading and writing structure data in XYZ data format.

    Parameters
    ----------
    fname : str, optional

    """
    def __init__(self, fname=None):
        try:
            super(XYZDATA, self).__init__(fname=fname)
        except StructureReaderError:
            # No readable file yet -- start with an empty structure that can
            # be populated and written later.
            pass

    def write(self, xyzfile=None):
        # Default to round-tripping back to the file we were read from.
        if xyzfile is None:
            xyzfile = self._fname
        XYZWriter.write(fname=xyzfile, atoms=self._atoms,
                        comment_line=self._comment_line)
| # -*- coding: utf-8 -*-
"""
==============================================================================
XYZ format (:mod:`sknano.structure_io._xyz_structure_data`)
==============================================================================
.. currentmodule:: sknano.structure_io._xyz_structure_data
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
from pksci.chemistry import Atom
from pkshared.tools.fiofuncs import get_fpath
from ._structure_data import StructureReader, StructureReaderError, \
StructureWriter
__all__ = ['XYZDATA', 'XYZReader', 'XYZWriter']
class XYZReader(StructureReader):
"""Class for reading xyz chemical file format.
Parameters
----------
xyzfile : str
xyz structure file
"""
def __init__(self, fname=None):
super(XYZReader, self).__init__(fname=fname)
self._read()
def _read(self):
with open(self._fname, 'r') as f:
self._Natoms = int(f.readline().strip())
self._comment_line = f.readline().strip()
lines = f.readlines()
for line in lines:
s = line.strip().split()
if len(s) != 0:
atom = \
Atom(s[0], x=float(s[1]), y=float(s[2]), z=float(s[3]))
self._atoms.append(atom)
class XYZWriter(StructureWriter):
"""Class for writing xyz chemical file format."""
@classmethod
def write(cls, fname=None, atoms=None, comment_line=None):
"""Write structure data to file.
Parameters
----------
fname : str
atoms : Atoms
:py:class:`~pksci.chemistry.Atoms` instance.
comment_line : str, optional
"""
if fname is None:
raise TypeError('fname argument must be a string!')
elif atoms is None:
raise TypeError('atoms argument must be an Atoms object')
else:
fname = get_fpath(fname=fname, ext='xyz', overwrite=True)
if comment_line is None:
comment_line = fname
atoms.fix_minus_zero_coords()
with open(fname, 'w') as f:
f.write('{:d}\n'.format(atoms.Natoms))
f.write('{}\n'.format(comment_line))
for atom in atoms:
f.write('{:3s} {:10.5f} {:10.5f} {:10.5f}\n'.format(
atom.symbol, atom.x, atom.y, atom.z))
class XYZDATA(XYZReader):
"""Class for reading and writing structure data in XYZ data format.
Parameters
----------
fname : str, optional
"""
def __init__(self, fname=None):
try:
super(XYZDATA, self).__init__(fname=fname)
except StructureReaderError:
pass
def write(self, xyzfile=None):
if xyzfile is None:
xyzfile = self._fname
XYZWriter.write(fname=xyzfile, atoms=self._atoms,
comment_line=self._comment_line)
| Python | 0.000003 |
29a5ec45e76681865c62163e2580c0bfd4a6e241 | Enhance comments | lc0045_jump_game_ii.py | lc0045_jump_game_ii.py | """Leetcode 45. Jump Game II
Hard
URL: https://leetcode.com/problems/jump-game-ii/
Given an array of non-negative integers, you are initially positioned at
the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Example:
Input: [2,3,1,1,4]
Output: 2
Explanation: The minimum number of jumps to reach the last index is 2.
Jump 1 step from index 0 to 1, then 3 steps to the last index.
Note:
You can assume that you can always reach the last index.
"""
class SolutionDPGreedy(object):
    def jump(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Dynamic programming with a greedy frontier: min_jumps[i] is the
        fewest jumps needed to reach index i.  Each index only relaxes the
        positions beyond the furthest point already updated, so no cell is
        relaxed from the same source twice.

        Time complexity: O(n^2), where n is length of nums.
        Space complexity: O(n).
        """
        n = len(nums)
        min_jumps = [n] * n
        min_jumps[0] = 0
        frontier = 0
        for idx, step in enumerate(nums):
            # Furthest index reachable in one hop from idx (clamped to end).
            farthest = min(idx + step, n - 1)
            for nxt in range(frontier + 1, farthest + 1):
                min_jumps[nxt] = min(min_jumps[nxt], min_jumps[idx] + 1)
            frontier = max(frontier, idx + step)
        return min_jumps[-1]
class SolutionBFSGreedy1(object):
    def jump(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Implicit BFS over index levels: each committed jump expands the
        window of indices reachable with one more step; count how many
        expansions happen before the window covers the last index.

        Time complexity: O(n), where n is length of nums.
        Space complexity: O(1).
        """
        last = len(nums) - 1
        level_end, farthest = -1, 0
        jumps = 0
        for idx, step in enumerate(nums):
            # Stop as soon as the last index is within reach.
            if farthest >= last:
                break
            # idx left the previous level's window -- commit another jump.
            if level_end < idx:
                jumps += 1
                level_end = farthest
            farthest = max(farthest, idx + step)
        return jumps
class SolutionBFSGreedy2(object):
    def jump(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Greedy level scan: walk every index except the last, tracking the
        furthest index reachable so far; whenever the scan hits the end of
        the current jump's range, commit another jump.

        Time complexity: O(n), where n is length of nums.
        Space complexity: O(1).
        """
        last = len(nums) - 1
        jump_end = farthest = 0
        jumps = 0
        for idx in range(last):
            # Already able to land on the last index -- done.
            if jump_end >= last:
                break
            farthest = max(farthest, idx + nums[idx])
            # Reached the edge of the current jump's range: jump again.
            if idx == jump_end:
                jumps += 1
                jump_end = farthest
        return jumps
def main():
    # Smoke-test all three solutions on the LeetCode example and on a
    # longer case; every line should print 2.
    # Output: 2
    nums = [2,3,1,1,4]
    print SolutionDPGreedy().jump(nums)
    print SolutionBFSGreedy1().jump(nums)
    print SolutionBFSGreedy2().jump(nums)
    # Output: 2
    nums = [7,0,9,6,9,6,1,7,9,0,1,2,9,0,3]
    print SolutionDPGreedy().jump(nums)
    print SolutionBFSGreedy1().jump(nums)
    print SolutionBFSGreedy2().jump(nums)
if __name__ == '__main__':
main()
| """Leetcode 45. Jump Game II
Hard
URL: https://leetcode.com/problems/jump-game-ii/
Given an array of non-negative integers, you are initially positioned at
the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Example:
Input: [2,3,1,1,4]
Output: 2
Explanation: The minimum number of jumps to reach the last index is 2.
Jump 1 step from index 0 to 1, then 3 steps to the last index.
Note:
You can assume that you can always reach the last index.
"""
class SolutionDPGreedy(object):
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
Time complexity: O(n^2), where n is length of nums.
Space complexity: O(n).
"""
n = len(nums)
# Create table T, where T[i] is min number of jumps to reach i.
T = [n] * n
T[0] = 0
# For each index i, loop to update T[reach+1] ~ T[i+nums[i]].
reach = 0
for i in range(n):
for j in range(reach + 1, min(i + nums[i], n - 1) + 1):
T[j] = min(T[j], T[i] + 1)
reach = max(reach, i + nums[i])
return T[-1]
class SolutionBFSGreedy1(object):
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
Time complexity: O(n), where n is length of nums.
Space complexity: O(1).
"""
n = len(nums)
# Apply greedy algorithm to check index i in prev_reach and reach.
prev_reach, reach = -1, 0
result = 0
for i in range(n):
# Check if reached last index already.
if reach >= n - 1:
break
# Update jump if prev_reach is behind current index.
if prev_reach < i:
result += 1
prev_reach = reach
# Update reach with current index and jump.
reach = max(reach, i + nums[i])
return result
class SolutionBFSGreedy2(object):
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
Time complexity: O(n), where n is length of nums.
Space complexity: O(1).
"""
n = len(nums)
# Apply greedy algorithm to check index i in cur_reach and reach.
cur_reach, reach = 0, 0
result = 0
for i in range(n - 1):
# Break if reaches last index.
if cur_reach >= n - 1:
break
# Update reach with current index and jump.
reach = max(reach, i + nums[i])
# If i reaches cur_reach, trigger another jump and update cur_reach.
if i == cur_reach:
result += 1
cur_reach = reach
return result
def main():
# Outpout: 2
nums = [2,3,1,1,4]
print SolutionDPGreedy().jump(nums)
print SolutionBFSGreedy1().jump(nums)
print SolutionBFSGreedy2().jump(nums)
# Outpout: 2
nums = [7,0,9,6,9,6,1,7,9,0,1,2,9,0,3]
print SolutionDPGreedy().jump(nums)
print SolutionBFSGreedy1().jump(nums)
print SolutionBFSGreedy2().jump(nums)
if __name__ == '__main__':
main()
| Python | 0 |
87dd116aea6e3c8d9d436ce6b5bf1fbbe0ff0788 | Fix incorrect role assignment in migration. | keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py | keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py | import json
import sqlalchemy as sql
from keystone import config
CONF = config.CONF
def upgrade(migrate_engine):
    """Move role assignments from the legacy ``metadata`` table into
    ``user_project_metadata``, appending the configured member role and
    merging role lists with any pre-existing row for the same user/project.
    """
    meta = sql.MetaData()
    meta.bind = migrate_engine

    # Autoload related tables so foreign keys on the metadata tables resolve.
    sql.Table('user', meta, autoload=True)
    sql.Table('role', meta, autoload=True)
    sql.Table('project', meta, autoload=True)
    new_metadata_table = sql.Table('user_project_metadata',
                                   meta,
                                   autoload=True)

    old_metadata_table = sql.Table('metadata', meta, autoload=True)

    session = sql.orm.sessionmaker(bind=migrate_engine)()

    for metadata in session.query(old_metadata_table):
        # Substring check against the serialized JSON; append the member
        # role only when its id is not already mentioned.
        if config.CONF.member_role_id not in metadata.data:
            data = json.loads(metadata.data)
            data['roles'].append(config.CONF.member_role_id)
        else:
            # NOTE(review): in this branch ``data`` stays a JSON *string*,
            # so the ``data['roles']`` access below would raise TypeError
            # when a matching row exists -- confirm.
            data = metadata.data
        # Look for an existing row for the same (user, project) pair.
        r = session.query(new_metadata_table).filter_by(
            user_id=metadata.user_id,
            project_id=metadata.tenant_id).first()
        if r is not None:
            # roles should be the union of the two role lists
            old_roles = data['roles']
            new_roles = json.loads(r.data)['roles']
            data['roles'] = list(set(old_roles) | set(new_roles))
            q = new_metadata_table.update().where(
                new_metadata_table.c.user_id == metadata.user_id).where(
                    new_metadata_table.c.project_id ==
                    metadata.tenant_id).values(data=json.dumps(data))
        else:
            q = new_metadata_table.insert().values(
                user_id=metadata.user_id,
                project_id=metadata.tenant_id,
                data=json.dumps(data))
        session.execute(q)
    session.commit()
    # The legacy table is superseded; drop it once all rows are migrated.
    old_metadata_table.drop()
def downgrade(migrate_engine):
    """Recreate the legacy ``metadata`` table and copy user/project pairs
    back from ``user_project_metadata``.
    """
    meta = sql.MetaData()
    meta.bind = migrate_engine

    sql.Table('user', meta, autoload=True)
    sql.Table('project', meta, autoload=True)
    # Recreate the legacy schema: composite (user_id, tenant_id) key plus a
    # free-form serialized ``data`` column.
    metadata_table = sql.Table(
        'metadata',
        meta,
        sql.Column(
            u'user_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            u'tenant_id',
            sql.String(64),
            primary_key=True),
        sql.Column('data',
                   sql.Text()))

    metadata_table.create(migrate_engine, checkfirst=True)

    user_project_metadata_table = sql.Table(
        'user_project_metadata',
        meta,
        autoload=True)

    # Re-autoload so the table object reflects the created schema.
    metadata_table = sql.Table(
        'metadata',
        meta,
        autoload=True)

    session = sql.orm.sessionmaker(bind=migrate_engine)()
    for metadata in session.query(user_project_metadata_table):
        if 'roles' in metadata:
            # NOTE(review): this builds an insert statement but never
            # executes it (no session/connection execute and no ``data``
            # value), so the downgrade copies nothing back -- confirm
            # whether that is intended.
            metadata_table.insert().values(
                user_id=metadata.user_id,
                tenant_id=metadata.project_id)
    session.close()
| import json
import sqlalchemy as sql
from keystone import config
CONF = config.CONF
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
sql.Table('user', meta, autoload=True)
sql.Table('role', meta, autoload=True)
sql.Table('project', meta, autoload=True)
new_metadata_table = sql.Table('user_project_metadata',
meta,
autoload=True)
conn = migrate_engine.connect()
old_metadata_table = sql.Table('metadata', meta, autoload=True)
session = sql.orm.sessionmaker(bind=migrate_engine)()
for metadata in session.query(old_metadata_table):
if config.CONF.member_role_id not in metadata.data:
data = json.loads(metadata.data)
data['roles'].append(config.CONF.member_role_id)
else:
data = metadata.data
r = session.query(new_metadata_table).filter_by(
user_id=metadata.user_id,
project_id=metadata.tenant_id).first()
if r is not None:
# roles should be the union of the two role lists
old_roles = data['roles']
new_roles = json.loads(r.data)['roles']
data['roles'] = list(set(old_roles) | set(new_roles))
q = new_metadata_table.update().where(
new_metadata_table.c.user_id == metadata.user_id and
new_metadata_table.c.project_id == metadata.tenant_id).values(
data=json.dumps(data))
else:
q = new_metadata_table.insert().values(
user_id=metadata.user_id,
project_id=metadata.tenant_id,
data=json.dumps(data))
conn.execute(q)
session.close()
old_metadata_table.drop()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
sql.Table('user', meta, autoload=True)
sql.Table('project', meta, autoload=True)
metadata_table = sql.Table(
'metadata',
meta,
sql.Column(
u'user_id',
sql.String(64),
primary_key=True),
sql.Column(
u'tenant_id',
sql.String(64),
primary_key=True),
sql.Column('data',
sql.Text()))
metadata_table.create(migrate_engine, checkfirst=True)
user_project_metadata_table = sql.Table(
'user_project_metadata',
meta,
autoload=True)
metadata_table = sql.Table(
'metadata',
meta,
autoload=True)
session = sql.orm.sessionmaker(bind=migrate_engine)()
for metadata in session.query(user_project_metadata_table):
if 'roles' in metadata:
metadata_table.insert().values(
user_id=metadata.user_id,
tenant_id=metadata.project_id)
session.close()
| Python | 0.000729 |
00ca89242b64d29a034aa03b1e76abef617f1b26 | put validation back | application/frontend/forms.py | application/frontend/forms.py | from datetime import date
from flask import request
from flask_wtf import Form
from wtforms import (
    StringField,
    HiddenField,
    BooleanField,
    DateField,
    PasswordField,
    SubmitField,
    SelectField,
    RadioField,
    TextAreaField,
    IntegerField
)
from wtforms.validators import DataRequired, ValidationError, NumberRange
from datatypes import country_code_validator
from application.frontend.field_helpers import countries_list_for_selector
class ValidateDateNotInFuture(object):
    """WTForms-style validator that rejects dates later than today."""

    def __init__(self):
        self.message = "The date must not be in the future"

    def __call__(self, form, field):
        # WTForms invokes validators as callables with (form, field).
        self._validate_date_not_in_future(form, field.data)

    def _validate_date_not_in_future(self, form, date_field):
        today = date.today()
        if date_field > today:
            raise ValidationError('Date cannot be in the future')
class LoginForm(Form):
    """Login form carrying a hidden ``next`` field for post-login redirects."""
    email = StringField(validators=[DataRequired()])
    password = PasswordField(validators=[DataRequired()])
    submit = SubmitField('Login')
    remember = BooleanField('Remember me')
    next = HiddenField()

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Preserve the ?next= redirect target across the form round-trip.
        if not self.next.data:
            self.next.data = request.args.get('next', '')
        # Pre-tick "Remember me" by default.
        self.remember.default = True
class ChangeForm(Form):
    """Form for changing a proprietor's registered name after marriage."""
    title_number = HiddenField('Title Number')
    confirm = BooleanField('Confirm')
    proprietor_previous_full_name = HiddenField('Previous full name')
    proprietor_new_full_name = StringField('New full name', validators=[DataRequired()])
    partner_name = StringField('Partner\'s full name', validators=[DataRequired()])
    marriage_date = DateField('Date of marriage', format='%d-%m-%Y',
                              validators=[DataRequired(), ValidateDateNotInFuture()],
                              description="For example, 20-08-2011")
    marriage_place = StringField('Location of marriage ceremony', validators=[DataRequired()])
    marriage_country = SelectField('Country',
                                   validators=[DataRequired(), country_code_validator.wtform_validator()],
                                   choices=countries_list_for_selector)
    marriage_certificate_number = StringField('Marriage certificate number', validators=[DataRequired()])
class ConfirmForm(ChangeForm):
    """
    Inherits from ChangeForm and makes all the data caught on the first page hidden.
    """
    # Each override as a HiddenField also drops the first page's validators,
    # so this page only re-submits the already-captured values.
    title_number = HiddenField('Title Number')
    confirm = BooleanField('Confirm')
    proprietor_previous_full_name = HiddenField('Previous full name')
    proprietor_new_full_name = HiddenField('New full name')
    partner_name = HiddenField('Partner\'s full name')
    marriage_date = HiddenField('Date of marriage')
    marriage_place = HiddenField('Location of marriage ceremony')
    marriage_country = HiddenField('Country of marriage ceremony')
    marriage_certificate_number = HiddenField('Marriage certificate number')
class ConveyancerAddClientForm(Form):
    """Captures a single client's personal and contact details."""
    full_name = StringField('Full name', validators=[DataRequired()])
    date_of_birth = DateField('Date of birth', format='%d-%m-%Y',
                              validators=[DataRequired(), ValidateDateNotInFuture()],
                              description="For example, 20-08-2011")
    address = TextAreaField('Address', validators=[DataRequired()])
    telephone = StringField('Telephone', validators=[DataRequired()])
    email = StringField('Email address', validators=[DataRequired()])
class SelectTaskForm(Form):
another_task = StringField('Please specify:')
buying_or_selling_property = RadioField(
'Is your client buying or selling this property?',
choices=[
('buying', 'Buying this property'),
('selling', 'Selling this property'),
('other', 'Another task')
])
class ConveyancerAddClientsForm(Form):
num_of_clients = StringField('How many clients will you act for?',
validators=[DataRequired(),
NumberRange(1, 2, "Number of clients cannot be more than two.")])
| from datetime import date
from flask import request
from flask_wtf import Form
from wtforms import (
StringField,
HiddenField,
BooleanField,
DateField,
PasswordField,
SubmitField,
SelectField,
RadioField,
TextAreaField
)
from wtforms.validators import DataRequired, ValidationError, NumberRange
from datatypes import country_code_validator
from application.frontend.field_helpers import countries_list_for_selector
class ValidateDateNotInFuture(object):
def __init__(self):
self.message = "The date must not be in the future"
def __call__(self, form, field):
self._validate_date_not_in_future(form, field.data)
def _validate_date_not_in_future(self, form, date_field):
if date_field > date.today():
raise ValidationError('Date cannot be in the future')
class LoginForm(Form):
email = StringField(validators=[DataRequired()])
password = PasswordField(validators=[DataRequired()])
submit = SubmitField('Login')
remember = BooleanField('Remember me')
next = HiddenField()
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
self.remember.default = True
class ChangeForm(Form):
title_number = HiddenField('Title Number')
confirm = BooleanField('Confirm')
proprietor_previous_full_name = HiddenField('Previous full name')
proprietor_new_full_name = StringField('New full name', validators=[DataRequired()])
partner_name = StringField('Partner\'s full name', validators=[DataRequired()])
marriage_date = DateField('Date of marriage', format='%d-%m-%Y',
validators=[DataRequired(), ValidateDateNotInFuture()],
description="For example, 20-08-2011")
marriage_place = StringField('Location of marriage ceremony', validators=[DataRequired()])
marriage_country = SelectField('Country',
validators=[DataRequired(), country_code_validator.wtform_validator()],
choices=countries_list_for_selector)
marriage_certificate_number = StringField('Marriage certificate number', validators=[DataRequired()])
class ConfirmForm(ChangeForm):
"""
Inherits from ChangeForm and makes all the data caught on the first page hidden.
"""
title_number = HiddenField('Title Number')
confirm = BooleanField('Confirm')
proprietor_previous_full_name = HiddenField('Previous full name')
proprietor_new_full_name = HiddenField('New full name')
partner_name = HiddenField('Partner\'s full name')
marriage_date = HiddenField('Date of marriage')
marriage_place = HiddenField('Location of marriage ceremony')
marriage_country = HiddenField('Country of marriage ceremony')
marriage_certificate_number = HiddenField('Marriage certificate number')
class ConveyancerAddClientForm(Form):
full_name = StringField('Full name', validators=[DataRequired()])
date_of_birth = DateField('Date of birth', format='%d-%m-%Y',
validators=[DataRequired(), ValidateDateNotInFuture()],
description="For example, 20-08-2011")
address = TextAreaField('Address', validators=[DataRequired()])
telephone = StringField('Telephone', validators=[DataRequired()])
email = StringField('Email address', validators=[DataRequired()])
class SelectTaskForm(Form):
another_task = StringField('Please specify:')
buying_or_selling_property = RadioField(
'Is your client buying or selling this property?',
choices=[
('buying', 'Buying this property'),
('selling', 'Selling this property'),
('other', 'Another task')
])
class ConveyancerAddClientsForm(Form):
# # num_of_clients = StringField('How many clients will you act for?',
# validators=[DataRequired(),
# NumberRange(1, 2, "Number of clients cannot be more than two.")])
num_of_clients = StringField('How many clients will you act for?',
validators=[DataRequired()]) | Python | 0 |
c9187cecbdb196343586378ca637d76079ff058f | Improve sub-package imports | src/minerva/storage/notification/__init__.py | src/minerva/storage/notification/__init__.py | # -*- coding: utf-8 -*-
__docformat__ = "restructuredtext en"
__copyright__ = """
Copyright (C) 2011-2013 Hendrikx-ITC B.V.
Distributed under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later
version. The full license is in the file COPYING, distributed as part of
this software.
"""
from minerva.storage.notification.notificationstore import NotificationStore, \
NotificationStoreDescriptor
from minerva.storage.notification.attribute import Attribute, \
AttributeDescriptor
from minerva.storage.notification.package import Package
from minerva.storage.notification.record import Record | # -*- coding: utf-8 -*-
__docformat__ = "restructuredtext en"
__copyright__ = """
Copyright (C) 2011-2013 Hendrikx-ITC B.V.
Distributed under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later
version. The full license is in the file COPYING, distributed as part of
this software.
"""
from engine import NotificationEngine
| Python | 0.000017 |
2c17f8997bbfb5f6a943b29bb9ef024fff304302 | hard-negative-mine | 4_hard_negative_mine.py | 4_hard_negative_mine.py |
import object_detector.file_io as file_io
import object_detector.detector as detector
import object_detector.factory as factory
import argparse as ap
DEFAULT_CONFIG_FILE = "conf/svhn.json"
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument('-c', "--config", help="Configuration File", default=DEFAULT_CONFIG_FILE)
args = vars(parser.parse_args())
conf = file_io.FileJson().read(args["config"])
#1. Create detector
detector = factory.Factory.create_detector(conf["descriptor"]["algorithm"],
conf["descriptor"]["parameters"],
conf["classifier"]["algorithm"],
conf["classifier"]["parameters"],
conf["classifier"]["output_file"])
#2. Load negative images
negative_image_files = file_io.list_files(conf["dataset"]["neg_data_dir"],
conf["dataset"]["neg_format"],
n_files_to_sample=conf["hard_negative_mine"]["n_images"])
#3. Get hard negative mined features
features, probs = detector.hard_negative_mine(negative_image_files,
conf["detector"]["window_dim"],
conf["hard_negative_mine"]["window_step"],
conf["hard_negative_mine"]["pyramid_scale"],
threshold_prob=conf["hard_negative_mine"]["min_probability"])
print "[HNM INFO] : number of mined negative patches {}".format(len(features))
print "[HNM INFO] : probabilities of mined negative patches {}".format(probs)
#4. Add hard negative mined features to the extractor
extractor = factory.Factory.create_extractor(conf["descriptor"]["algorithm"],
conf["descriptor"]["parameters"],
conf["detector"]["window_dim"],
conf["extractor"]["output_file"])
print "Before adding hard-negative-mined samples"
extractor.summary()
extractor.add_data(features, -1)
print "After adding hard-negative-mined samples"
extractor.summary()
extractor.save(data_file=conf["extractor"]["output_file"])
|
import object_detector.file_io as file_io
import object_detector.detector as detector
import object_detector.factory as factory
import argparse as ap
DEFAULT_CONFIG_FILE = "conf/car_side.json"
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument('-c', "--config", help="Configuration File", default=DEFAULT_CONFIG_FILE)
args = vars(parser.parse_args())
conf = file_io.FileJson().read(args["config"])
#1. Create detector
detector = factory.Factory.create_detector(conf["descriptor"]["algorithm"],
conf["descriptor"]["parameters"],
conf["classifier"]["algorithm"],
conf["classifier"]["parameters"],
conf["classifier"]["output_file"])
#2. Load negative images
negative_image_files = file_io.list_files(conf["dataset"]["neg_data_dir"],
conf["dataset"]["neg_format"],
n_files_to_sample=conf["hard_negative_mine"]["n_images"])
#3. Get hard negative mined features
features, probs = detector.hard_negative_mine(negative_image_files,
conf["detector"]["window_dim"],
conf["hard_negative_mine"]["window_step"],
conf["hard_negative_mine"]["pyramid_scale"],
threshold_prob=conf["hard_negative_mine"]["min_probability"])
print "[HNM INFO] : number of mined negative patches {}".format(len(features))
print "[HNM INFO] : probabilities of mined negative patches {}".format(probs)
#4. Add hard negative mined features to the extractor
extractor = factory.Factory.create_extractor(conf["descriptor"]["algorithm"],
conf["descriptor"]["parameters"],
conf["detector"]["window_dim"],
conf["extractor"]["output_file"])
print "Before adding hard-negative-mined samples"
extractor.summary()
extractor.add_data(features, -1)
print "After adding hard-negative-mined samples"
extractor.summary()
extractor.save(data_file=conf["extractor"]["output_file"])
| Python | 0.999416 |
334334c95a543de3e92c96ef807b2cad684f4362 | Update URL construction from FPLX db_refs | indra/databases/__init__.py | indra/databases/__init__.py | import logging
logger = logging.getLogger('databases')
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
identifiers_url = 'http://identifiers.org/'
if db_name == 'UP':
url = identifiers_url + 'uniprot/%s' % db_id
elif db_name == 'HGNC':
url = identifiers_url + 'hgnc/HGNC:%s' % db_id
elif db_name == 'IP':
url = identifiers_url + 'interpro/%s' % db_id
elif db_name == 'CHEBI':
url = identifiers_url + 'chebi/%s' % db_id
elif db_name == 'NCIT':
url = identifiers_url + 'ncit/%s' % db_id
elif db_name == 'GO':
url = identifiers_url + 'go/%s' % db_id
elif db_name == 'PUBCHEM':
if db_id.startswith('PUBCHEM:'):
db_id = db_id[8:]
url = identifiers_url + 'pubchem.compound/%s' % db_id
elif db_name == 'PF':
url = identifiers_url + 'pfam/%s' % db_id
elif db_name == 'MIRBASEM':
url = identifiers_url + 'mirbase.mature/%s' % db_id
elif db_name == 'MIRBASE':
url = identifiers_url + 'mirbase/%s' % db_id
elif db_name == 'MESH':
url = identifiers_url + 'mesh/%s' % db_id
elif db_name == 'HMDB':
url = identifiers_url + 'hmdb/%s' % db_id
# Special cases with no identifiers entry
elif db_name == 'FPLX':
url = 'http://identifiers.org/fplx/%s' % db_id
elif db_name == 'NXPFA':
url = 'https://www.nextprot.org/term/FA-%s' % db_id
elif db_name == 'TEXT':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
| import logging
logger = logging.getLogger('databases')
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
identifiers_url = 'http://identifiers.org/'
if db_name == 'UP':
url = identifiers_url + 'uniprot/%s' % db_id
elif db_name == 'HGNC':
url = identifiers_url + 'hgnc/HGNC:%s' % db_id
elif db_name == 'IP':
url = identifiers_url + 'interpro/%s' % db_id
elif db_name == 'CHEBI':
url = identifiers_url + 'chebi/%s' % db_id
elif db_name == 'NCIT':
url = identifiers_url + 'ncit/%s' % db_id
elif db_name == 'GO':
url = identifiers_url + 'go/%s' % db_id
elif db_name == 'PUBCHEM':
if db_id.startswith('PUBCHEM:'):
db_id = db_id[8:]
url = identifiers_url + 'pubchem.compound/%s' % db_id
elif db_name == 'PF':
url = identifiers_url + 'pfam/%s' % db_id
elif db_name == 'MIRBASEM':
url = identifiers_url + 'mirbase.mature/%s' % db_id
elif db_name == 'MIRBASE':
url = identifiers_url + 'mirbase/%s' % db_id
elif db_name == 'MESH':
url = identifiers_url + 'mesh/%s' % db_id
elif db_name == 'HMDB':
url = identifiers_url + 'hmdb/%s' % db_id
# Special cases with no identifiers entry
elif db_name == 'FPLX':
url = 'http://sorger.med.harvard.edu/indra/entities/%s' % db_id
elif db_name == 'NXPFA':
url = 'https://www.nextprot.org/term/FA-%s' % db_id
elif db_name == 'TEXT':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
| Python | 0 |
e8293bd1365c759d940297e48609ee69251b0d62 | split grant code for better suggestion | invenio_openaire/indexer.py | invenio_openaire/indexer.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record modification prior to indexing."""
from __future__ import absolute_import, print_function
from elasticsearch import VERSION as ES_VERSION
def indexer_receiver(sender, json=None, record=None, index=None,
**dummy_kwargs):
"""Connect to before_record_index signal to transform record for ES."""
if index and index.startswith('grants-'):
code = json.get('code')
suggestions = [
code,
json.get('acronym'),
json.get('title')
]
if code and "_" in code:
suggestions.extend(code.split("_"))
if ES_VERSION[0] == 2:
# Generate suggest field
json['suggest'] = {
'input': [s for s in suggestions if s],
'output': json['title'],
'context': {
'funder': [json['funder']['doi']]
},
'payload': {
'id': json['internal_id'],
'legacy_id': (json['code'] if json.get('program') == 'FP7'
else json['internal_id']),
'code': json['code'],
'title': json['title'],
'acronym': json.get('acronym'),
'program': json.get('program'),
},
}
elif ES_VERSION[0] > 2:
# Generate suggest field
json['suggest'] = {
'input': [s for s in suggestions if s],
'contexts': {
'funder': [json['funder']['doi']]
}
}
json['legacy_id'] = json['code'] if json.get('program') == 'FP7' \
else json['internal_id']
elif index and index.startswith('funders-'):
suggestions = json.get('acronyms', []) + [json.get('name')]
if ES_VERSION[0] == 2:
# Generate suggest field
json['suggest'] = {
'input': [s for s in suggestions if s],
'output': json['name'],
'payload': {
'id': json['doi']
},
}
elif ES_VERSION[0] > 2:
json['suggest'] = {
'input': [s for s in suggestions if s],
}
| # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record modification prior to indexing."""
from __future__ import absolute_import, print_function
from elasticsearch import VERSION as ES_VERSION
def indexer_receiver(sender, json=None, record=None, index=None,
**dummy_kwargs):
"""Connect to before_record_index signal to transform record for ES."""
if index and index.startswith('grants-'):
if ES_VERSION[0] == 2:
# Generate suggest field
suggestions = [
json.get('code'),
json.get('acronym'),
json.get('title')
]
json['suggest'] = {
'input': [s for s in suggestions if s],
'output': json['title'],
'context': {
'funder': [json['funder']['doi']]
},
'payload': {
'id': json['internal_id'],
'legacy_id': (json['code'] if json.get('program') == 'FP7'
else json['internal_id']),
'code': json['code'],
'title': json['title'],
'acronym': json.get('acronym'),
'program': json.get('program'),
},
}
elif ES_VERSION[0] > 2:
# Generate suggest field
suggestions = [
json.get('code'),
json.get('acronym'),
json.get('title')
]
json['suggest'] = {
'input': [s for s in suggestions if s],
'contexts': {
'funder': [json['funder']['doi']]
}
}
json['legacy_id'] = json['code'] if json.get('program') == 'FP7' \
else json['internal_id']
elif index and index.startswith('funders-'):
if ES_VERSION[0] == 2:
# Generate suggest field
suggestions = json.get('acronyms', []) + [json.get('name')]
json['suggest'] = {
'input': [s for s in suggestions if s],
'output': json['name'],
'payload': {
'id': json['doi']
},
}
elif ES_VERSION[0] > 2:
suggestions = json.get('acronyms', []) + [json.get('name')]
json['suggest'] = {
'input': [s for s in suggestions if s],
}
| Python | 0 |
d60dea7b7b1fb073eef2c350177b3920f32de748 | Add comments indicating source of formulae.. | 6/e6.py | 6/e6.py | #!/usr/bin/env python
# http://www.proofwiki.org/wiki/Sum_of_Sequence_of_Squares
def sum_seq_squares(n):
return (n * (n+1) * ((2*n)+1)) / 6
# http://www.regentsprep.org/regents/math/algtrig/ATP2/ArithSeq.htm
def sum_seq(n):
return (n * (n + 1)) / 2
def main():
sum_seq_sq_100 = sum_seq_squares(100)
sum_seq_100 = sum_seq(100)
sq_sum_seq_100 = sum_seq_100**2
diff = sq_sum_seq_100 - sum_seq_sq_100
print('diff is {0}'.format(diff))
if __name__ == '__main__':
main() | #!/usr/bin/env python
def sum_seq_squares(n):
return (n * (n+1) * ((2*n)+1)) / 6
def sum_seq(n):
return (n * (n + 1)) / 2
def main():
sum_seq_sq_100 = sum_seq_squares(100)
sum_seq_100 = sum_seq(100)
sq_sum_seq_100 = sum_seq_100**2
diff = sq_sum_seq_100 - sum_seq_sq_100
print('diff is {0}'.format(diff))
if __name__ == '__main__':
main() | Python | 0 |
36e8549053d28f51cc1e846e86bbdc8b32527cbe | Make app.py localhost only | app.py | app.py | #!/usr/bin/python3
from json import dumps
from datetime import datetime
import os
from bottle import app as bottleapp
from bottle import route, run, static_file, template
from pymongo import MongoClient
import sprout
os.chdir(os.path.dirname(os.path.abspath(__file__)))
mongo = MongoClient('localhost', 27017)
col = mongo['plant-rank']['users']
def readable(obj):
obj['class_name'] = {0: '陌生人',
1: '算法班',
2: 'C語法',
3: 'Py語法'}[obj['category']]
obj['class'] = {0: '',
1: 'label-primary',
2: 'label-warning',
3: 'label-success'}[obj['category']]
obj['algopoints'] = len(obj['algoprobs'])
obj['points'] = len(obj['problems'])
obj['problems'] = ', '.join(map(str, sorted(obj['problems'])))
obj['updated_at'] = (datetime.fromtimestamp(obj['updated_at'])
.strftime('%Y/%m/%d %H:%M'))
return obj
@route('/assets/<filename:path>')
def assets(filename):
return static_file(filename, root='./assets/')
@route('/')
def index():
board = list(map(readable, col.find({})))
countboard = sorted(board, key=lambda x: (x['points'], x['rate']), reverse=True)
algocountboard = sorted(board, key=lambda x: (x['algopoints'], x['points']),
reverse=True)
algoboard = sorted(board, key=lambda x: (x['rate'] if x['category'] == 1 else 0,
x['points']),
reverse=True)
return template('index.html', locals())
@route('/users/<uid>')
def user(uid):
board = map(readable, col.find({'uid': int(uid)}).limit(1))
return template('user.html', locals())
@route('/users/<uid>', method="POST")
def refresh(uid):
try:
sprout.refresh(int(uid))
except:
return dumps({'status': False})
else:
return dumps({'status': True})
run(app=bottleapp(), port=8787, host="127.0.0.1", debug=False, server='meinheld')
| #!/usr/bin/python3
from json import dumps
from datetime import datetime
import os
from bottle import app as bottleapp
from bottle import route, run, static_file, template
from pymongo import MongoClient
import sprout
os.chdir(os.path.dirname(os.path.abspath(__file__)))
mongo = MongoClient('localhost', 27017)
col = mongo['plant-rank']['users']
def readable(obj):
obj['class_name'] = {0: '陌生人',
1: '算法班',
2: 'C語法',
3: 'Py語法'}[obj['category']]
obj['class'] = {0: '',
1: 'label-primary',
2: 'label-warning',
3: 'label-success'}[obj['category']]
obj['algopoints'] = len(obj['algoprobs'])
obj['points'] = len(obj['problems'])
obj['problems'] = ', '.join(map(str, sorted(obj['problems'])))
obj['updated_at'] = (datetime.fromtimestamp(obj['updated_at'])
.strftime('%Y/%m/%d %H:%M'))
return obj
@route('/assets/<filename:path>')
def assets(filename):
return static_file(filename, root='./assets/')
@route('/')
def index():
board = list(map(readable, col.find({})))
countboard = sorted(board, key=lambda x: (x['points'], x['rate']), reverse=True)
algocountboard = sorted(board, key=lambda x: (x['algopoints'], x['points']),
reverse=True)
algoboard = sorted(board, key=lambda x: (x['rate'] if x['category'] == 1 else 0,
x['points']),
reverse=True)
return template('index.html', locals())
@route('/users/<uid>')
def user(uid):
board = map(readable, col.find({'uid': int(uid)}).limit(1))
return template('user.html', locals())
@route('/users/<uid>', method="POST")
def refresh(uid):
try:
sprout.refresh(int(uid))
except:
return dumps({'status': False})
else:
return dumps({'status': True})
run(app=bottleapp(), port=8787, host="0.0.0.0", debug=False, server='meinheld')
| Python | 0.000004 |
91ff11cde50ce2485c0a6725651931f88a085ca7 | Update get_time to handle timeout errors. | app.py | app.py | """ app.py """
from flask import Flask, render_template
import requests
app = Flask(__name__)
def get_time():
try:
response = requests.get('http://localhost:3001/time', timeout=3.0)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
return 'Unavailable'
return response.json().get('datetime')
def get_user():
response = requests.get('http://localhost:3002/user')
return response.json().get('name')
@app.errorhandler(500)
def page_not_found(_):
return 'Server error', 500
@app.route("/")
def hello():
time = get_time()
name = get_user()
return render_template('hello.html', name=name, time=time)
if __name__ == "__main__":
app.run(port=3000, debug=True)
| """ app.py """
from flask import Flask, render_template
import requests
app = Flask(__name__)
def get_time():
try:
response = requests.get('http://localhost:3001/time')
except requests.exceptions.ConnectionError:
return 'Unavailable'
return response.json().get('datetime')
def get_user():
response = requests.get('http://localhost:3002/user')
return response.json().get('name')
@app.errorhandler(500)
def page_not_found(_):
return 'Server error', 500
@app.route("/")
def hello():
time = get_time()
name = get_user()
return render_template('hello.html', name=name, time=time)
if __name__ == "__main__":
app.run(port=3000, debug=True)
| Python | 0 |
b2a1dcd25ecc9d50a975a41330a1620b52312857 | add docstring | letmecreate/click/motion.py | letmecreate/click/motion.py | #!/usr/bin/env python3
"""Python binding of Motion Click wrapper of LetMeCreate library."""
import ctypes
_lib = ctypes.CDLL('libletmecreate_click.so')
callback_type = ctypes.CFUNCTYPE(None, ctypes.c_uint8)
callbacks = [None, None]
def enable(mikrobus_index):
"""Enable the motion click.
Configures the EN pin as an output and set it to high.
mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2)
Note: An exception is thrown if it fails to enable the Motion Click.
"""
ret = _lib.motion_click_enable(mikrobus_index)
if ret < 0:
raise Exception("motion click enable failed")
def attach_callback(mikrobus_index, callback):
"""Attach a callback triggered if an event is detected.
Returns the callback ID. The callback must be removed by calling
letmecreate.core.gpio_monitor.remove_callback().
mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2)
callback: function must have one argument which can be safely ignored. This
argument indicates if the GPIO is on a falling or raising edge. In this
case, it triggers an event only if the INT pin is raising so this argument
will always be equal to 1.
Note: An exception is thrown if it fails to attach a callback.
"""
ptr = callback_type(callback)
ret = _lib.motion_click_attach_callback(mikrobus_index, ptr)
if ret < 0:
raise Exception("motion click attach callback failed")
callbacks[mikrobus_index] = ptr;
def disable(mikrobus_index):
"""Disable the Motion Click.
Note: An exception is thrown if it fails to disable the Motion Click.
"""
ret = _lib.motion_click_disable(mikrobus_index)
if ret < 0:
raise Exception("motion click disable failed")
| #!/usr/bin/env python3
import ctypes
_lib = ctypes.CDLL('libletmecreate_click.so')
callback_type = ctypes.CFUNCTYPE(None, ctypes.c_uint8)
callbacks = [None, None]
def enable(mikrobus_index):
ret = _lib.motion_click_enable(mikrobus_index)
if ret < 0:
raise Exception("motion click enable failed")
def attach_callback(mikrobus_index, callback):
ptr = callback_type(callback)
ret = _lib.motion_click_attach_callback(mikrobus_index, ptr)
if ret < 0:
raise Exception("motion click attach callback failed")
callbacks[mikrobus_index] = ptr;
def disable(mikrobus_index):
ret = _lib.motion_click_disable(mikrobus_index)
if ret < 0:
raise Exception("motion click disable failed")
| Python | 0 |
460b48c10461df264a30ac26630d7299370988cd | Support alternative URLs | gsl.py | gsl.py | #!/usr/bin/python
from urlparse import urlparse
import urllib
import urllib2
import click
import os
import hashlib
PACKAGE_SERVER = 'https://server-to-be-determined/'
@click.command()
@click.option('--package_id', help='Package ID', required=True)
@click.option('--download_location', default='./',
help='Location for the downloaded file')
def get(package_id, download_location):
package_found = False
for line in urllib2.urlopen(PACKAGE_SERVER + 'urls.tsv'):
if line.strip() and not line.startswith('#'):
iid, upstream_url, checksum, alternate_url = line.split('\t')
if iid == package_id.strip():
package_found = True
# I worry about this being unreliable. TODO: add target filename column?
pkg_name = urlparse(upstream_url).path.split('/')[-1]
storage_path = os.path.join(download_location, pkg_name)
if alternate_url.strip():
url = alternate_url
else:
url = PACKAGE_SERVER + checksum
urllib.urlretrieve(url, storage_path)
download_checksum = hashlib.sha256(open(storage_path, 'rb').read()).hexdigest()
if checksum != download_checksum:
print ('Checksum does not match, something seems to be wrong.\n'
'{expected}\t(expected)\n{actual}\t(downloaded)').format(
expected=checksum,
actual=download_checksum)
else:
print 'Download successful for %s.' % (pkg_name)
if not package_found:
print 'Package (%s) could not be found in this server.' % (package_id)
if __name__ == '__main__':
get()
| #!/usr/bin/python
from urlparse import urlparse
import urllib
import urllib2
import click
import os
import hashlib
PACKAGE_SERVER = 'https://server-to-be-determined/'
@click.command()
@click.option('--package_id', help='Package ID', required=True)
@click.option('--download_location', default='./',
help='Location for the downloaded file')
def get(package_id, download_location):
package_found = False
for line in urllib2.urlopen(PACKAGE_SERVER + 'urls.tsv'):
if line.strip() and not line.startswith('#'):
iid, upstream_url, checksum = line.split('\t')
if iid.strip() == package_id.strip():
package_found = True
# I worry about this being unreliable. TODO: add target filename column?
pkg_name = urlparse(upstream_url).path.split('/')[-1]
storage_path = os.path.join(download_location, pkg_name)
url = PACKAGE_SERVER + checksum
urllib.urlretrieve(url, storage_path)
download_checksum = hashlib.sha256(open(storage_path, 'rb').read()).hexdigest()
if checksum.strip() != download_checksum:
print 'Checksum does not match, something seems to be wrong.\n'
print checksum, '\t(expected)'
print download_checksum, '\t(downloaded)'
else:
print 'Download sucessfull for %s.' % (pkg_name)
if not package_found:
print 'Package (%s) could not be found in this servive.' % (package_id)
if __name__ == '__main__':
get()
| Python | 0.000001 |
9c012f3b5609b557b9d14059f2b2a6412283e0ed | support option ax='new' | src/pyquickhelper/helpgen/graphviz_helper.py | src/pyquickhelper/helpgen/graphviz_helper.py | """
@file
@brief Helper about graphviz.
"""
import os
from ..loghelper import run_cmd
from .conf_path_tools import find_graphviz_dot
def plot_graphviz(dot, ax=None, temp_dot=None, temp_img=None, dpi=300):
"""
Plots a dot graph into a :epkg:`matplotlib` plot.
@param dot dot language
@param ax existing ax
@param temp_dot temporary file, if None,
a file is created and removed
@param temp_img temporary image, if None,
a file is created and removed
@param dpi dpi
@return ax
"""
if temp_dot is None:
temp_dot = "temp_%d.dot" % id(dot)
clean_dot = True
else:
clean_dot = False
if temp_img is None:
temp_img = "temp_%d.png" % id(dot)
clean_img = True
else:
clean_img = False
with open(temp_dot, "w", encoding="utf-8") as f:
f.write(dot)
dot_path = find_graphviz_dot()
cmd = '"%s" -Gdpi=%d -Tpng -o "%s" "%s"' % (
dot_path, dpi, temp_img, temp_dot)
out, err = run_cmd(cmd, wait=True)
if err is not None:
err = err.strip("\r\n\t ")
if len(err) > 0:
if clean_dot:
os.remove(temp_dot)
if clean_img and os.path.exists(temp_img):
os.remove(temp_img)
raise RuntimeError(
"Unable to run command line"
"\n---CMD---\n{}\n---OUT---\n{}"
"\n---ERR---\n{}".format(
cmd, out, err))
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
elif isinstance(ax, str) and ax == 'new':
import matplotlib.pyplot as plt
_, ax = plt.subplots(1, 1)
image = plt.imread(temp_img)
else:
import matplotlib.pyplot as plt
image = plt.imread(temp_img)
ax.imshow(image)
if clean_dot:
os.remove(temp_dot)
if clean_img and os.path.exists(temp_img):
os.remove(temp_img)
return ax
| """
@file
@brief Helper about graphviz.
"""
import os
from ..loghelper import run_cmd
from .conf_path_tools import find_graphviz_dot
def plot_graphviz(dot, ax=None, temp_dot=None, temp_img=None, dpi=300):
"""
Plots a dot graph into a :epkg:`matplotlib` plot.
@param dot dot language
@param ax existing ax
@param temp_dot temporary file, if None,
a file is created and removed
@param temp_img temporary image, if None,
a file is created and removed
@param dpi dpi
@return ax
"""
if temp_dot is None:
temp_dot = "temp_%d.dot" % id(dot)
clean_dot = True
else:
clean_dot = False
if temp_img is None:
temp_img = "temp_%d.png" % id(dot)
clean_img = True
else:
clean_img = False
with open(temp_dot, "w", encoding="utf-8") as f:
f.write(dot)
dot_path = find_graphviz_dot()
cmd = '"%s" -Gdpi=%d -Tpng -o "%s" "%s"' % (
dot_path, dpi, temp_img, temp_dot)
out, err = run_cmd(cmd, wait=True)
if err is not None:
err = err.strip("\r\n\t ")
if len(err) > 0:
if clean_dot:
os.remove(temp_dot)
if clean_img and os.path.exists(temp_img):
os.remove(temp_img)
raise RuntimeError(
"Unable to run command line"
"\n---CMD---\n{}\n---OUT---\n{}"
"\n---ERR---\n{}".format(
cmd, out, err))
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
image = plt.imread(temp_img)
else:
import matplotlib.pyplot as plt
image = plt.imread(temp_img)
ax.imshow(image)
if clean_dot:
os.remove(temp_dot)
if clean_img and os.path.exists(temp_img):
os.remove(temp_img)
return ax
| Python | 0.000036 |
cbe379efeb7592e9c918fc4d092098b74a3b8c1a | Update Deck.py - Add shuffle method to shuffle the deck and then return the shuffled cards. | Deck.py | Deck.py | #Deck
class Deck:
'''Definition of a card deck.'''
from random import shuffle as rShuffle
def __init__(self,hasJoker=False):
self.suits = ['H','D','S','C']
self.values = [str(x) for x in range(2,10)] #2-9 cards
self.values.extend(['T','J','Q','K','A']) #Face cards (including the 10s)
#Assemble deck
self.cards = [(v,s) for v in self.values for s in self.suits]
#Add Joker cards (2) as 'WW' if needed
if(hasJoker):
self.cards.extend([('W','W'),('W','W')])
#Draw a card from the deck and return a card
def draw(self,fromTop=True):
#Remove from the front/top of deck
if fromTop:
return self.cards.pop(0)
#Remove from the back/bottom of deck
else:
return self.cards.pop()
#Return how many cards are in deck
def sizeOf(self):
return len(self.cards)
#Shuffle deck and return the newly shuffled deck
def shuffle(self):
#Use random.shuffle() method
rShuffle(self.cards)
return self.cards
| #Deck
class Deck:
'''Definition of a card deck.'''
def __init__(self,hasJoker=False):
self.suits = ['H','D','S','C']
self.values = [str(x) for x in range(2,10)] #2-9 cards
self.values.extend(['T','J','Q','K','A']) #Face cards (including the 10s)
#Assemble deck
self.cards = [(v,s) for v in self.values for s in self.suits]
#Add Joker cards (2) as 'WW' if needed
if(hasJoker):
self.cards.extend([('W','W'),('W','W')])
#Draw a card from the deck and return a card
def draw(self,fromTop=True):
#Remove from the front/top of deck
if fromTop:
return self.cards.pop(0)
#Remove from the back/bottom of deck
else:
return self.cards.pop()
#Return how many cards are in deck
def sizeOf(self):
return len(self.cards) | Python | 0 |
3bd37ff8b91787da22f925ab858157bffa5698d7 | Remove unnecessary import | Fibo.py | Fibo.py | import sys
def Fibo(num):
if num <= 2:
return 1
else:
return Fibo(num-1)+Fibo(num-2)
print(Fibo(int(sys.argv[1])))
| import math
import sys
def Fibo(num):
if num <= 2:
return 1
else:
return Fibo(num-1)+Fibo(num-2)
print(Fibo(int(sys.argv[1])))
| Python | 0 |
6855564716827546a5b68c154b0d95daba969119 | add more user tests | src/inventory/tests/tests.py | src/inventory/tests/tests.py | from django.test import TestCase
from inventory.models import *
class UserTests(TestCase):
def test_for_fields(self):
""" saving and loading users"""
initial_user = User(id=10, username="user", password="pass", email="email",
f_name="f_name", l_name="l_name", active=True).save()
loaded_user = User.objects.get(id=10)
self.assertEqual(loaded_user.id, 10)
self.assertEqual(loaded_user.username, "user")
self.assertEqual(loaded_user.password, "pass")
self.assertEqual(loaded_user.email, "email")
self.assertEqual(loaded_user.f_name, "f_name")
self.assertEqual(loaded_user.l_name, "l_name")
self.assertEqual(loaded_user.active, True)
self.assertEqual(unicode(loaded_user), "user")
# class CardTests(TestCase):
# """test saving and loading cards"""
# initial_card = Card(id=1, repo_base="repo_base", repo_name="repo_name",
# card_name="card_name", query="query").save()
# loaded_card=Card.objects.get(id=1)
# self.assertEqual(loaded_card.card_name, "card_name")
| from django.test import TestCase
from inventory.models import *
class UserTests(TestCase):
def test_for_fields(self):
""" saving and loading users"""
initial_user = User(username="user", password="pass", email="email",
f_name="fname", l_name="lname", active=True).save()
loaded_user = User.objects.get(username="user")
self.assertEqual(loaded_user.username, "user")
self.assertEqual(loaded_user.password, "pass")
self.assertEqual(loaded_user.email, "email")
self.assertEqual(loaded_user.f_name, "fname")
self.assertEqual(loaded_user.l_name, "lname")
self.assertEqual(loaded_user.active, True)
self.assertEqual(unicode(loaded_user), "user")
class CardTests(TestCase):
"""test cards"""
| Python | 0 |
457642b37cf84f789530d7466eff2fb810f560fc | Add tests for Router.start/stop/run as well as call_at. | lib/rapidsms/tests/test_router.py | lib/rapidsms/tests/test_router.py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import unittest, threading, time, datetime
from rapidsms.router import Router
from rapidsms.connection import Connection
from rapidsms.message import Message
from rapidsms.backends.backend import Backend
from rapidsms.tests.harness import MockApp, MockLogger
class TestRouter(unittest.TestCase):
def test_log(self):
r = Router()
r.logger = MockLogger()
r.log("debug", "test message", 5)
self.assertEquals(r.logger[0], (r,"debug","test message",5),
"log() calls self.logger.write()")
def test_set_logger(self):
### TODO
pass
def test_build_component (self):
r = Router()
r.logger = MockLogger()
component = r.build_component("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app"})
self.assertEquals(type(component), MockApp, "component has right type")
self.assertEquals(component.title, "test app", "component has right title")
self.assertRaises(Exception, r.build_component,
("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app", "argh": "no config"}),
"build_component gracefully handles bad configuration options")
def test_add_backend (self):
r = Router()
r.logger = MockLogger()
r.add_backend({"type":"backend", "title":"test_backend"})
self.assertEquals(len(r.backends), 1, "backends has 1 item")
self.assertEquals(type(r.backends[0]), Backend, "backend has correct type")
def test_add_app (self):
### TODO
pass
def test_start_backend (self):
### TODO
pass
def test_start_all_apps (self):
### TODO
pass
def test_start_all_backends (self):
### TODO
pass
def test_stop_all_backends (self):
### TODO
pass
def test_start_and_stop (self):
r = Router()
r.logger = MockLogger()
threading.Thread(target=r.start).start()
self.assertTrue(r.running)
r.stop()
self.assertTrue(not r.running)
# not waiting for the router to shutdown causes exceptions
# on global destruction. (race condition)
time.sleep(1.0)
def test_run(self):
r = Router()
r.logger = MockLogger()
app = r.build_component("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app"})
r.apps.append(app)
r.add_backend({"type":"backend", "title":"test_backend"})
backend = r.get_backend("test-backend") # NOTE the dash; FIXME
msg = backend.message("test user", "test message")
r.send(msg)
r.run()
received = app.calls[-1][1]
self.assertEquals(msg, received, "message is identical")
self.assertEquals(msg.connection, received.connection, "same connection")
self.assertEquals(msg.text, received.text, "same text")
def test_call_at (self):
def callback(stash, arg1, **argv):
stash["arg1"]=arg1
if "try_again" in argv and "try_again" not in stash:
stash["try_again"] = False
return 1.0
else:
stash.update(argv)
r = Router()
r.logger = MockLogger()
stash = {}
r.call_at(0.5, callback, stash, 1, arg2="a")
r.call_at(datetime.datetime.now() + datetime.timedelta(seconds=0.5), callback, stash, 1, arg3="b")
r.call_at(datetime.timedelta(seconds=1.0), callback, stash, 1, try_again=True)
r.call_at(3, callback, stash, 2)
threading.Thread(target=r.start).start()
time.sleep(1.0)
self.assertEquals(stash["arg1"], 1, "*args ok")
self.assertEquals(stash["arg2"], "a", "**kargs ok")
self.assertEquals(stash["arg3"], "b", "datetime works")
self.assertEquals(stash["try_again"], False, "timedelta works")
time.sleep(3.0)
self.assertEquals(stash["try_again"], True, "repeated callback")
self.assertEquals(stash["arg1"], 2, "int works")
r.stop()
def test_incoming(self):
pass
def test_outgoing(self):
pass
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import unittest, threading
from rapidsms.router import Router
from rapidsms.backends.backend import Backend
from rapidsms.tests.harness import MockApp, MockLogger
class TestRouter(unittest.TestCase):
def test_log(self):
r = Router()
r.logger = MockLogger()
r.log("debug", "test message", 5)
self.assertEquals(r.logger[0], (r,"debug","test message",5),
"log() calls self.logger.write()")
def test_set_logger(self):
### TODO
pass
def test_build_component (self):
r = Router()
r.logger = MockLogger()
component = r.build_component("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app"})
self.assertEquals(type(component), MockApp, "component has right type")
self.assertEquals(component.name, "test app", "component has right title")
self.assertRaises(Exception, r.build_component,
("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app", "argh": "no config"}),
"build_component gracefully handles bad configuration options")
def test_add_backend (self):
r = Router()
r.logger = MockLogger()
r.add_backend({"type":"backend", "title":"test_backend"})
self.assertEquals(len(r.backends), 1, "backends has 1 item")
self.assertEquals(type(r.backends[0]), Backend, "backend has correct type")
def test_add_app (self):
### TODO
pass
def test_start_backend (self):
### TODO
pass
def test_start_all_apps (self):
### TODO
pass
def test_start_all_backends (self):
### TODO
pass
def test_stop_all_backends (self):
### TODO
pass
def test_start_and_stop (self):
pass
def test_run(self):
pass
def test_incoming(self):
pass
def test_outgoing(self):
pass
if __name__ == "__main__":
unittest.main()
| Python | 0 |
237b9d4577f004401c2385163b060c785692c8b6 | add when_over and when_over_guessed fields to Event (db change) | src/knesset/events/models.py | src/knesset/events/models.py | from datetime import datetime
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from knesset.persons.models import Person
class Event(models.Model):
''' hold the when, who, what, where and which fields of events
and allows the users to contribute resources (through links)
and discuss upcoming events.
'''
when = models.DateTimeField()
when_over = models.DateTimeField(null=True)
# KNESSET_TODO the end time of a committee meeting is not recorded anywhere,
# so we are left to guess
when_over_guessed = models.BooleanField(default=True)
who = models.ManyToManyField(Person)
what = models.TextField()
where = models.TextField()
which_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="event_for_%(class)s", null=True)
which_pk = models.TextField(_('object ID'), null=True)
which_object = generic.GenericForeignKey(ct_field="which_type", fk_field="which_pk")
@property
def is_future(self):
return self.when > datetime.now()
@property
def which(self):
return self.which_objects and unicode(self.which_object) or self.what
| from datetime import datetime
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from knesset.persons.models import Person
class Event(models.Model):
''' hold the when, who, what, where and which fields of events
and allows the users to contribute resources (through links)
and discuss upcoming events.
'''
when = models.DateTimeField()
who = models.ManyToManyField(Person)
what = models.TextField()
where = models.TextField()
which_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="event_for_%(class)s", null=True)
which_pk = models.TextField(_('object ID'), null=True)
which_object = generic.GenericForeignKey(ct_field="which_type", fk_field="which_pk")
@property
def is_future(self):
return self.when > datetime.now()
@property
def which(self):
return self.which_objects and unicode(self.which_object) or self.what
| Python | 0 |
d53dc67fc002448c7b94758843223a17d4623483 | Allow IP to be blank | lingcod/bookmarks/models.py | lingcod/bookmarks/models.py | from django.contrib.gis.db import models
from lingcod.features import register
from lingcod.features.models import Feature
from django.utils.html import escape
from django.conf import settings
class Bookmark(Feature):
description = models.TextField(default="", null=True, blank=True)
latitude = models.FloatField()
longitude = models.FloatField()
altitude = models.FloatField()
heading = models.FloatField(default=0)
tilt = models.FloatField(default=0)
roll = models.FloatField(default=0)
altitudeMode = models.FloatField(default=1)
ip = models.IPAddressField(default="0.0.0.0", null=True, blank=True)
publicstate = models.TextField(default="{}")
@property
def kml(self):
camera = "<Camera>\n"
camera_params = ["latitude", "longitude", "altitude", "heading", "tilt", "roll", "altitudeMode"]
for p in camera_params:
val = self.__dict__[p]
if val is not None:
camera += " <%s>%s</%s>\n" % (p, val, p)
camera += " </Camera>\n"
return """
<Placemark id="%s">
<visibility>1</visibility>
<name>%s</name>
<description>%s</description>
<styleUrl>#%s-default</styleUrl>
%s
</Placemark>
""" % (self.uid, escape(self.name), escape(self.description), self.model_uid(),
camera)
@property
def kml_style(self):
return """
<Style id="%s-default">
<!-- invisible -->
<IconStyle>
<scale>0.0</scale>
</IconStyle>
<LabelStyle>
<scale>0.0</scale>
</LabelStyle>
</Style>
""" % (self.model_uid())
class Options:
manipulators = []
optional_manipulators = [ ]
verbose_name = 'Bookmark'
form = 'lingcod.bookmarks.forms.BookmarkForm'
icon_url = 'bookmarks/images/bookmark.png'
form_template = 'bookmarks/form.html'
show_template = 'bookmarks/show.html'
if settings.BOOKMARK_FEATURE:
Bookmark = register(Bookmark)
| from django.contrib.gis.db import models
from lingcod.features import register
from lingcod.features.models import Feature
from django.utils.html import escape
from django.conf import settings
class Bookmark(Feature):
description = models.TextField(default="", null=True, blank=True)
latitude = models.FloatField()
longitude = models.FloatField()
altitude = models.FloatField()
heading = models.FloatField(default=0)
tilt = models.FloatField(default=0)
roll = models.FloatField(default=0)
altitudeMode = models.FloatField(default=1)
ip = models.IPAddressField(default="0.0.0.0")
publicstate = models.TextField(default="{}")
@property
def kml(self):
camera = "<Camera>\n"
camera_params = ["latitude", "longitude", "altitude", "heading", "tilt", "roll", "altitudeMode"]
for p in camera_params:
val = self.__dict__[p]
if val is not None:
camera += " <%s>%s</%s>\n" % (p, val, p)
camera += " </Camera>\n"
return """
<Placemark id="%s">
<visibility>1</visibility>
<name>%s</name>
<description>%s</description>
<styleUrl>#%s-default</styleUrl>
%s
</Placemark>
""" % (self.uid, escape(self.name), escape(self.description), self.model_uid(),
camera)
@property
def kml_style(self):
return """
<Style id="%s-default">
<!-- invisible -->
<IconStyle>
<scale>0.0</scale>
</IconStyle>
<LabelStyle>
<scale>0.0</scale>
</LabelStyle>
</Style>
""" % (self.model_uid())
class Options:
manipulators = []
optional_manipulators = [ ]
verbose_name = 'Bookmark'
form = 'lingcod.bookmarks.forms.BookmarkForm'
icon_url = 'bookmarks/images/bookmark.png'
form_template = 'bookmarks/form.html'
show_template = 'bookmarks/show.html'
if settings.BOOKMARK_FEATURE:
Bookmark = register(Bookmark)
| Python | 0.000009 |
d8d6ce50c6fef9157f76e1dfefef24d15532a4d9 | Add missing contexts to integration tests | test/integration/ggrc/__init__.py | test/integration/ggrc/__init__.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base test case for all ggrc integration tests."""
import logging
from sqlalchemy import exc
from flask.ext.testing import TestCase as BaseTestCase
from ggrc import db
from ggrc.app import app
# Hide errors during testing. Errors are still displayed after all tests are
# done. This is for the bad request error messages while testing the api calls.
logging.disable(logging.CRITICAL)
class TestCase(BaseTestCase):
# because it's required by unittests.
"""Base test case for all ggrc integration tests."""
maxDiff = None
@classmethod
def clear_data(cls):
"""Remove data from ggrc tables.
This is a helper function to remove any data that might have been generated
during a test. The ignored tables are the ones that don't exist or have
constant data in them, that was populated with migrations.
This function is used to speed up resetting of the database for each test.
the proper way would be to run all migrations on a fresh database, but that
would take too much time. This function should act as if the database was
just created, with the exception of autoincrement indexes.
Note:
The deletion is a hack because db.metadata.sorted_tables does not sort by
dependencies. The events table is given before Person table and reversed
order in then incorrect.
"""
ignore_tables = (
"categories",
"notification_types",
"object_types",
"options",
"relationship_test_mock_model",
"roles",
"test_model",
"contexts",
)
tables = set(db.metadata.tables).difference(ignore_tables)
for _ in range(len(tables)):
if len(tables) == 0:
break # stop the loop once all tables have been deleted
for table in reversed(db.metadata.sorted_tables):
if table.name in tables:
try:
db.engine.execute(table.delete())
tables.remove(table.name)
except exc.IntegrityError:
pass
contexts = db.metadata.tables["contexts"]
db.engine.execute(contexts.delete(contexts.c.id > 1))
db.session.commit()
def setUp(self):
self.clear_data()
def tearDown(self): # pylint: disable=no-self-use
db.session.remove()
@staticmethod
def create_app():
"""Flask specific function for running an app instance."""
app.config["SERVER_NAME"] = "localhost"
app.testing = True
app.debug = False
return app
def _check_response(self, response, expected_errors):
"""Test that response contains all expected errors and warnigs.
Args:
response: api response object.
expected_errors: dict of all expected errors by object type.
Raises:
AssertionError if an expected error or warning is not found in the
proper response block.
"""
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
for block in response:
for message in messages:
expected = expected_errors.get(block["name"], {}).get(message, set())
self.assertEqual(set(expected), set(block[message]))
| # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base test case for all ggrc integration tests."""
import logging
from sqlalchemy import exc
from flask.ext.testing import TestCase as BaseTestCase
from ggrc import db
from ggrc.app import app
# Hide errors during testing. Errors are still displayed after all tests are
# done. This is for the bad request error messages while testing the api calls.
logging.disable(logging.CRITICAL)
class TestCase(BaseTestCase):
# because it's required by unittests.
"""Base test case for all ggrc integration tests."""
maxDiff = None
@classmethod
def clear_data(cls):
"""Remove data from ggrc tables.
This is a helper function to remove any data that might have been generated
during a test. The ignored tables are the ones that don't exist or have
constant data in them, that was populated with migrations.
Note:
This is a hack because db.metadata.sorted_tables does not sort by
dependencies. The events table is given before Person table and reversed
order in then incorrect.
"""
ignore_tables = (
"categories",
"notification_types",
"object_types",
"options",
"relationship_test_mock_model",
"roles",
"test_model",
)
tables = set(db.metadata.tables).difference(ignore_tables)
for _ in range(len(tables)):
if len(tables) == 0:
break # stop the loop once all tables have been deleted
for table in reversed(db.metadata.sorted_tables):
if table.name in tables:
try:
db.engine.execute(table.delete())
tables.remove(table.name)
except exc.IntegrityError:
pass
db.session.commit()
def setUp(self):
self.clear_data()
def tearDown(self): # pylint: disable=no-self-use
db.session.remove()
@staticmethod
def create_app():
"""Flask specific function for running an app instance."""
app.config["SERVER_NAME"] = "localhost"
app.testing = True
app.debug = False
return app
def _check_response(self, response, expected_errors):
"""Test that response contains all expected errors and warnigs.
Args:
response: api response object.
expected_errors: dict of all expected errors by object type.
Raises:
AssertionError if an expected error or warning is not found in the
proper response block.
"""
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
for block in response:
for message in messages:
expected = expected_errors.get(block["name"], {}).get(message, set())
self.assertEqual(set(expected), set(block[message]))
| Python | 0.000057 |
d137005229e180b509f0a2f83f5d2472b40d8890 | Set up Sentry if we're configured for it (so I don't lose this code again) | run.py | run.py | import os
from os.path import abspath, dirname, join
from makerbase import app
if 'MAKERBASE_SETTINGS' not in os.environ:
os.environ['MAKERBASE_SETTINGS'] = join(dirname(abspath(__file__)), 'settings.py')
app.config.from_envvar('MAKERBASE_SETTINGS')
if 'SENTRY_DSN' in app.config:
from raven.contrib.flask import Sentry
sentry = Sentry(app, dsn=app.config['SENTRY_DSN'])
if __name__ == '__main__':
app.run(debug=True)
| import os
from os.path import abspath, dirname, join
from makerbase import app
if 'MAKERBASE_SETTINGS' not in os.environ:
os.environ['MAKERBASE_SETTINGS'] = join(dirname(abspath(__file__)), 'settings.py')
app.config.from_envvar('MAKERBASE_SETTINGS')
if __name__ == '__main__':
app.run(debug=True)
| Python | 0 |
a4656021f6a97bf5ffccb3d6e522515769ba0d21 | Remove unnecessary calls to disable_continuous_mode | run.py | run.py | import argparse
import serial
import threading
from io import BufferedRWPair, TextIOWrapper
from time import sleep
temp_usb = '/dev/ttyAMA0'
BAUD_RATE = 9600
parser = argparse.ArgumentParser()
parser.add_argument('oxygen', help='The USB port of the oxygen sensor.')
parser.add_argument('salinity', help='The USB port of the salinity sensor.')
parser.add_argument('server_ip', help='The IP address of the lighthouse node.')
parser.add_argument('port', help='The port of the lighthouse node.')
def init_db():
# TODO: initialize the sqlite database.
pass
def create_connection(usb_port):
print('Creating connection on {}'.format(usb_port))
ser = serial.Serial(usb_port, BAUD_RATE)
# disable_continuous_mode(ser)
return TextIOWrapper(BufferedRWPair(ser, ser), newline='\r', encoding='ascii', line_buffering=True)
def disable_continuous_mode(conn: serial.Serial):
# TODO: research if we need to send this command every time we connect to the sensors, or if it only
# needs to be sent once to disable continuous mode. If only once we should move this code into a
# separate python file.
print('Disabling continuous mode...')
conn.write(bytes('E\r', 'ascii'))
if conn.inWaiting() > 0:
# clear the buffer if there is anything waiting.
print('Clearing buffer...')
conn.read(conn.inWaiting())
def save_data(temperature, salinity, oxygen):
# TODO save data to database (sqlite)
pass
def push_data(temperature, salinity, oxygen, server_ip, server_port):
payload = {'temperature': temperature, 'salinity': salinity, 'oxygen': oxygen}
# TODO push data to lighthouse node.
def initialize_serial_connections(oxy_usb, sal_usb):
temp_conn = create_connection(temp_usb)
sal_conn = create_connection(sal_usb)
oxy_conn = create_connection(oxy_usb)
return temp_conn, sal_conn, oxy_conn
def run_loop(oxy_usb, sal_usb, server_ip, server_port):
temp_conn, sal_conn, oxy_conn = initialize_serial_connections()
# TODO: Catch serial.serialutil.SerialException on read?
while True:
temp.write('R\r')
temp = temp_conn.readline()
sal.write('R\r')
sal = sal_conn.readline()
# TODO: send temp and sal to oxy sensor first, then retrieve oxy value.
# oxy.write(<salinity command here>)
# oxy.write(<temp command here>)
oxy.write('R\r')
oxy = oxy_conn.readline()
print('Temperature: {}, Dissolved Oxygen: {}, Salinity: {}'.format(temp, oxy, sal))
save_data(temp, oxy, sal)
push_data(temp, oxy, sal, server_ip, server_port)
# TODO: Determine how often we should be grabbing data from sensors and pushing to other pi node.
time.sleep(5)
if __name__ == '__main__':
# TODO: Create supervisord script to keep run.py running.
# TODO: Parse command line args for database connection info.
args = parser.parse_args()
run_loop(args.oxygen, args.salinity, args.server_ip, args.port)
| import argparse
import serial
import threading
from io import BufferedRWPair, TextIOWrapper
from time import sleep
temp_usb = '/dev/ttyAMA0'
BAUD_RATE = 9600
parser = argparse.ArgumentParser()
parser.add_argument('oxygen', help='The USB port of the oxygen sensor.')
parser.add_argument('salinity', help='The USB port of the salinity sensor.')
parser.add_argument('server_ip', help='The IP address of the lighthouse node.')
parser.add_argument('port', help='The port of the lighthouse node.')
def init_db():
# TODO: initialize the sqlite database.
pass
def create_connection(usb_port):
print('Creating connection on {}'.format(usb_port))
ser = serial.Serial(usb_port, BAUD_RATE)
# disable_continuous_mode(ser)
return TextIOWrapper(BufferedRWPair(ser, ser), newline='\r', encoding='ascii', line_buffering=True)
def disable_continuous_mode(conn: serial.Serial):
# TODO: research if we need to send this command every time we connect to the sensors, or if it only
# needs to be sent once to disable continuous mode. If only once we should move this code into a
# separate python file.
print('Disabling continuous mode...')
conn.write(bytes('E\r', 'ascii'))
if conn.inWaiting() > 0:
# clear the buffer if there is anything waiting.
print('Clearing buffer...')
conn.read(conn.inWaiting())
def save_data(temperature, salinity, oxygen):
# TODO save data to database (sqlite)
pass
def push_data(temperature, salinity, oxygen, server_ip, server_port):
payload = {'temperature': temperature, 'salinity': salinity, 'oxygen': oxygen}
# TODO push data to lighthouse node.
def initialize_serial_connections(oxy_usb, sal_usb):
temp_conn = create_connection(temp_usb)
sal_conn = create_connection(sal_usb)
oxy_conn = create_connection(oxy_usb)
disable_continuous_mode(temp_conn)
disable_continuous_mode(sal_conn)
disable_continuous_mode(oxy_conn)
return temp_conn, sal_conn, oxy_conn
def run_loop(oxy_usb, sal_usb, server_ip, server_port):
temp_conn, sal_conn, oxy_conn = initialize_serial_connections()
# TODO: Catch serial.serialutil.SerialException on read?
while True:
temp.write('R\r')
temp = temp_conn.readline()
sal.write('R\r')
sal = sal_conn.readline()
# TODO: send temp and sal to oxy sensor first, then retrieve oxy value.
# oxy.write(<salinity command here>)
# oxy.write(<temp command here>)
oxy.write('R\r')
oxy = oxy_conn.readline()
print('Temperature: {}, Dissolved Oxygen: {}, Salinity: {}'.format(temp, oxy, sal))
save_data(temp, oxy, sal)
push_data(temp, oxy, sal, server_ip, server_port)
# TODO: Determine how often we should be grabbing data from sensors and pushing to other pi node.
time.sleep(5)
if __name__ == '__main__':
# TODO: Create supervisord script to keep run.py running.
# TODO: Parse command line args for database connection info.
args = parser.parse_args()
run_loop(args.oxygen, args.salinity, args.server_ip, args.port)
| Python | 0.000003 |
68d465988378f24e74f8dd098919031d3fcfa2f4 | fix source reinsertion bug | run.py | run.py | import spider
import sys
import os
import json
'''
requires spider.py be in the same directory as this module
spider.py can be found at http://github.com/shariq/notion-on-firebase
'''
def get_firebase_json_path(firebase_path):
return os.path.abspath(os.path.join(firebase_path, 'firebase.json'))
def add_to_firebase_json(firebase_path, new_rewrites):
firebase_json_path = get_firebase_json_path(firebase_path)
with open(firebase_json_path) as handle:
firebase_json = json.loads(handle.read())
if 'rewrites' not in firebase_json['hosting']:
firebase_json['hosting']['rewrites'] = []
existing_rewrites = firebase_json['hosting']['rewrites']
for new_rewrite in new_rewrites:
for existing_rewrite in existing_rewrites[:]:
if existing_rewrite['destination'] == new_rewrite['destination']:
print 'warning: removing', existing_rewrite
existing_rewrites.remove(existing_rewrite)
elif existing_rewrite['source'] == new_rewrite['source']:
print 'warning: removing', existing_rewrite
existing_rewrites.remove(existing_rewrite)
existing_rewrites.append(new_rewrite)
firebase_json['hosting']['rewrites'] = existing_rewrites
dumped = json.dumps(firebase_json, indent=4)
with open(firebase_json_path, 'w') as handle:
handle.write(dumped)
def get_firebase_public_path(firebase_path):
firebase_json_path = get_firebase_json_path(firebase_path)
with open(firebase_json_path) as handle:
contents = handle.read()
relative_public = json.loads(contents)['hosting']['public']
return os.path.join(firebase_path, relative_public)
def main(root_page, firebase_path):
print 'root_page:', root_page
print 'firebase_path:', firebase_path
firebase_public_path = get_firebase_public_path(firebase_path)
print 'firebase_public_path:', firebase_public_path
print 'beginning spider...'
rewrites = spider.run(root_page, firebase_public_path)
print 'completed spider'
print 'rewrites:', rewrites
add_to_firebase_json(firebase_path, rewrites)
original_path = os.getcwd()
os.chdir(firebase_path)
print 'deploying...'
os.system('firebase deploy')
os.chdir(original_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'usage: python run.py <root_notion_page_id> <firebase_path>'
print 'e.g, python run.py d065149ff38a4e7a9b908aeb262b0f4f ../firebase'
sys.exit(-1)
firebase_path = sys.argv[-1]
if not os.path.exists(firebase_path):
print 'error: that firebase_path could not be found. '
print '(path evaluated to {})'.format(os.path.abspath(firebase_path))
sys.exit(-1)
firebase_public_path = get_firebase_public_path(firebase_path)
if not os.path.exists(os.path.join(firebase_public_path, 'ga.js')):
print 'warning: ga.js was not found in your firebase public path'
print 'hit enter after placing it there or if you don\'t want ga.js'
print '(hint: this is a JS file from Google Analytics)'
raw_input()
root_page = sys.argv[-2]
main(root_page, firebase_path)
| import spider
import sys
import os
import json
'''
requires spider.py be in the same directory as this module
spider.py can be found at http://github.com/shariq/notion-on-firebase
'''
def get_firebase_json_path(firebase_path):
return os.path.abspath(os.path.join(firebase_path, 'firebase.json'))
def add_to_firebase_json(firebase_path, new_rewrites):
firebase_json_path = get_firebase_json_path(firebase_path)
with open(firebase_json_path) as handle:
firebase_json = json.loads(handle.read())
if 'rewrites' not in firebase_json['hosting']:
firebase_json['hosting']['rewrites'] = []
existing_rewrites = firebase_json['hosting']['rewrites']
for new_rewrite in new_rewrites:
for existing_rewrite in existing_rewrites[:]:
if existing_rewrite['destination'] == new_rewrite['destination']:
if existing_rewrite['source'] == new_rewrite['source']:
continue
print 'warning: removing', existing_rewrite
existing_rewrites.remove(existing_rewrite)
elif existing_rewrite['source'] == new_rewrite['source']:
print 'warning: removing', existing_rewrite
existing_rewrites.remove(existing_rewrite)
existing_rewrites.append(new_rewrite)
firebase_json['hosting']['rewrites'] = existing_rewrites
dumped = json.dumps(firebase_json, indent=4)
with open(firebase_json_path, 'w') as handle:
handle.write(dumped)
def get_firebase_public_path(firebase_path):
firebase_json_path = get_firebase_json_path(firebase_path)
with open(firebase_json_path) as handle:
contents = handle.read()
relative_public = json.loads(contents)['hosting']['public']
return os.path.join(firebase_path, relative_public)
def main(root_page, firebase_path):
print 'root_page:', root_page
print 'firebase_path:', firebase_path
firebase_public_path = get_firebase_public_path(firebase_path)
print 'firebase_public_path:', firebase_public_path
print 'beginning spider...'
rewrites = spider.run(root_page, firebase_public_path)
print 'completed spider'
print 'rewrites:', rewrites
add_to_firebase_json(firebase_path, rewrites)
original_path = os.getcwd()
os.chdir(firebase_path)
print 'deploying...'
os.system('firebase deploy')
os.chdir(original_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'usage: python run.py <root_notion_page_id> <firebase_path>'
print 'e.g, python run.py d065149ff38a4e7a9b908aeb262b0f4f ../firebase'
sys.exit(-1)
firebase_path = sys.argv[-1]
if not os.path.exists(firebase_path):
print 'error: that firebase_path could not be found. '
print '(path evaluated to {})'.format(os.path.abspath(firebase_path))
sys.exit(-1)
firebase_public_path = get_firebase_public_path(firebase_path)
if not os.path.exists(os.path.join(firebase_public_path, 'ga.js')):
print 'warning: ga.js was not found in your firebase public path'
print 'hit enter after placing it there or if you don\'t want ga.js'
print '(hint: this is a JS file from Google Analytics)'
raw_input()
root_page = sys.argv[-2]
main(root_page, firebase_path)
| Python | 0 |
1c8a1bfeef8206267a45562d4932cece1cbea1b4 | Fix some pylint issues | Trie.py | Trie.py | #! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import cdll, c_char_p, c_void_p, create_string_buffer
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_load.argtypes = [c_char_p]
libtrie.trie_load.restype = c_void_p
libtrie.trie_lookup.argtypes = [c_void_p, c_char_p, c_char_p]
libtrie.trie_lookup.restype = c_void_p
libtrie.trie_get_last_error.restype = c_char_p
class Trie(object):
def __init__(self, filename):
self.free_func = libtrie.trie_free
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
err = libtrie.trie_get_last_error()
raise IOError(str(err))
def __del__(self):
if self:
self.free_func(self.ptr)
def lookup(self, key):
s = create_string_buffer('\000' * 256)
res = libtrie.trie_lookup(self.ptr, key, s)
if res:
return [s.decode('utf8') for s in s.value.split('\n')]
else:
return []
def test_main():
"""
This function creates a storage backed by a file and tests it by retrieving
a couple of records.
"""
import sys
t = Trie('prijmeni5.trie')
for name in sys.stdin.readlines():
name = name.strip()
for s in t.lookup(name):
print s
if __name__ == '__main__':
test_main()
| #! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_load.argtypes = [c_char_p]
libtrie.trie_load.restype = c_void_p
libtrie.trie_lookup.argtypes = [ c_void_p, c_char_p, c_char_p ]
libtrie.trie_lookup.restype = c_void_p
libtrie.trie_get_last_error.restype = c_char_p
class Trie(object):
def __init__(self, filename):
self.free_func = libtrie.trie_free
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
err = libtrie.trie_get_last_error()
raise IOError(str(err))
def __del__(self):
if self:
self.free_func(self.ptr)
def lookup(self, key):
s = create_string_buffer('\000' * 256)
res = libtrie.trie_lookup(self.ptr, key, s)
if res:
return [s.decode('utf8') for s in s.value.split('\n')]
else:
return []
def test_main():
"""
This function creates a storage backed by a file and tests it by retrieving
a couple of records.
"""
import sys
t = Trie('prijmeni5.trie')
for name in sys.stdin.readlines():
name = name.strip()
for s in t.lookup(name):
print s
if __name__ == '__main__':
test_main()
| Python | 0.000077 |
e62db9661295ff3912dbaaaff0d9f267f0b7ffe1 | Add url callback on custom login | auth.py | auth.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle.ext import auth
from utils import conf
try:
auth_import = conf('auth')['engine'].split('.')[-1]
auth_from = u".".join(conf('auth')['engine'].split('.')[:-1])
auth_engine = getattr(__import__(auth_from, fromlist=[auth_import]),
auth_import)
except:
print 'Set valid auth engine'
exit(0)
callback = u"{}://{}".format(
conf('openmining')['protocol'],
conf('openmining')['domain'])
if conf('openmining')['domain_port'] not in ['80', '443']:
callback = "{}:{}".format(callback, conf('openmining')['domain_port'])
if auth_import == 'Google':
engine = auth_engine(
conf('auth')['key'], conf('auth')['secret'], callback)
elif auth_import == 'Facebook':
# Not working requered parans
engine = auth_engine()
elif auth_import == 'Twitter':
# Not working requered parans
engine = auth_engine()
else:
engine = auth_engine(callback_url=callback)
auth = auth.AuthPlugin(engine)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle.ext import auth
from utils import conf
try:
auth_import = conf('auth')['engine'].split('.')[-1]
auth_from = u".".join(conf('auth')['engine'].split('.')[:-1])
auth_engine = getattr(__import__(auth_from, fromlist=[auth_import]),
auth_import)
except:
print 'Set valid auth engine'
exit(0)
callback = u"{}://{}".format(
conf('openmining')['protocol'],
conf('openmining')['domain'])
if conf('openmining')['domain_port'] not in ['80', '443']:
callback = "{}:{}".format(callback, conf('openmining')['domain_port'])
if auth_import == 'Google':
engine = auth_engine(
conf('auth')['key'], conf('auth')['secret'], callback)
elif auth_import == 'Facebook':
# Not working requered parans
engine = auth_engine()
elif auth_import == 'Twitter':
# Not working requered parans
engine = auth_engine()
else:
engine = auth_engine()
auth = auth.AuthPlugin(engine)
| Python | 0 |
95723719050aa08119ed2478c0bb40253a2b0b3e | Remove methods with unnecessary super delegation. | libqtile/layout/max.py | libqtile/layout/max.py | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
# Copyright (c) 2017, Dirk Hartmann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.layout.base import _SimpleLayoutBase
class Max(_SimpleLayoutBase):
"""Maximized layout
A simple layout that only displays one window at a time, filling the
screen_rect. This is suitable for use on laptops and other devices with
small screens. Conceptually, the windows are managed as a stack, with
commands to switch to next and previous windows in the stack.
"""
defaults = [("name", "max", "Name of this layout.")]
def __init__(self, **config):
super().__init__(**config)
self.add_defaults(Max.defaults)
def add(self, client):
return super().add(client, 1)
def configure(self, client, screen_rect):
if self.clients and client is self.clients.current_client:
client.place(
screen_rect.x,
screen_rect.y,
screen_rect.width,
screen_rect.height,
0,
None
)
client.unhide()
else:
client.hide()
cmd_previous = _SimpleLayoutBase.previous
cmd_next = _SimpleLayoutBase.next
cmd_up = cmd_previous
cmd_down = cmd_next
| # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
# Copyright (c) 2017, Dirk Hartmann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.layout.base import _SimpleLayoutBase
class Max(_SimpleLayoutBase):
"""Maximized layout
A simple layout that only displays one window at a time, filling the
screen_rect. This is suitable for use on laptops and other devices with
small screens. Conceptually, the windows are managed as a stack, with
commands to switch to next and previous windows in the stack.
"""
defaults = [("name", "max", "Name of this layout.")]
def __init__(self, **config):
super().__init__(**config)
self.add_defaults(Max.defaults)
def clone(self, group):
return super().clone(group)
def add(self, client):
return super().add(client, 1)
def configure(self, client, screen_rect):
if self.clients and client is self.clients.current_client:
client.place(
screen_rect.x,
screen_rect.y,
screen_rect.width,
screen_rect.height,
0,
None
)
client.unhide()
else:
client.hide()
cmd_previous = _SimpleLayoutBase.previous
cmd_next = _SimpleLayoutBase.next
cmd_up = cmd_previous
cmd_down = cmd_next
| Python | 0 |
b5ee1f3dfccd3a18698ada03442854479e406d37 | Update expression-add-operators.py | Python/expression-add-operators.py | Python/expression-add-operators.py | # Time: O(3^n)
# Space: O(n)
#
# Given a string that contains only digits 0-9
# and a target value, return all possibilities
# to add operators +, -, or * between the digits
# so they evaluate to the target value.
#
# Examples:
# "123", 6 -> ["1+2+3", "1*2*3"]
# "232", 8 -> ["2*3+2", "2+3*2"]
# "00", 0 -> ["0+0", "0-0", "0*0"]
# "3456237490", 9191 -> []
#
class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
result, expr = [], []
self.addOperatorsDFS(num, target, 0, 0, 0, expr, result)
return result
def addOperatorsDFS(self, s, target, pos, operand1, operand2, expr, result):
# Base Case 1
if pos == len(s):
if operand1 + operand2 == target:
e = "".join(expr)
e = e[1:] if e[0] == '+' else e
result.append(e)
return True
return False
num, i = 0, pos
num_str = ""
while i < len(s):
num_str += s[i]
num = num * 10 + ord(s[i]) - ord('0')
# Case '+':
expr.append("+"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1 + operand2, num, expr, result)
expr.pop(), expr.pop()
# '-' and '*' could be used only if the expression is not empty.
if expr:
# Case '-':
expr.append("-"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1 + operand2, -num, expr, result)
expr.pop(), expr.pop()
# Case '*':
expr.append("*"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1, operand2 * num, expr, result)
expr.pop(), expr.pop()
# Char is '0'.
if num == 0:
break
i += 1
| # Time: O(3^n)
# Space: O(n)
#
# Given a string that contains only digits 0-9
# and a target value, return all possibilities
# to add operators +, -, or * between the digits
# so they evaluate to the target value.
#
# Examples:
# "123", 6 -> ["1+2+3", "1*2*3"]
# "232", 8 -> ["2*3+2", "2+3*2"]
# "00", 0 -> ["0+0", "0-0", "0*0"]
# "3456237490", 9191 -> []
#
class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
result, expr = [], []
self.addOperatorsDFS(num, target, 0, 0, 0, expr, result)
return result
def addOperatorsDFS(self, s, target, pos, operand1, operand2, expr, result):
# Base Case 1
if pos == len(s):
if operand1 + operand2 == target:
e = "".join(expr)
e = e[1:] if e[0] == '+' else e
result.append(e)
return True
return False
num, i = 0, pos
num_str = ""
while i < len(s):
num_str += s[i]
num = num * 10 + ord(s[i]) - ord('0')
# Case '+':
expr.append("+"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1 + operand2, num, expr, result)
expr.pop(), expr.pop()
# '-' and '*' could be used only if the expression is not empty.
if expr:
# Case '-':
expr.append("-"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1 + operand2, -num, expr, result)
expr.pop(), expr.pop()
# Case '*':
expr.append("*"), expr.append(num_str)
self.addOperatorsDFS(s, target, i + 1, operand1, operand2 * num, expr, result)
expr.pop(), expr.pop()
# Char is '0'.
if num == 0:
break
i += 1
| Python | 0.000002 |
5ebf34e1c572e5db9012af4228eaca2a8461b8d9 | add some extra debug logging to smr-reduce | smr/reduce.py | smr/reduce.py | #!/usr/bin/env python
import sys
from .shared import get_config, configure_logging
def main():
if len(sys.argv) < 2:
sys.stderr.write("usage: smr-reduce config.py\n")
sys.exit(1)
config = get_config(sys.argv[1])
configure_logging(config)
try:
for result in iter(sys.stdin.readline, ""):
result = result.rstrip() # remove trailing linebreak
logging.debug("smr-reduce got %s", result)
config.REDUCE_FUNC(result)
except (KeyboardInterrupt, SystemExit):
# we want to output results even if user aborted
config.OUTPUT_RESULTS_FUNC()
else:
config.OUTPUT_RESULTS_FUNC()
| #!/usr/bin/env python
import sys
from .shared import get_config, configure_logging
def main():
if len(sys.argv) < 2:
sys.stderr.write("usage: smr-reduce config.py\n")
sys.exit(1)
config = get_config(sys.argv[1])
configure_logging(config)
try:
for result in iter(sys.stdin.readline, ""):
config.REDUCE_FUNC(result.rstrip()) # remove trailing linebreak
except (KeyboardInterrupt, SystemExit):
# we want to output results even if user aborted
config.OUTPUT_RESULTS_FUNC()
else:
config.OUTPUT_RESULTS_FUNC()
| Python | 0 |
dd020b279f011ff78a6a41571a839e4c57333e93 | Rename username field to userspec (#196). | devilry/apps/core/models/relateduser.py | devilry/apps/core/models/relateduser.py | import re
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from period import Period
from node import Node
from abstract_is_admin import AbstractIsAdmin
class RelatedUserBase(models.Model, AbstractIsAdmin):
"""
Base class for :cls:`RelatedExaminer` and cls:`RelatedStudent`.
This is used to generate AssignmentGroups and
.. attribute:: userspec
One or more usernames followed by optional tags. Format: usernameA, ...., usernameN (tag1, tag2, ..., tagN).
For RelatedExaminer, only a single username is allowed.
"""
usersandtags_patt = r'((?:\w+\s*,\s*)*\w+)\s*\(((?:\w+\s*,\s*)*\w+)\)$'
userspec = models.CharField(max_length=200,
help_text='One or more usernames followed by optional tags. Format: usernameA, ...., usernameN (tag1, tag2, ..., tagN). For RelatedExaminer, only a single username is allowed.')
class Meta:
abstract = True # This model will then not be used to create any database table. Instead, when it is used as a base class for other models, its fields will be added to those of the child class.
unique_together = ('period', 'userspec')
app_label = 'core'
@classmethod
def q_is_admin(cls, user_obj):
return Q(period__admins=user_obj) | \
Q(period__parentnode__admins=user_obj) | \
Q(period__parentnode__parentnode__pk__in=Node._get_nodepks_where_isadmin(user_obj))
def clean(self, *args, **kwargs):
super(RelatedUserBase, self).clean(*args, **kwargs)
if not self.patt.match(self.userspec):
raise ValidationError('Invaid related user.')
def __unicode__(self):
return '{0}:{1}'.format(self.period, self.userspec)
class RelatedExaminer(RelatedUserBase):
"""
.. attribute:: period
A django.db.models.ForeignKey_ that points to the `Period`_.
"""
patt = re.compile('^' + RelatedUserBase.usersandtags_patt)
period = models.ForeignKey(Period, related_name='relatedexaminers',
help_text='The related period.')
class RelatedStudent(RelatedUserBase):
"""
.. attribute:: period
A django.db.models.ForeignKey_ that points to the `Period`_.
"""
patt = re.compile(r'^(?:(.+?)\s*::\s*)?' + RelatedUserBase.usersandtags_patt)
period = models.ForeignKey(Period, related_name='relatedstudents',
help_text='The related period.')
| import re
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from period import Period
from node import Node
from abstract_is_admin import AbstractIsAdmin
class RelatedUserBase(models.Model, AbstractIsAdmin):
"""
Base class for :cls:`RelatedExaminer` and cls:`RelatedStudent`.
This is used to generate AssignmentGroups and
.. attribute:: username
One or more usernames followed by optional tags. Format: usernameA, ...., usernameN (tag1, tag2, ..., tagN).
For RelatedExaminer, only a single username is allowed.
"""
usersandtags_patt = r'((?:\w+\s*,\s*)*\w+)\s*\(((?:\w+\s*,\s*)*\w+)\)$'
username = models.CharField(max_length=200,
help_text='One or more usernames followed by optional tags. Format: usernameA, ...., usernameN (tag1, tag2, ..., tagN). For RelatedExaminer, only a single username is allowed.')
class Meta:
abstract = True # This model will then not be used to create any database table. Instead, when it is used as a base class for other models, its fields will be added to those of the child class.
unique_together = ('period', 'username')
app_label = 'core'
@classmethod
def q_is_admin(cls, user_obj):
return Q(period__admins=user_obj) | \
Q(period__parentnode__admins=user_obj) | \
Q(period__parentnode__parentnode__pk__in=Node._get_nodepks_where_isadmin(user_obj))
def clean(self, *args, **kwargs):
super(RelatedUserBase, self).clean(*args, **kwargs)
if not self.patt.match(self.username):
raise ValidationError('Invaid related user.')
def __unicode__(self):
return '{0}:{1}'.format(self.period, self.username)
class RelatedExaminer(RelatedUserBase):
"""
.. attribute:: period
A django.db.models.ForeignKey_ that points to the `Period`_.
"""
patt = re.compile('^' + RelatedUserBase.usersandtags_patt)
period = models.ForeignKey(Period, related_name='relatedexaminers',
help_text='The related period.')
class RelatedStudent(RelatedUserBase):
"""
.. attribute:: period
A django.db.models.ForeignKey_ that points to the `Period`_.
"""
patt = re.compile(r'^(?:(.+?)\s*::\s*)?' + RelatedUserBase.usersandtags_patt)
period = models.ForeignKey(Period, related_name='relatedstudents',
help_text='The related period.')
| Python | 0 |
4b330755edab7a57de6d39a7e365c5f79df81065 | Update config.py | blaspy/config.py | blaspy/config.py | """
Copyright (c) 2014, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .errors import raise_blas_os_error
from ctypes import cdll
from os import chdir, path
from platform import system
from struct import calcsize
# The name of the BLAS .so or .dll file. By default this is the OpenBLAS reference
# implementation bundled with BLASpy. Only modify if you wish to use a different version of BLAS
# or if your operating system is not supported by BLASpy out of the box.
BLAS_NAME = "" # default is ""
# True if the BLAS .so or .dll file is in the blaspy/lib subdirectory,
# False if Python should search for it.
IN_BLASPY_SUBDIRECTORY = True # default is True
###############################
# DO NOT EDIT BELOW THIS LINE #
###############################
# find the appropriate BLAS to use
if BLAS_NAME == "": # try to use included OpenBLAS
PREPEND = str(path.dirname(__file__))[:-6] + "lib/"
if system() == "Windows":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-win64-int32.dll"
chdir(PREPEND + "win64")
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-win32.dll"
chdir(PREPEND + "win32")
PREPEND = ""
elif system() == "Linux":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-linux64.so"
PREPEND += "linux64/"
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-linux32.so"
PREPEND += "linux32/"
else: # no appropriate OpenBLAS included, BLAS_NAME_OVERRIDE must be used
raise_blas_os_error()
else:
PREPEND = ""
# Change the directory and load the library
_libblas = cdll.LoadLibrary(PREPEND + BLAS_NAME) | """
Copyright (c) 2014, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .errors import raise_blas_os_error
from ctypes import cdll
from os import chdir, path
from platform import system
from struct import calcsize
# The name of the BLAS .so or .dll file. By default this is the OpenBLAS reference
# implementation bundled with BLASpy. Only modify if you wish to use a different version of BLAS
# or if your operating system is not supported by BLASpy out of the box.
BLAS_NAME = "" # default is ""
# True if the BLAS .so or .dll file is in the blaspy/lib subdirectory,
# False if Python should search for it.
IN_BLASPY_SUBDIRECTORY = True # default is True
###############################
# DO NOT EDIT BELOW THIS LINE #
###############################
# find the appropriate BLAS to use
if BLAS_NAME == "": # try to use included OpenBLAS
if system() == "Windows":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-win64-int32.dll"
SUB_DIRECTORY = "win64"
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-win32.dll"
SUB_DIRECTORY = "win32"
elif system() == "Linux":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-linux64.so"
SUB_DIRECTORY = "linux64"
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-linux32.so"
SUB_DIRECTORY = "linux32"
else: # no appropriate OpenBLAS included, BLAS_NAME_OVERRIDE must be used
raise_blas_os_error()
else:
SUB_DIRECTORY = ""
# Change the directory and load the library
if IN_BLASPY_SUBDIRECTORY:
chdir(str(path.dirname(__file__))[:-6] + "lib/" + SUB_DIRECTORY)
_libblas = cdll.LoadLibrary(BLAS_NAME) | Python | 0 |
290a1f0cb301a6a4f4be2e218e8d97a5644cc2d3 | Remove old Ensembl domain | rnacentral_pipeline/databases/ensembl/metadata/karyotypes.py | rnacentral_pipeline/databases/ensembl/metadata/karyotypes.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import itertools as it
import requests
from retry import retry
from ratelimiter import RateLimiter
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
import six
DOMAINS = {
'ensembl',
}
@lru_cache()
@retry(requests.HTTPError, tries=5, delay=1)
@RateLimiter(max_calls=15, period=1)
def find_species(domain):
response = requests.get(
'http://rest.%s.org/info/species' % domain,
headers={'Content-Type': 'application/json'}
)
response.raise_for_status()
species = []
raw = response.json()
for entry in raw['species']:
species.append(entry['name'])
return species
@lru_cache()
@retry(requests.HTTPError, tries=5, delay=1)
@RateLimiter(max_calls=15, period=1)
def fetch(species, domain):
response = requests.get(
'http://rest.%s.org/info/assembly/%s?bands=1' % (domain, species),
headers={'Content-Type': 'application/json'}
)
response.raise_for_status()
return response.json()
def default_bands(entry):
return {
"size": entry["length"],
"bands": [{
"start": 1,
"end": entry["length"]
}]
}
def process_chromosome(entry):
if 'bands' not in entry:
return default_bands(entry)
bands = []
for band in entry["bands"]:
bands.append({
"id": band["id"],
"start": band["start"],
"end": band["end"],
"type": band["stain"]
})
return {
"size": entry["length"],
"bands": bands
}
def process(raw):
result = {}
for entry in raw["top_level_region"]:
result[entry["name"]] = default_bands(entry)
if entry["coord_system"] == "chromosome":
result[entry['name']] = process_chromosome(entry)
return raw['default_coord_system_version'], result
def for_domain(domain, allowed=None):
for species in find_species(domain):
if not species or (allowed and species in allowed):
raw_data = fetch(species, domain)
yield process(raw_data)
def data(species=None):
results = six.moves.map(lambda d: for_domain(d, allowed=species), DOMAINS)
return it.chain.from_iterable(results)
def write(output, species=None):
writer = csv.writer(output)
for (assembly_id, bands) in data(species=species):
writer.writerow([assembly_id, json.dumps(bands)])
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import itertools as it
import requests
from retry import retry
from ratelimiter import RateLimiter
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
import six
DOMAINS = {
'ensemblgenomes',
'ensembl',
}
@lru_cache()
@retry(requests.HTTPError, tries=5, delay=1)
@RateLimiter(max_calls=15, period=1)
def find_species(domain):
response = requests.get(
'http://rest.%s.org/info/species' % domain,
headers={'Content-Type': 'application/json'}
)
response.raise_for_status()
species = []
raw = response.json()
for entry in raw['species']:
species.append(entry['name'])
return species
@lru_cache()
@retry(requests.HTTPError, tries=5, delay=1)
@RateLimiter(max_calls=15, period=1)
def fetch(species, domain):
response = requests.get(
'http://rest.%s.org/info/assembly/%s?bands=1' % (domain, species),
headers={'Content-Type': 'application/json'}
)
response.raise_for_status()
return response.json()
def default_bands(entry):
return {
"size": entry["length"],
"bands": [{
"start": 1,
"end": entry["length"]
}]
}
def process_chromosome(entry):
if 'bands' not in entry:
return default_bands(entry)
bands = []
for band in entry["bands"]:
bands.append({
"id": band["id"],
"start": band["start"],
"end": band["end"],
"type": band["stain"]
})
return {
"size": entry["length"],
"bands": bands
}
def process(raw):
result = {}
for entry in raw["top_level_region"]:
result[entry["name"]] = default_bands(entry)
if entry["coord_system"] == "chromosome":
result[entry['name']] = process_chromosome(entry)
return raw['default_coord_system_version'], result
def for_domain(domain, allowed=None):
for species in find_species(domain):
if not species or (allowed and species in allowed):
raw_data = fetch(species, domain)
yield process(raw_data)
def data(species=None):
results = six.moves.map(lambda d: for_domain(d, allowed=species), DOMAINS)
return it.chain.from_iterable(results)
def write(output, species=None):
writer = csv.writer(output)
for (assembly_id, bands) in data(species=species):
writer.writerow([assembly_id, json.dumps(bands)])
| Python | 0 |
e4b2f5eed4c169792812ee82fa1f65cdc9516fb0 | Add first/lastname to project search | lims/projects/views.py | lims/projects/views.py | import django_filters
from rest_framework import viewsets
from rest_framework.validators import ValidationError
from rest_framework.filters import (OrderingFilter,
SearchFilter,
DjangoFilterBackend)
from guardian.shortcuts import get_group_perms
from lims.shared.filters import ListFilter
from lims.permissions.permissions import (IsInAdminGroupOrRO,
ViewPermissionsMixin,
ExtendedObjectPermissions,
ExtendedObjectPermissionsFilter)
from .models import (Product, ProductStatus, Project)
from .serializers import (ProjectSerializer, ProductSerializer,
DetailedProductSerializer, ProductStatusSerializer)
from .parsers import DesignFileParser
class ProjectViewSet(ViewPermissionsMixin, viewsets.ModelViewSet):
"""
View all projects the user has permissions for
Projects are filtered by permissions and users cannot see any
projects they do not have permissions for.
"""
queryset = Project.objects.all()
serializer_class = ProjectSerializer
permission_classes = (ExtendedObjectPermissions,)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
search_fields = ('project_identifier', 'name', 'primary_lab_contact__username',
'crm_project__account__user__first_name',
'crm_project__account__user__last_name',)
def perform_create(self, serializer):
serializer, permissions = self.clean_serializer_of_permissions(serializer)
instance = serializer.save(created_by=self.request.user)
self.assign_permissions(instance, permissions)
class ProductFilter(django_filters.FilterSet):
# on_workflow_as = django_filters.MethodFilter()
id__in = ListFilter(name='id')
def filter_on_workflow_as(self, queryset, value):
if value == 'False':
return queryset.filter(on_workflow_as__isnull=True)
elif value == 'True':
return queryset.filter(on_workflow_as__isnull=False)
return queryset
class Meta:
model = Product
fields = {
'id': ['exact', 'in'],
'project': ['exact'],
'status': ['exact'],
# 'on_workflow_as': ['exact'],
}
class ProductViewSet(ViewPermissionsMixin, viewsets.ModelViewSet):
"""
Provides a list of all products
"""
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = (ExtendedObjectPermissions,)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
search_fields = ('product_identifier', 'name',)
filter_class = ProductFilter
def _parse_design(self, instance):
"""
Takes a design file and extracts the necessary info
out to add inventory items or other things.
"""
if instance.design is not None:
items = []
parser = DesignFileParser(instance.design)
if instance.design_format == 'csv':
items = parser.parse_csv()
elif instance.design_format == 'gb':
items = parser.parse_gb()
for i in items:
instance.linked_inventory.add(i)
def get_serializer_class(self):
# Use a more compact serializer when listing.
# This makes things run more efficiantly.
if self.action == 'retrieve':
return DetailedProductSerializer
return ProductSerializer
def perform_create(self, serializer):
# Ensure the user has the correct permissions on the Project
# to add a product to it.
project = serializer.validated_data['project']
if ('change_project' in get_group_perms(self.request.user, project)
or self.request.user.groups.filter(name='admin').exists()):
instance = serializer.save(created_by=self.request.user)
self.clone_group_permissions(instance.project, instance)
else:
raise ValidationError('You do not have permission to create this')
# Does it have a design?
# If so, parse the design to extract info to get parts from
# inventory.
self._parse_design(instance)
class ProductStatusViewSet(viewsets.ModelViewSet):
queryset = ProductStatus.objects.all()
serializer_class = ProductStatusSerializer
permission_classes = (IsInAdminGroupOrRO,)
| import django_filters
from rest_framework import viewsets
from rest_framework.validators import ValidationError
from rest_framework.filters import (OrderingFilter,
SearchFilter,
DjangoFilterBackend)
from guardian.shortcuts import get_group_perms
from lims.shared.filters import ListFilter
from lims.permissions.permissions import (IsInAdminGroupOrRO,
ViewPermissionsMixin,
ExtendedObjectPermissions,
ExtendedObjectPermissionsFilter)
from .models import (Product, ProductStatus, Project)
from .serializers import (ProjectSerializer, ProductSerializer,
DetailedProductSerializer, ProductStatusSerializer)
from .parsers import DesignFileParser
class ProjectViewSet(ViewPermissionsMixin, viewsets.ModelViewSet):
"""
View all projects the user has permissions for
Projects are filtered by permissions and users cannot see any
projects they do not have permissions for.
"""
queryset = Project.objects.all()
serializer_class = ProjectSerializer
permission_classes = (ExtendedObjectPermissions,)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
search_fields = ('project_identifier', 'name', 'primary_lab_contact__username')
def perform_create(self, serializer):
serializer, permissions = self.clean_serializer_of_permissions(serializer)
instance = serializer.save(created_by=self.request.user)
self.assign_permissions(instance, permissions)
class ProductFilter(django_filters.FilterSet):
# on_workflow_as = django_filters.MethodFilter()
id__in = ListFilter(name='id')
def filter_on_workflow_as(self, queryset, value):
if value == 'False':
return queryset.filter(on_workflow_as__isnull=True)
elif value == 'True':
return queryset.filter(on_workflow_as__isnull=False)
return queryset
class Meta:
model = Product
fields = {
'id': ['exact', 'in'],
'project': ['exact'],
'status': ['exact'],
# 'on_workflow_as': ['exact'],
}
class ProductViewSet(ViewPermissionsMixin, viewsets.ModelViewSet):
"""
Provides a list of all products
"""
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = (ExtendedObjectPermissions,)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
search_fields = ('product_identifier', 'name',)
filter_class = ProductFilter
def _parse_design(self, instance):
"""
Takes a design file and extracts the necessary info
out to add inventory items or other things.
"""
if instance.design is not None:
items = []
parser = DesignFileParser(instance.design)
if instance.design_format == 'csv':
items = parser.parse_csv()
elif instance.design_format == 'gb':
items = parser.parse_gb()
for i in items:
instance.linked_inventory.add(i)
def get_serializer_class(self):
# Use a more compact serializer when listing.
# This makes things run more efficiantly.
if self.action == 'retrieve':
return DetailedProductSerializer
return ProductSerializer
def perform_create(self, serializer):
# Ensure the user has the correct permissions on the Project
# to add a product to it.
project = serializer.validated_data['project']
if ('change_project' in get_group_perms(self.request.user, project)
or self.request.user.groups.filter(name='admin').exists()):
instance = serializer.save(created_by=self.request.user)
self.clone_group_permissions(instance.project, instance)
else:
raise ValidationError('You do not have permission to create this')
# Does it have a design?
# If so, parse the design to extract info to get parts from
# inventory.
self._parse_design(instance)
class ProductStatusViewSet(viewsets.ModelViewSet):
queryset = ProductStatus.objects.all()
serializer_class = ProductStatusSerializer
permission_classes = (IsInAdminGroupOrRO,)
| Python | 0 |
1809df6d5886ac6c0c35c8e879d9eda334606f4e | Simplify handling from_db_value across django versions | django_unixdatetimefield/fields.py | django_unixdatetimefield/fields.py | import datetime
import time
import django.db.models as models
class UnixDateTimeField(models.DateTimeField):
# TODO(niklas9):
# * should we take care of transforming between time zones in any way here ?
# * get default datetime format from settings ?
DEFAULT_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
TZ_CONST = '+'
# TODO(niklas9):
# * metaclass below just for Django < 1.9, fix a if stmt for it?
#__metaclass__ = models.SubfieldBase
description = "Unix timestamp integer to datetime object"
def get_internal_type(self):
return 'PositiveIntegerField'
def to_python(self, val):
if val is None or isinstance(val, datetime.datetime):
return val
if isinstance(val, datetime.date):
return datetime.datetime(val.year, val.month, val.day)
elif self._is_string(val):
# TODO(niklas9):
# * not addressing time zone support as todo above for now
if self.TZ_CONST in val:
val = val.split(self.TZ_CONST)[0]
return datetime.datetime.strptime(val, self.DEFAULT_DATETIME_FMT)
else:
return datetime.datetime.fromtimestamp(float(val))
def _is_string(value, val):
try:
return isinstance(val, unicode)
except NameError:
return isinstance(val, str)
def get_db_prep_value(self, val, *args, **kwargs):
if val is None:
if self.default == models.fields.NOT_PROVIDED: return None
return self.default
return int(time.mktime(val.timetuple()))
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return self.to_python(val).strftime(self.DEFAULT_DATETIME_FMT)
def from_db_value(self, val, *args, **kwargs):
return self.to_python(val)
| import datetime
import time
import django
import django.db.models as models
class UnixDateTimeField(models.DateTimeField):
# TODO(niklas9):
# * should we take care of transforming between time zones in any way here ?
# * get default datetime format from settings ?
DEFAULT_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
TZ_CONST = '+'
# TODO(niklas9):
# * metaclass below just for Django < 1.9, fix a if stmt for it?
#__metaclass__ = models.SubfieldBase
description = "Unix timestamp integer to datetime object"
def get_internal_type(self):
return 'PositiveIntegerField'
def to_python(self, val):
if val is None or isinstance(val, datetime.datetime):
return val
if isinstance(val, datetime.date):
return datetime.datetime(val.year, val.month, val.day)
elif self._is_string(val):
# TODO(niklas9):
# * not addressing time zone support as todo above for now
if self.TZ_CONST in val:
val = val.split(self.TZ_CONST)[0]
return datetime.datetime.strptime(val, self.DEFAULT_DATETIME_FMT)
else:
return datetime.datetime.fromtimestamp(float(val))
def _is_string(value, val):
try:
return isinstance(val, unicode)
except NameError:
return isinstance(val, str)
def get_db_prep_value(self, val, *args, **kwargs):
if val is None:
if self.default == models.fields.NOT_PROVIDED: return None
return self.default
return int(time.mktime(val.timetuple()))
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return self.to_python(val).strftime(self.DEFAULT_DATETIME_FMT)
# Django 2.0 updates the signature of from_db_value.
# https://docs.djangoproject.com/en/2.0/releases/2.0/#context-argument-of-field-from-db-value-and-expression-convert-value
if django.VERSION < (2,):
def from_db_value(self, val, expression, connection, context):
return self.to_python(val)
else:
def from_db_value(self, val, expression, connection):
return self.to_python(val)
| Python | 0.000017 |
20053951b3036d0ae49f7f1ae25d600848872c82 | Bump version | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.36.2'
| __version__ = '2.36.1'
| Python | 0 |
f426d44f82a4f1855cb180b5aff98221c14537f1 | Update version.py | nltools/version.py | nltools/version.py | """Specifies current version of nltools to be used by setup.py and __init__.py
"""
__version__ = '0.3.7'
| """Specifies current version of nltools to be used by setup.py and __init__.py
"""
__version__ = '0.3.6'
| Python | 0.000001 |
fcf5d1f33026069d69690c67f7ddcc8c77f15626 | add exception handingling for debug | opreturnninja/views.py | opreturnninja/views.py | import json
import random
from pyramid.view import view_config
from .constants import ELECTRUM_SERVERS
from bitcoin.rpc import RawProxy, DEFAULT_USER_AGENT
import socket
@view_config(route_name='api', renderer='json')
def api_view(request):
global rpc
assert hasattr(request, 'json_body')
assert 'method' in request.json_body and 'params' in request.json_body
method = request.json_body['method']
params = request.json_body['params']
assert type(params) == list
if method == 'sendrawtransaction':
assert len(params) == 1
sent = False
while not sent:
try:
server = random.choice(list(ELECTRUM_SERVERS.items()))
s = socket.create_connection(server)
s.send(b'{"id":"0", "method":"blockchain.transaction.broadcast", "params":["' + params[0].encode() + b'"]}\n')
r = {'result': s.recv(1024)[:-1].decode(), 'error': None, 'id': request.json_body['id']} # the slice is to remove the trailing new line
print(r)
return r
except ConnectionRefusedError as e:
print(e, server)
except socket.gaierror as e:
print(e, server)
except Exception as e:
print(e, server)
return {
'result': None,
'error': 'RPC Request Unknown',
'id': request.json_body['id'],
}
@view_config(route_name='index', renderer='templates/index.pt')
def index_view(request):
return {} | import json
import random
from pyramid.view import view_config
from .constants import ELECTRUM_SERVERS
from bitcoin.rpc import RawProxy, DEFAULT_USER_AGENT
import socket
@view_config(route_name='api', renderer='json')
def api_view(request):
global rpc
assert hasattr(request, 'json_body')
assert 'method' in request.json_body and 'params' in request.json_body
method = request.json_body['method']
params = request.json_body['params']
assert type(params) == list
if method == 'sendrawtransaction':
assert len(params) == 1
sent = False
while not sent:
try:
s = socket.create_connection(random.choice(list(ELECTRUM_SERVERS.items())))
s.send(b'{"id":"0", "method":"blockchain.transaction.broadcast", "params":["' + params[0].encode() + b'"]}\n')
r = {'result': s.recv(1024)[:-1].decode(), 'error': None, 'id': request.json_body['id']} # the slice is to remove the trailing new line
print(r)
return r
except ConnectionRefusedError as e:
print(e)
except socket.gaierror as e:
print(e)
return {
'result': None,
'error': 'RPC Request Unknown',
'id': request.json_body['id'],
}
@view_config(route_name='index', renderer='templates/index.pt')
def index_view(request):
return {} | Python | 0 |
43d7850403e1e98951909bcb0c441098c3221bde | Update ipc_lista1.4.py | lista1/ipc_lista1.4.py | lista1/ipc_lista1.4.py | #ipc_lista1.4
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um Programa que peça as 4 notas bimestrais e mostre a media
nota1 = int(input("Digite a primeira nota do bimestre: "))
nota2 = int(input("Digite a segunda nota do bimestre: "))
nota3 = int(input("Digite a terceira nota do bismestre: "))
nota4 - int(input("Digite a quarta note do bismestre: "))
print
media = (nota1+nota2+nota3+nota4)/ 4.0
print" A sua média é: %s" %media
| #ipc_lista1.4
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um Programa que peça as 4 notas bimestrais e mostre a media
nota1 = int(input("Digite a primeira nota do bimestre: "))
nota2 = int(input("Digite a segunda nota do bimestre: "))
nota3 = int(input("Digite a terceira nota do bismestre: "))
nota4 - int(input("Digite a quarta note do bismestre: "))
print
media = (nota1+nota2+nota3+nota4)/4.0
print" A sua média é: %s" %media
| Python | 0 |
fb772e5e597082a119348efa68f70e60c11506cd | clean up | lists/gift_exchange.py | lists/gift_exchange.py |
import random
import itertools
givers = [('tim', 'shirt'), ('jim', 'shoe'), ('john', 'ball'), ('joe', 'fruit')]
if len(givers) < 2:
print "must have more than 1 givers"
else:
a = list(givers)
b = list(givers)
while a == b:
random.shuffle(a)
random.shuffle(b)
for i, j in itertools.izip(a, b):
print '%s gives %s to %s.' % (i[0], i[1], j[0])
|
import random
import itertools
givers = [('tim', 'shirt'), ('jim', 'shoe'), ('joe', 'fruit'), ('john', 'ball')]
def valid(a, b):
if a == b:
return False
else:
return True
if len(givers) < 2:
print "must have more than 1 givers"
else:
a = list(givers)
b = list(givers)
while not valid(a, b):
random.shuffle(a)
random.shuffle(b)
for i, j in itertools.izip(a, b):
print '%s gives %s to %s.' % (i[0], i[1], j[0])
| Python | 0.000001 |
b76e1697b92565ca3fc8a7ee2961adf894095e04 | Add User as foreign key in Bill | billing/models.py | billing/models.py | from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, pre_init
import datetime
class Bill(models.Model):
user = models.ForeignKey(User)
number = models.CharField(max_length=10, unique=True, blank=True)
isPaid = models.BooleanField(default=False)
billing_date = models.DateField()
class Service(models.Model):
reference = models.CharField(max_length=5)
name = models.CharField(max_length=128)
description = models.CharField(max_length=1024)
price = models.FloatField()
def __unicode__(self):
""" Return name as object representation """
return self.name
class BillLine(models.Model):
bill = models.ForeignKey(Bill)
service = models.ForeignKey(Service)
quantity = models.SmallIntegerField(default=1)
total = models.FloatField(blank=True)
class UserProfile(models.Model):
""" extend User class """
user = models.OneToOneField(User)
billing_address = models.CharField(max_length=1024)
@receiver(pre_save, sender=BillLine)
def compute_total(sender, instance, **kwargs):
""" set total of line automatically """
if not instance.total:
instance.total = instance.service.price * instance.quantity
@receiver(pre_save, sender=Bill)
def define_number(sender, instance, **kwargs):
""" set bill number incrementally """
# only when we create record for the first time
if not instance.number:
today = datetime.date.today()
# get last id in base, we assume it's the last record
try:
last_record = sender.objects.latest('id')
#get last bill number and increment it
last_num = '%03d' % (int(last_record.number[-3:])+1)
# no Bill in db
except sender.DoesNotExist:
last_num = '001'
instance.number = 'F%s%s' % (today.strftime('%Y%m'), last_num)
| from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, pre_init
import datetime
class Bill(models.Model):
number = models.CharField(max_length=10, unique=True, blank=True)
isPaid = models.BooleanField(default=False)
billing_date = models.DateField()
class Service(models.Model):
reference = models.CharField(max_length=5)
name = models.CharField(max_length=128)
description = models.CharField(max_length=1024)
price = models.FloatField()
def __unicode__(self):
""" Return name as object representation """
return self.name
class BillLine(models.Model):
bill = models.ForeignKey(Bill)
service = models.ForeignKey(Service)
quantity = models.SmallIntegerField(default=1)
total = models.FloatField(blank=True)
class UserProfile(models.Model):
""" extend User class """
user = models.OneToOneField(User)
billing_address = models.CharField(max_length=1024)
@receiver(pre_save, sender=BillLine)
def compute_total(sender, instance, **kwargs):
""" set total of line automatically """
if not instance.total:
instance.total = instance.service.price * instance.quantity
@receiver(pre_save, sender=Bill)
def define_number(sender, instance, **kwargs):
""" set bill number incrementally """
# only when we create record for the first time
if not instance.number:
today = datetime.date.today()
# get last id in base, we assume it's the last record
try:
last_record = sender.objects.latest('id')
#get last bill number and increment it
last_num = '%03d' % (int(last_record.number[-3:])+1)
# no Bill in db
except sender.DoesNotExist:
last_num = '001'
instance.number = 'F%s%s' % (today.strftime('%Y%m'), last_num)
| Python | 0.000001 |
17d3d63564798cd03788ce579227d5425cd866c0 | Make fake uploader use zlib compression | bin/fake_order.py | bin/fake_order.py | #!/usr/bin/env python
"""
A fake order upload script, used to manually test the whole stack.
"""
import simplejson
import requests
import zlib
data = """
{
"resultType" : "orders",
"version" : "0.1alpha",
"uploadKeys" : [
{ "name" : "emk", "key" : "abc" },
{ "name" : "ec" , "key" : "def" }
],
"generator" : { "name" : "Yapeal", "version" : "11.335.1737" },
"currentTime" : "2011-10-22T15:46:00+00:00",
"columns" : ["price","volRemaining","range","orderID","volEntered","minVolume","bid","issueDate","duration","stationID","solarSystemID"],
"rowsets" : [
{
"generatedAt" : "2011-10-22T15:43:00+00:00",
"regionID" : 10000065,
"typeID" : 11134,
"rows" : [
[8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038],
[11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null],
[11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039]
]
},
{
"generatedAt" : "2011-10-22T15:42:00+00:00",
"regionID" : null,
"typeID" : 11135,
"rows" : [
[8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038],
[11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null],
[11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039]
]
}
]
}
"""
data = simplejson.loads(data)
data = zlib.compress(simplejson.dumps(data))
headers = {
'Content-Encoding': 'gzip',
}
r = requests.post(
'http://eve-emdr.local/upload/unified/',
#'http://localhost:8080/upload/unified/',
data=data,
headers=headers,
)
print "Sent fake order."
| #!/usr/bin/env python
"""
A fake order upload script, used to manually test the whole stack.
"""
import simplejson
import requests
data = """
{
"resultType" : "orders",
"version" : "0.1alpha",
"uploadKeys" : [
{ "name" : "emk", "key" : "abc" },
{ "name" : "ec" , "key" : "def" }
],
"generator" : { "name" : "Yapeal", "version" : "11.335.1737" },
"currentTime" : "2011-10-22T15:46:00+00:00",
"columns" : ["price","volRemaining","range","orderID","volEntered","minVolume","bid","issueDate","duration","stationID","solarSystemID"],
"rowsets" : [
{
"generatedAt" : "2011-10-22T15:43:00+00:00",
"regionID" : 10000065,
"typeID" : 11134,
"rows" : [
[8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038],
[11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null],
[11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039]
]
},
{
"generatedAt" : "2011-10-22T15:42:00+00:00",
"regionID" : null,
"typeID" : 11135,
"rows" : [
[8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038],
[11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null],
[11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039]
]
}
]
}
"""
data = simplejson.loads(data)
data = simplejson.dumps(data)
r = requests.post(
'http://localhost:8080/upload/unified/',
data=data,
)
print "RESPONSE"
print r.text | Python | 0.000003 |
f7a86cec72e4b5ff017013561f4fd3f3f59bfde5 | Fix typos | AutoSetNewFileSyntax.py | AutoSetNewFileSyntax.py | import sublime
import sublime_plugin
import sys
import os
import logging
sys.path.insert(0, os.path.dirname(__file__))
from SyntaxMappings import *
PLUGIN_NAME = 'AutoSetNewFileSyntax'
LOG_LEVEL = logging.INFO
LOG_FORMAT = "%(name)s: [%(levelname)s] %(message)s"
settings = None
syntaxMappings = None
loggingStreamHandler = None
logger = None
def plugin_unloaded():
global settings, loggingStreamHandler, logger
settings.clear_on_change("syntax_mapping")
logger.removeHandler(loggingStreamHandler)
def plugin_loaded():
global settings, syntaxMappings, loggingStreamHandler, logger
# create logger stream handler
loggingStreamHandler = logging.StreamHandler()
loggingStreamHandler.setFormatter(logging.Formatter(LOG_FORMAT))
# config logger
logger = logging.getLogger(PLUGIN_NAME)
logger.setLevel(LOG_LEVEL)
logger.addHandler(loggingStreamHandler)
settings = sublime.load_settings(PLUGIN_NAME+".sublime-settings")
syntaxMappings = SyntaxMappings(settings=settings, logger=logger)
# rebuilt syntax mappings if there is an user settings update
settings.add_on_change("syntax_mapping", syntaxMappings.rebuildSyntaxMappings)
class AutoSetNewFileSyntax(sublime_plugin.EventListener):
global settings, syntaxMappings
def on_activated_async(self, view):
if (
self.isEventListenerEnabled('on_activated_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_clone_async(self, view):
if (
self.isEventListenerEnabled('on_clone_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_load_async(self, view):
if (
self.isEventListenerEnabled('on_load_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_modified_async(self, view):
if (
self.isEventListenerEnabled('on_modified_async') and
self.isOnlyOneCursor(view) and
self.isFirstCursorNearBeginning(view) and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_pre_save_async(self, view):
if (
self.isEventListenerEnabled('on_pre_save_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def isEventListenerEnabled(self, event):
try:
return settings.get("event_listeners", None)[event]
except:
return False
def isOnlyOneCursor(self, view):
""" check there is only one cursor """
return len(view.sel()) == 1
def isFirstCursorNearBeginning(self, view):
""" check the cursor is at first few lines """
return view.rowcol(view.sel()[0].a)[0] < 2
def isScopePlainText(self, view):
""" check the scope of the first line is plain text """
return view.scope_name(0).strip() == 'text.plain'
def matchAndSetSyntax(self, view):
firstLine = self.getPartialFirstLine(view)
for syntaxMapping in syntaxMappings.value():
syntaxFile, firstLineMatchRegexes = syntaxMapping
for firstLineMatchRegex in firstLineMatchRegexes:
if firstLineMatchRegex.search(firstLine) is not None:
view.set_syntax_file(syntaxFile)
return
def getPartialFirstLine(self, view):
region = view.line(0)
firstLineLengthMax = settings.get('first_line_length_max')
if firstLineLengthMax >= 0:
# if the first line is longer than the max line length,
# then use the max line length
# otherwise use the actual length of the first line
region = sublime.Region(0, min(region.end(), firstLineLengthMax))
return view.substr(region)
| import sublime
import sublime_plugin
import sys
import os
import logging
sys.path.insert(0, os.path.dirname(__file__))
from SyntaxMappings import *
PLUGIN_NAME = 'AutoSetNewFileSyntax'
LOG_LEVEL = logging.INFO
LOG_FORMAT = "%(name)s: [%(levelname)s] %(message)s"
settings = None
syntaxMappings = None
loggingStreamHandler = None
logger = None
def plugin_unloaded():
global settings, loggingStreamHandler, logger
settings.clear_on_change("syntax_mapping")
logger.removeHandler(loggingStreamHandler)
def plugin_loaded():
global settings, syntaxMappings, loggingStreamHandler, logger
# create logger stream handler
loggingStreamHandler = logging.StreamHandler()
loggingStreamHandler.setFormatter(logging.Formatter(LOG_FORMAT))
# config logger
logger = logging.getLogger(PLUGIN_NAME)
logger.setLevel(LOG_LEVEL)
logger.addHandler(loggingStreamHandler)
settings = sublime.load_settings(PLUGIN_NAME+".sublime-settings")
syntaxMappings = SyntaxMappings(settings=settings, logger=logger)
# rebuilt syntax mappings if there is an user settings update
settings.add_on_change("syntax_mapping", syntaxMappings.rebuildSyntaxMappings)
class AutoSetNewFileSyntax(sublime_plugin.EventListener):
global settings, syntaxMappings
def on_activated_async(self, view):
if (
self.isEventListenerEnabled('on_activated_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_clone_async(self, view):
if (
self.isEventListenerEnabled('on_clone_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_load_async(self, view):
if (
self.isEventListenerEnabled('on_load_async') and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_modified_async(self, view):
if (
self.isEventListenerEnabled('on_modified_async') and
self.isOnlyOneCursor(view) and
self.isFirstCursorNearBeginning(view) and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def on_pre_save_async(self, view):
if (
self.isEventListenerEnabled('on_pre_save_async') and
self.isOnlyOneCursor(view) and
self.isFirstCursorNearBeginning(view) and
self.isScopePlainText(view)
):
self.matchAndSetSyntax(view)
def isEventListenerEnabled(self, event):
try:
return settings.get("event_listeners", None)[event]
except:
return False
def isOnlyOneCursor(self, view):
""" check there is only one cursor """
return len(view.sel()) == 1
def isFirstCursorNearBeginning(self, view):
""" check the cursor is at first few lines """
return view.rowcol(view.sel()[0].a)[0] < 2
def isScopePlainText(self, view):
""" check the scope of the first line is plain text """
return view.scope_name(0).strip() == 'text.plain'
def matchAndSetSyntax(self, view):
firstLine = self.getPartialFirstLine(view)
for syntaxMapping in syntaxMappings.value():
syntaxFile, firstLineMatchRegexes = syntaxMapping
for firstLineMatchRegex in firstLineMatchRegexes:
if firstLineMatchRegex.search(firstLine) is not None:
view.set_syntax_file(syntaxFile)
return
def getPartialFirstLine(self, view):
region = view.line(0)
firstLineLengthMax = settings.get('first_line_length_max')
if firstLineLengthMax >= 0:
# if the first line is longer than the max line length,
# then use the max line length
# otherwise use the actual length of the first line
region = sublime.Region(0, min(region.end(), firstLineLengthMax))
return view.substr(region)
| Python | 0.999999 |
348896e6f9318755d9bbefdf94de18ed32b17d1d | Update item.py | item.py | item.py | import pygame
class Item(pygame.sprite.Sprite):
def __init__(self, level, *groups):
super(Item, self).__init__(*groups)
#the game level
self.level = level
#base image
self.level.animator.set_Img(0,5)
self.image = self.level.animator.get_Img().convert()
self.image.set_colorkey((255,0,0))
self.level.animator.set_Img(6,0)
self.secretimage = self.level.animator.get_Img().convert()
self.secretimage.set_colorkey((255,0,0))
#type
self.flavor_saver = ['gem', 'axe', 'sammich', 'telescope']
self.flavor = 'gem'
#location
self.firstflag = True
self.scrnx = 0
self.scrny = 0
self.mapx = 0
self.mapy = 0
def spawn(self,x,y):
self.scrnx = x
self.scrny = y
if self.firstflag:
self.mapx = x
self.mapy = y
self.firstflag = False
self.rect = pygame.rect.Rect((x * self.level.tilex, y * self.level.tiley), self.image.get_size())
def set_type(self, itype):
self.flavor = self.flavor_saver[itype]
if itype == 0:
xind = 6
yind = 0
if itype == 1:
xind = 6
yind = 5
if itype == 2:
xind = 6
yind = 4
if itype == 3:
xind = 6
yind = 3
self.level.animator.set_Img(xind,yind)
self.secretimage = self.level.animator.get_Img().convert()
self.secretimage.set_colorkey((255,0,0))
def reveal(self):
self.image = self.secretimage
def set_Index(self, x, y):
self.scrnx = x
self.rect.x = x*self.level.tilex
self.scrny = y
self.rect.y = y*self.level.tiley
def get_Index(self, axis):
if axis == 'X':
return self.scrnx
if axis == 'Y':
return self.scrny
return -1
| import pygame
class Item(pygame.sprite.Sprite):
def __init__(self, level, *groups):
super(Item, self).__init__(*groups)
#the game level
self.level = level
#base image
self.level.animator.set_Img(6,0)
self.image = self.level.animator.get_Img().convert()
self.image.set_colorkey((255,0,0))
#type
self.flavor_saver = ['gem', 'axe', 'sammich']
self.flavor = 'gem'
#location
self.firstflag = True
self.scrnx = 0
self.scrny = 0
self.mapx = 0
self.mapy = 0
def spawn(self,x,y):
self.scrnx = x
self.scrny = y
if self.firstflag:
self.mapx = x
self.mapy = y
self.firstflag = False
self.rect = pygame.rect.Rect((x * self.level.tilex, y * self.level.tiley), self.image.get_size())
def set_type(self, itype):
self.flavor = self.flavor_saver[itype]
if itype == 0:
xind = 6
yind = 0
if itype == 1:
xind = 6
yind = 5
if itype == 2:
xind = 6
yind = 4
self.level.animator.set_Img(xind,yind)
self.image = self.level.animator.get_Img().convert()
self.image.set_colorkey((255,0,0))
def set_Index(self, x, y):
self.scrnx = x
self.rect.x = x*self.level.tilex
self.scrny = y
self.rect.y = y*self.level.tiley
def get_Index(self, axis):
if axis == 'X':
return self.scrnx
if axis == 'Y':
return self.scrny
return -1
| Python | 0 |
e74420b90e83ade7956023eaf4ef2613e441a9ca | Fix linter error with ambiguous variable name 'l'. | bitfield/forms.py | bitfield/forms.py | from __future__ import absolute_import
from django.forms import CheckboxSelectMultiple, IntegerField, ValidationError
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from bitfield.types import BitHandler
class BitFieldCheckboxSelectMultiple(CheckboxSelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if isinstance(value, BitHandler):
value = [k for k, v in value if v]
elif isinstance(value, int):
real_value = []
div = 2
for (k, v) in self.choices:
if value % div != 0:
real_value.append(k)
value -= (value % div)
div *= 2
value = real_value
return super(BitFieldCheckboxSelectMultiple, self).render(
name, value, attrs=attrs)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if initial != data:
return True
initial_set = set([force_text(value) for value in initial])
data_set = set([force_text(value) for value in data])
return data_set != initial_set
class BitFormField(IntegerField):
def __init__(self, choices=(), widget=BitFieldCheckboxSelectMultiple, *args, **kwargs):
if isinstance(kwargs['initial'], int):
iv = kwargs['initial']
iv_list = []
for i in range(0, min(len(choices), 63)):
if (1 << i) & iv > 0:
iv_list += [choices[i][0]]
kwargs['initial'] = iv_list
self.widget = widget
super(BitFormField, self).__init__(widget=widget, *args, **kwargs)
self.choices = self.widget.choices = choices
def clean(self, value):
if not value:
return 0
# Assume an iterable which contains an item per flag that's enabled
result = BitHandler(0, [k for k, v in self.choices])
for k in value:
try:
setattr(result, str(k), True)
except AttributeError:
raise ValidationError('Unknown choice: %r' % (k,))
return int(result)
| from __future__ import absolute_import
from django.forms import CheckboxSelectMultiple, IntegerField, ValidationError
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from bitfield.types import BitHandler
class BitFieldCheckboxSelectMultiple(CheckboxSelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if isinstance(value, BitHandler):
value = [k for k, v in value if v]
elif isinstance(value, int):
real_value = []
div = 2
for (k, v) in self.choices:
if value % div != 0:
real_value.append(k)
value -= (value % div)
div *= 2
value = real_value
return super(BitFieldCheckboxSelectMultiple, self).render(
name, value, attrs=attrs)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if initial != data:
return True
initial_set = set([force_text(value) for value in initial])
data_set = set([force_text(value) for value in data])
return data_set != initial_set
class BitFormField(IntegerField):
def __init__(self, choices=(), widget=BitFieldCheckboxSelectMultiple, *args, **kwargs):
if isinstance(kwargs['initial'], int):
iv = kwargs['initial']
l = []
for i in range(0, min(len(choices), 63)):
if (1 << i) & iv > 0:
l += [choices[i][0]]
kwargs['initial'] = l
self.widget = widget
super(BitFormField, self).__init__(widget=widget, *args, **kwargs)
self.choices = self.widget.choices = choices
def clean(self, value):
if not value:
return 0
# Assume an iterable which contains an item per flag that's enabled
result = BitHandler(0, [k for k, v in self.choices])
for k in value:
try:
setattr(result, str(k), True)
except AttributeError:
raise ValidationError('Unknown choice: %r' % (k,))
return int(result)
| Python | 0 |
1725036d83f001493ff2c2d443c6814e0c491dfc | implement load_setting_file and get_weather_information | iwrs.py | iwrs.py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
""" [NAME] YOLP(気象情報) を使用して指定時間後(設定ファイル)に雨が降るかどうかを知らせる.
[DESCRIPTION] YOLP(気象情報) から降水強度を取得し,
指定時間後(設定ファイル)の降水強度が閾値(設定ファイル)以上である場合に,
指定の音声ファイル(設定ファイル)を再生する.
YOLP(気象情報):
https://developer.yahoo.co.jp/webapi/map/openlocalplatform/v1/weather.html
"""
from datetime import datetime
import json
import os
import requests
import configparser
def load_setting_file(file_path):
""" [FUNCTIONS] INI形式の設定ファイルを読み込んでConfigParserオブジェクト化し,
必須パラメタ有無のチェック, 値の範囲チェックを実施し, ConfigParserオブジェクトを返す.
必須パラメタ有無チェックおよび値の範囲チェックに問題がある場合は、
ValueErrorをraiseする.
Keyword arguments:
file_path -- 設定ファイル(INI形式)パス(絶対パス, あるいはコマンド実行場所からの相対パス)
Return value: 設定値(ConfigParserオブジェクト)
"""
if not os.path.exists(file_path):
raise OSError
inifile = configparser.SafeConfigParser()
inifile.read(file_path)
download_dir = inifile.get('yolp', 'download_dir')
if not (os.access(download_dir, os.W_OK)):
raise ValueError
after_minutes = int(inifile.get('weather', 'after_minutes'))
if after_minutes < 0 or after_minutes > 60:
raise ValueError
rainfall_threshold = float(inifile.get('weather', 'rainfall_threshold'))
if rainfall_threshold < 0.0:
raise ValueError
return inifile
def get_weather_information(settings):
""" [FUNCTIONS] YOLP(気象情報)から気象情報をJSON形式で取得し,
辞書オブジェクト化して返す.
取得したJSONは指定ディレクトリ(設定ファイル)に保存する.
Keyword arguments:
settings -- 設定値(ConfigParserオブジェクト)
Return value: YOLP(気象情報)から取得したデータ(辞書オブジェクト)
"""
appid = settings.get('yolp', 'appid')
coordinates = settings.get('yolp', 'coordinates')
date = datetime.now().strftime('%Y%m%d%H%M')
url = \
"https://map.yahooapis.jp/weather/V1/place?" \
"appid={appid}&" \
"coordinates={coordinates}&" \
"output=json&" \
"date={date}&" \
"past=0&" \
"interval=10".format(appid=appid, coordinates=coordinates, date=date)
weather_json = requests.get(url).json()
output_json_file = open(
settings.get('yolp', 'download_dir') + "/%s.json" % date, 'w')
json.dump(
weather_json,
output_json_file,
indent=4, separators=(',', ': '), ensure_ascii=False)
output_json_file.close
return weather_json
def parse_weather_information(weather_information_json, settings):
""" [FUNCTIONS] YOLP(気象情報)のデータ(JSON形式)を解析し、降水強度をチェックする.
指定時間後の降水強度が閾値以上の場合, 指定の音声ファイルを再生する.
Keywork arguments:
weather_information_json -- YOLP(気象情報)のデータ(辞書オブジェクト)
settings -- 設定値(ConfigParserオブジェクト)
"""
if __name__ == '__main__':
# TODO 引数処理
# 設定ファイル読み込み
# YOLP(気象情報呼び出し)
# 気象情報解析
settings = load_setting_file("./tests/data/settings_normal.ini")
json = get_weather_information(settings)
| #! /usr/bin/env python
# -*- coding:utf-8 -*-
""" [NAME] YOLP(気象情報) を使用して指定時間後(設定ファイル)に雨が降るかどうかを知らせる.
[DESCRIPTION] YOLP(気象情報) から降水強度を取得し,
指定時間後(設定ファイル)の降水強度が閾値(設定ファイル)以上である場合に,
指定の音声ファイル(設定ファイル)を再生する.
YOLP(気象情報):
https://developer.yahoo.co.jp/webapi/map/openlocalplatform/v1/weather.html
"""
import requests
def load_setting_file(file_path):
""" [FUNCTIONS] INI形式の設定ファイルを読み込んで辞書オブジェクト化し,
必須パラメタ有無のチェック, 値の範囲チェックを実施し, 辞書オブジェクトを返す.
必須パラメタ有無チェックおよび値の範囲チェックに問題がある場合は、
ValueErrorをraiseする.
Keyword arguments:
file_path -- 設定ファイル(INI形式)パス(絶対パス, あるいはコマンド実行場所からの相対パス)
Return value: 設定値(辞書オブジェクト)
"""
def get_weather_information(settings):
""" [FUNCTIONS] YOLP(気象情報)から気象情報をJSON形式で取得し,
辞書オブジェクト化して返す.
Keyword arguments:
settings -- 辞書オブジェクト化した設定値(load_setting_file関数にて得られる値)
Return value: YOLP(気象情報)から取得したデータ(辞書オブジェクト)
"""
def parse_weather_information(weather_information_json, settings):
""" [FUNCTIONS] YOLP(気象情報)のデータ(JSON形式)を解析し、降水強度をチェックする.
指定時間後の降水強度が閾値以上の場合, 指定の音声ファイルを再生する.
Keywork arguments:
weather_information_json -- YOLP(気象情報)のデータ(辞書オブジェクト)
settings -- 辞書オブジェクト化した設定値(load_setting_file関数にて得られる値)
"""
if __name__ == '__main__':
# TODO 引数処理
# 設定ファイル読み込み
# YOLP(気象情報呼び出し)
# 気象情報解析
print "c"
| Python | 0 |
2368d4ae7f49f5e4ea97d3ce8fab57e17c385246 | Change how keypoller checks for input | locust/input_events.py | locust/input_events.py | import gevent
import logging
import os
if os.name == "nt":
from win32api import STD_INPUT_HANDLE
from win32console import (
GetStdHandle,
KEY_EVENT,
ENABLE_ECHO_INPUT,
ENABLE_LINE_INPUT,
ENABLE_PROCESSED_INPUT,
)
else:
import sys
import select
import termios
import tty
class UnixKeyPoller:
def setup(self):
try:
self.stdin = sys.stdin.fileno()
self.tattr = termios.tcgetattr(self.stdin)
tty.setcbreak(self.stdin, termios.TCSANOW)
except termios.error:
pass
return self
def cleanup(self):
termios.tcsetattr(self.stdin, termios.TCSANOW, self.tattr)
def poll(_self):
dr, dw, de = select.select([sys.stdin], [], [], 0)
if not dr == []:
return sys.stdin.read(1)
return None
class WindowsKeyPoller:
def setup(self):
self.read_handle = GetStdHandle(STD_INPUT_HANDLE)
self.read_handle.SetConsoleMode(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT)
self.cur_event_length = 0
self.cur_keys_length = 0
self.captured_chars = []
return self
def cleanup(self):
pass
def poll(self):
if self.captured_chars:
return self.captured_chars.pop(0)
events_peek = self.read_handle.PeekConsoleInput(10000)
if not events_peek:
return None
if not len(events_peek) == self.cur_event_length:
for cur_event in events_peek[self.cur_event_length :]:
if cur_event.EventType == KEY_EVENT:
if ord(cur_event.Char) and cur_event.KeyDown:
cur_char = str(cur_event.Char)
self.captured_chars.append(cur_char)
self.cur_event_length = len(events_peek)
if self.captured_chars:
return self.captured_chars.pop(0)
else:
return None
def get_poller():
if os.name == "nt":
poller = WindowsKeyPoller()
else:
poller = UnixKeyPoller()
poller.setup()
return poller
def input_listener(key_to_func_map):
def input_listener_func():
try:
poller = get_poller()
while True:
input = poller.poll()
if input is not None:
for key in key_to_func_map:
if input == key:
key_to_func_map[key]()
else:
gevent.sleep(0.2)
except Exception:
pass
finally:
poller.cleanup()
return input_listener_func
| import gevent
import logging
import os
if os.name == "nt":
from win32api import STD_INPUT_HANDLE
from win32console import (
GetStdHandle,
KEY_EVENT,
ENABLE_ECHO_INPUT,
ENABLE_LINE_INPUT,
ENABLE_PROCESSED_INPUT,
)
else:
import sys
import select
import termios
import tty
class KeyPoller:
def __enter__(self):
if os.name == "nt":
self.read_handle = GetStdHandle(STD_INPUT_HANDLE)
self.read_handle.SetConsoleMode(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT)
self.cur_event_length = 0
self.cur_keys_length = 0
self.captured_chars = []
else:
try:
self.stdin = sys.stdin.fileno()
self.tattr = termios.tcgetattr(self.stdin)
tty.setcbreak(self.stdin, termios.TCSANOW)
except termios.error:
pass
return self
def __exit__(self, type, value, traceback):
if not os.name == "nt" and hasattr(self, "tattr"):
termios.tcsetattr(self.stdin, termios.TCSANOW, self.tattr)
def poll(self):
if os.name == "nt":
if self.captured_chars:
return self.captured_chars.pop(0)
events_peek = self.read_handle.PeekConsoleInput(10000)
if not events_peek:
return None
if not len(events_peek) == self.cur_event_length:
for cur_event in events_peek[self.cur_event_length :]:
if cur_event.EventType == KEY_EVENT:
if ord(cur_event.Char) and cur_event.KeyDown:
cur_char = str(cur_event.Char)
self.captured_chars.append(cur_char)
self.cur_event_length = len(events_peek)
if self.captured_chars:
return self.captured_chars.pop(0)
else:
return None
else:
dr, dw, de = select.select([sys.stdin], [], [], 0)
if not dr == []:
return sys.stdin.read(1)
return None
def input_listener(key_to_func_map):
def input_listener_func():
with KeyPoller() as poller:
map = key_to_func_map
while True:
input = poller.poll()
if input is not None:
logging.debug(f"Input key: {input}")
for key in map:
if input == key:
map[key]()
else:
gevent.sleep(0.2)
return input_listener_func
| Python | 0.000005 |
5fc8258c4d3819b6a4b23819fd3c4578510dd633 | Allow www.lunahealing.ca as a domain | lunahealing/site_settings/prod.py | lunahealing/site_settings/prod.py | # Django settings for quotations project.
import os
from lunahealing.site_settings.common import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {
'default': dj_database_url.config()
}
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_QUERYSTRING_AUTH = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '//s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '//s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
INSTALLED_APPS.extend([
's3_folder_storage',
'storages',
])
ALLOWED_HOSTS = ['lunahealing.herokuapp.com', 'www.lunahealing.ca']
| # Django settings for quotations project.
import os
from lunahealing.site_settings.common import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {
'default': dj_database_url.config()
}
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_QUERYSTRING_AUTH = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '//s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '//s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
INSTALLED_APPS.extend([
's3_folder_storage',
'storages',
])
ALLOWED_HOSTS = ['lunahealing.herokuapp.com', '*.lunahealing.ca']
| Python | 0 |
ed472902f71f39cf09eca5ee9193bcf99283b566 | Remove unused code | room.py | room.py | # Each PS room joined creates an object here.
# Objects control settings on a room-per-room basis, meaning every room can
# be treated differently.
from plugins.tournaments import Tournament
class Room:
def __init__(self, room, data):
if not data:
# This is a hack to support both strings and dicts as input to the class
data = {'moderate':False, 'allow games':False}
self.users = {}
self.loading = True
self.title = room
self.moderate = data['moderate']
self.allowGames = data['allow games']
self.tour = None
self.game = None
def doneLoading(self):
self.loading = False
def addUserlist(self, users):
self.users = {u[1:]:u[0] for u in users.split(',')}
def addUser(self, user, auth):
if user not in self.users:
self.users[user] = auth
def removeUser(self, user):
if user in self.users:
self.users.pop(user)
def renamedUser(self, old, new):
self.removeUser(old)
self.addUser(new[1:], new[0])
def createTour(self, ws):
self.tour = Tournament(ws, self.title)
def endTour(self):
self.tour = None
| # Each PS room joined creates an object here.
# Objects control settings on a room-per-room basis, meaning every room can
# be treated differently.
from plugins.tournaments import Tournament
class Room:
def __init__(self, room, data):
if not data:
# This is a hack to support both strings and dicts as input to the class
data = {'moderate':False, 'allow games':False}
self.users = {}
self.loading = True
self.title = room
self.moderate = data['moderate']
self.allowGames = data['allow games']
self.tour = None
self.game = None
def doneLoading(self):
self.loading = False
def addUserlist(self, users):
self.users = {u[1:]:u[0] for u in users.split(',')}
def addUser(self, user, auth):
if user not in self.users:
self.users[user] = auth
def removeUser(self, user):
if user in self.users:
self.users.pop(user)
def renamedUser(self, old, new):
self.removeUser(old)
self.addUser(new[1:], new[0])
def allowGames(self, yesNo):
self.allowGames = yesNo
def createTour(self, ws):
self.tour = Tournament(ws, self.title)
def endTour(self):
self.tour = None
| Python | 0.000006 |
bd313ff4ce69e7b9a9765672442ef6cf9fa00dba | Fix parameter validation tests | tests/core/parameter_validation/test_parameter_clone.py | tests/core/parameter_validation/test_parameter_clone.py | import os
from openfisca_core.parameters import ParameterNode
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
year = 2016
def test_clone():
path = os.path.join(BASE_DIR, 'filesystem_hierarchy')
parameters = ParameterNode('', directory_path = path)
parameters_at_instant = parameters('2016-01-01')
assert parameters_at_instant.node1.param == 1.0
clone = parameters.clone()
clone_at_instant = clone('2016-01-01')
assert clone_at_instant.node1.param == 1.0
assert id(clone) != id(parameters)
assert id(clone.node1) != id(parameters.node1)
assert id(clone.node1.param) != id(parameters.node1.param)
def test_clone_parameter(tax_benefit_system):
param = tax_benefit_system.parameters.taxes.income_tax_rate
clone = param.clone()
assert clone is not param
assert clone.values_list is not param.values_list
assert clone.values_list[0] is not param.values_list[0]
assert clone.values_list == param.values_list
def test_clone_parameter_node(tax_benefit_system):
node = tax_benefit_system.parameters.taxes
clone = node.clone()
assert clone is not node
assert clone.income_tax_rate is not node.income_tax_rate
assert clone.children['income_tax_rate'] is not node.children['income_tax_rate']
def test_clone_scale(tax_benefit_system):
scale = tax_benefit_system.parameters.taxes.social_security_contribution
clone = scale.clone()
assert clone.brackets[0] is not scale.brackets[0]
assert clone.brackets[0].rate is not scale.brackets[0].rate
def test_deep_edit(tax_benefit_system):
parameters = tax_benefit_system.parameters
clone = parameters.clone()
param = parameters.taxes.income_tax_rate
clone_param = clone.taxes.income_tax_rate
original_value = param.values_list[0].value
clone_param.values_list[0].value = 100
assert param.values_list[0].value == original_value
scale = parameters.taxes.social_security_contribution
clone_scale = clone.taxes.social_security_contribution
original_scale_value = scale.brackets[0].rate.values_list[0].value
clone_scale.brackets[0].rate.values_list[0].value = 10
assert scale.brackets[0].rate.values_list[0].value == original_scale_value
| # -*- coding: utf-8 -*-
from ..test_countries import tax_benefit_system
import os
from openfisca_core.parameters import ParameterNode
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
year = 2016
def test_clone():
path = os.path.join(BASE_DIR, 'filesystem_hierarchy')
parameters = ParameterNode('', directory_path = path)
parameters_at_instant = parameters('2016-01-01')
assert parameters_at_instant.node1.param == 1.0
clone = parameters.clone()
clone_at_instant = clone('2016-01-01')
assert clone_at_instant.node1.param == 1.0
assert id(clone) != id(parameters)
assert id(clone.node1) != id(parameters.node1)
assert id(clone.node1.param) != id(parameters.node1.param)
def test_clone_parameter():
param = tax_benefit_system.parameters.taxes.income_tax_rate
clone = param.clone()
assert clone is not param
assert clone.values_list is not param.values_list
assert clone.values_list[0] is not param.values_list[0]
assert clone.values_list == param.values_list
def test_clone_parameter_node():
node = tax_benefit_system.parameters.taxes
clone = node.clone()
assert clone is not node
assert clone.income_tax_rate is not node.income_tax_rate
assert clone.children['income_tax_rate'] is not node.children['income_tax_rate']
def test_clone_scale():
scale = tax_benefit_system.parameters.taxes.social_security_contribution
clone = scale.clone()
assert clone.brackets[0] is not scale.brackets[0]
assert clone.brackets[0].rate is not scale.brackets[0].rate
def test_deep_edit():
parameters = tax_benefit_system.parameters
clone = parameters.clone()
param = parameters.taxes.income_tax_rate
clone_param = clone.taxes.income_tax_rate
original_value = param.values_list[0].value
clone_param.values_list[0].value = 100
assert param.values_list[0].value == original_value
scale = parameters.taxes.social_security_contribution
clone_scale = clone.taxes.social_security_contribution
original_scale_value = scale.brackets[0].rate.values_list[0].value
clone_scale.brackets[0].rate.values_list[0].value = 10
assert scale.brackets[0].rate.values_list[0].value == original_scale_value
| Python | 0.000003 |
9bbef1ca463f0f83841c6b61ea8aa56c5454dadc | increment stop_id..... | ipa_db.py | ipa_db.py | import sqlite3
class Db:
def __init__(self, db_name):
self.conn = sqlite3.connect(db_name)
def __del__(self):
self.conn.close()
def __execute(self, sql, args = tuple()):
c = self.conn.cursor()
c.execute(sql, args)
return c
def __commit(self):
self.conn.commit()
def remove(self):
self.__execute('DROP TABLE IF EXISTS trains')
self.__execute('DROP TABLE IF EXISTS schedule')
self.__commit()
def create(self):
self.__execute('''CREATE TABLE trains(
train_id integer PRIMARY KEY,
train_number text,
train_operator text,
train_date text,
train_relation text
)''')
self.__execute('''CREATE TABLE schedule(
train_id integer,
stop_id integer,
stop_name text,
sched_arrive_time text,
sched_arrive_delay text,
sched_departure_time text,
sched_departure_delay text
)''')
self.__commit()
def get_trains(self):
for row in self.__execute('SELECT DISTINCT train_number FROM trains'):
yield row[0]
def update_train(self, id, number, operator, date, relation):
self.__execute('DELETE FROM trains WHERE train_id = ?', (id,))
self.__execute('''INSERT INTO trains VALUES (
?, ?, ?, ?, ?)''',
(id, number, operator, date, relation)
)
self.__commit()
def update_schedule(self, id, schedule):
self.__execute('DELETE FROM schedule WHERE train_id = ?', (id,))
stop_id = 1
for stop in schedule:
self.__execute('''INSERT INTO schedule VALUES (
?, ?, ?, ?, ?, ?, ?)''',
(id, stop_id, stop[0], stop[1], stop[2], stop[3], stop[4])
)
stop_id += 1
self.__commit()
def get_train_schedules(self, name):
for row in self.__execute('SELECT train_id FROM trains WHERE train_number = ? ORDER BY train_id', (name,)):
yield row[0]
def get_schedule_info(self, id):
for row in self.__execute('SELECT * FROM schedule WHERE train_id = ? ORDER BY stop_id', (id,)):
yield row
| import sqlite3
class Db:
def __init__(self, db_name):
self.conn = sqlite3.connect(db_name)
def __del__(self):
self.conn.close()
def __execute(self, sql, args = tuple()):
c = self.conn.cursor()
c.execute(sql, args)
return c
def __commit(self):
self.conn.commit()
def remove(self):
self.__execute('DROP TABLE IF EXISTS trains')
self.__execute('DROP TABLE IF EXISTS schedule')
self.__commit()
def create(self):
self.__execute('''CREATE TABLE trains(
train_id integer PRIMARY KEY,
train_number text,
train_operator text,
train_date text,
train_relation text
)''')
self.__execute('''CREATE TABLE schedule(
train_id integer,
stop_id integer,
stop_name text,
sched_arrive_time text,
sched_arrive_delay text,
sched_departure_time text,
sched_departure_delay text
)''')
self.__commit()
def get_trains(self):
for row in self.__execute('SELECT DISTINCT train_number FROM trains'):
yield row[0]
def update_train(self, id, number, operator, date, relation):
self.__execute('DELETE FROM trains WHERE train_id = ?', (id,))
self.__execute('''INSERT INTO trains VALUES (
?, ?, ?, ?, ?)''',
(id, number, operator, date, relation)
)
self.__commit()
def update_schedule(self, id, schedule):
self.__execute('DELETE FROM schedule WHERE train_id = ?', (id,))
stop_id = 1
for stop in schedule:
self.__execute('''INSERT INTO schedule VALUES (
?, ?, ?, ?, ?, ?, ?)''',
(id, stop_id, stop[0], stop[1], stop[2], stop[3], stop[4])
)
self.__commit()
def get_train_schedules(self, name):
for row in self.__execute('SELECT train_id FROM trains WHERE train_number = ? ORDER BY train_id', (name,)):
yield row[0]
def get_schedule_info(self, id):
for row in self.__execute('SELECT * FROM schedule WHERE train_id = ? ORDER BY stop_id', (id,)):
yield row
| Python | 0 |
175a8007ef06bbf3a01943c161a162adbf23d7fd | Use tf.gfile instead of os.path in sequence_generator.py for internal compatibility. (#178) | magenta/lib/sequence_generator.py | magenta/lib/sequence_generator.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for sequence generators.
Provides a uniform interface for interacting with generators for any model.
"""
import abc
# internal imports
import tensorflow as tf
class SequenceGeneratorException(Exception):
"""Generic exception for sequence generation errors."""
pass
class BaseSequenceGenerator(object):
"""Abstract class for generators."""
__metaclass__ = abc.ABCMeta
def __init__(self, details, checkpoint):
"""Constructs a BaseSequenceGenerator.
Args:
details: A generator_pb2.GeneratorDetails for this generator.
checkpoint: Where to look for the most recent model checkpoint. Either a
directory to be used with tf.train.latest_checkpoint or the path to a
single checkpoint file.
"""
self._details = details
self._checkpoint = checkpoint
self._initialized = False
def get_details(self):
"""Returns a GeneratorDetails description of this generator."""
return self._details
@abc.abstractmethod
def _initialize(self, checkpoint_file):
"""Implementation for building the TF graph.
Args:
checkpoint_file: The path to the checkpoint file that should be used.
"""
pass
@abc.abstractmethod
def _close(self):
"""Implementation for closing the TF session."""
pass
@abc.abstractmethod
def _generate(self, generate_sequence_request):
"""Implementation for sequence generation based on request.
The implementation can assume that _initialize has been called before this
method is called.
Args:
generate_sequence_request: The request for generating a sequence
Returns:
A GenerateSequenceResponse proto.
"""
pass
def initialize(self):
"""Builds the TF graph and loads the checkpoint.
If the graph has already been initialized, this is a no-op.
Raises:
SequenceGeneratorException: If the checkpoint cannot be found.
"""
if not self._initialized:
if not tf.gfile.Exists(self._checkpoint):
raise SequenceGeneratorException(
'Checkpoint path does not exist: %s' % (self._checkpoint))
checkpoint_file = self._checkpoint
# If this is a directory, try to determine the latest checkpoint in it.
if tf.gfile.IsDirectory(checkpoint_file):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_file)
if checkpoint_file is None:
raise SequenceGeneratorException(
'No checkpoint file found in directory: %s' % self._checkpoint)
if (not tf.gfile.Exists(checkpoint_file) or
tf.gfile.IsDirectory(checkpoint_file)):
raise SequenceGeneratorException(
'Checkpoint path is not a file: %s (supplied path: %s)' % (
checkpoint_file, self._checkpoint))
self._initialize(checkpoint_file)
self._initialized = True
def close(self):
"""Closes the TF session.
If the session was already closed, this is a no-op.
"""
if self._initialized:
self._close()
self._initialized = False
def __enter__(self):
"""When used as a context manager, initializes the TF session."""
self.initialize()
return self
def __exit__(self, *args):
"""When used as a context manager, closes the TF session."""
self.close()
def generate(self, generate_sequence_request):
"""Generates a sequence from the model based on the request.
Also initializes the TF graph if not yet initialized.
Args:
generate_sequence_request: The request for generating a sequence
Returns:
A GenerateSequenceResponse proto.
"""
self.initialize()
return self._generate(generate_sequence_request)
| # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for sequence generators.
Provides a uniform interface for interacting with generators for any model.
"""
import abc
import os
# internal imports
import tensorflow as tf
class SequenceGeneratorException(Exception):
"""Generic exception for sequence generation errors."""
pass
class BaseSequenceGenerator(object):
"""Abstract class for generators."""
__metaclass__ = abc.ABCMeta
def __init__(self, details, checkpoint):
"""Constructs a BaseSequenceGenerator.
Args:
details: A generator_pb2.GeneratorDetails for this generator.
checkpoint: Where to look for the most recent model checkpoint. Either a
directory to be used with tf.train.latest_checkpoint or the path to a
single checkpoint file.
"""
self._details = details
self._checkpoint = checkpoint
self._initialized = False
def get_details(self):
"""Returns a GeneratorDetails description of this generator."""
return self._details
@abc.abstractmethod
def _initialize(self, checkpoint_file):
"""Implementation for building the TF graph.
Args:
checkpoint_file: The path to the checkpoint file that should be used.
"""
pass
@abc.abstractmethod
def _close(self):
"""Implementation for closing the TF session."""
pass
@abc.abstractmethod
def _generate(self, generate_sequence_request):
"""Implementation for sequence generation based on request.
The implementation can assume that _initialize has been called before this
method is called.
Args:
generate_sequence_request: The request for generating a sequence
Returns:
A GenerateSequenceResponse proto.
"""
pass
def initialize(self):
"""Builds the TF graph and loads the checkpoint.
If the graph has already been initialized, this is a no-op.
Raises:
SequenceGeneratorException: If the checkpoint cannot be found.
"""
if not self._initialized:
if not os.path.exists(self._checkpoint):
raise SequenceGeneratorException(
'Checkpoint path does not exist: %s' % (self._checkpoint))
checkpoint_file = self._checkpoint
# If this is a directory, try to determine the latest checkpoint in it.
if os.path.isdir(checkpoint_file):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_file)
if not os.path.isfile(checkpoint_file):
raise SequenceGeneratorException(
'Checkpoint path is not a file: %s (supplied path: %s)' % (
checkpoint_file, self._checkpoint))
self._initialize(checkpoint_file)
self._initialized = True
def close(self):
"""Closes the TF session.
If the session was already closed, this is a no-op.
"""
if self._initialized:
self._close()
self._initialized = False
def __enter__(self):
"""When used as a context manager, initializes the TF session."""
self.initialize()
return self
def __exit__(self, *args):
"""When used as a context manager, closes the TF session."""
self.close()
def generate(self, generate_sequence_request):
"""Generates a sequence from the model based on the request.
Also initializes the TF graph if not yet initialized.
Args:
generate_sequence_request: The request for generating a sequence
Returns:
A GenerateSequenceResponse proto.
"""
self.initialize()
return self._generate(generate_sequence_request)
| Python | 0 |
1f752237d83c486b94ddcc7f5e3b42eb5951a60b | remove unused imports | pabot/SharedLibrary.py | pabot/SharedLibrary.py | from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Remote import Remote
from robot.api import logger
from robot.running.testlibraries import TestLibrary
from robotremoteserver import RemoteLibraryFactory
from .pabotlib import PABOT_QUEUE_INDEX
class SharedLibrary(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def __init__(self, name):
"""
Import a library so that the library instance is shared between executions.
[https://pabot.org/PabotLib.html?ref=log#import-shared-library|Open online docs.]
"""
# FIXME: RELATIVE IMPORTS WITH FILE NAME
self._remote = None
if BuiltIn().get_variable_value('${%s}' % PABOT_QUEUE_INDEX) is None:
logger.debug("Not currently running pabot. Importing library for this process.")
self._lib = RemoteLibraryFactory(TestLibrary(name).get_instance())
return
uri = BuiltIn().get_variable_value('${PABOTLIBURI}')
logger.debug('PabotLib URI %r' % uri)
remotelib = Remote(uri) if uri else None
if remotelib:
try:
port = remotelib.run_keyword("import_shared_library", [name], {})
except RuntimeError:
logger.error('No connection - is pabot called with --pabotlib option?')
raise
self._remote = Remote("http://127.0.0.1:%s" % port)
logger.debug("Lib imported with name %s from http://127.0.0.1:%s" % (name, port))
else:
logger.error('No connection - is pabot called with --pabotlib option?')
raise AssertionError('No connection to pabotlib')
def get_keyword_names(self):
if self._remote:
return self._remote.get_keyword_names()
return self._lib.get_keyword_names()
def run_keyword(self, name, args, kwargs):
if self._remote:
return self._remote.run_keyword(name, args, kwargs)
result = self._lib.run_keyword(name, args, kwargs)
if result['status'] == 'FAIL':
raise AssertionError(result['error'])
return result['return'] | from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Remote import Remote
from robot.api import logger
from robot.running.testlibraries import TestLibrary
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.model import Keyword
from robotremoteserver import RemoteLibraryFactory
from .pabotlib import PABOT_QUEUE_INDEX
class SharedLibrary(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def __init__(self, name):
"""
Import a library so that the library instance is shared between executions.
[https://pabot.org/PabotLib.html?ref=log#import-shared-library|Open online docs.]
"""
# FIXME: RELATIVE IMPORTS WITH FILE NAME
self._remote = None
if BuiltIn().get_variable_value('${%s}' % PABOT_QUEUE_INDEX) is None:
logger.debug("Not currently running pabot. Importing library for this process.")
self._lib = RemoteLibraryFactory(TestLibrary(name).get_instance())
return
uri = BuiltIn().get_variable_value('${PABOTLIBURI}')
logger.debug('PabotLib URI %r' % uri)
remotelib = Remote(uri) if uri else None
if remotelib:
try:
port = remotelib.run_keyword("import_shared_library", [name], {})
except RuntimeError:
logger.error('No connection - is pabot called with --pabotlib option?')
raise
self._remote = Remote("http://127.0.0.1:%s" % port)
logger.debug("Lib imported with name %s from http://127.0.0.1:%s" % (name, port))
else:
logger.error('No connection - is pabot called with --pabotlib option?')
raise AssertionError('No connection to pabotlib')
def get_keyword_names(self):
if self._remote:
return self._remote.get_keyword_names()
return self._lib.get_keyword_names()
def run_keyword(self, name, args, kwargs):
if self._remote:
return self._remote.run_keyword(name, args, kwargs)
result = self._lib.run_keyword(name, args, kwargs)
if result['status'] == 'FAIL':
raise AssertionError(result['error'])
return result['return'] | Python | 0.000001 |
140543f86b3947514c199ca770ee4799d2fe96a6 | clean up | jsol.py | jsol.py | #!/usr/bin/env python
import json
def _add(args, env):
args = map(lambda x: _Eval(x, env), args)
return sum(args)
def _sub(args, env):
args = map(lambda x: _Eval(x, env), args)
return reduce(lambda x, y: x - y, args)
def _mult(args, env):
args = map(lambda x: _Eval(x, env), args)
return reduce(lambda x, y: x * y, args)
def _div(args, env):
args = map(lambda x: _Eval(x, env), args)
return reduce(lambda x, y: x / y, args)
def _print(args, env):
args = map(lambda x: _Eval(x, env), args)
for i in args:
print i,
print
return 0
def _lt(args, env):
return _Eval(args[0], env) < _Eval(args[1], env)
def _gt(args, env):
return _Eval(args[0], env) > _Eval(args[1], env)
def _eq(args, env):
return _Eval(args[0], env) == _Eval(args[1], env)
OPS = {
'+': _add,
'-': _sub,
'*': _mult,
'/': _div,
'<': _lt,
'>': _gt,
'=': _eq,
'print': _print
}
def _Error(message):
print message
exit(0)
def ExecuteStatements(statements, env):
for statement in statements[:-1]:
_Eval(statement, env)
return _Eval(statements[-1], env)
def _IfBlock(exp, env):
if _Eval(exp[1], env):
return ExecuteStatements(exp[2], env)
index = 3
while exp[index] == 'elif':
if _Eval(exp[index + 1], env):
return ExecuteStatements(exp[index + 2], env)
index += 3
return ExecuteStatements(exp[-1], env)
def _ForBlock(exp, env):
_Eval(exp[1], env)
while _Eval(exp[2], env):
ret = ExecuteStatements(exp[-1], env)
_Eval(exp[3], env)
return ret
def _Eval(exp, env):
if type(exp) == dict and 'def' in exp:
return ExecuteStatements(exp['def'], env)
if type(exp) in [int, long, float, bool]:
return exp
if type(exp) in [str, unicode]:
if exp in OPS:
_Error('%s is a keyword.' % exp)
if exp not in env:
_Error('Variable %s not bound.' % exp)
return env[exp]
elif type(exp) == dict:
ret = 0
for var in exp:
ret = env[var] = _Eval(exp[var], env)
return ret
elif type(exp) == list:
name = exp[0]
args = exp[1:]
if name in OPS:
return OPS[name](exp[1:], env)
if name == 'if':
return _IfBlock(exp, env)
if name == 'for':
return _ForBlock(exp, env)
if name not in env:
_Error('Function %s not in environment.' % exp)
f = env[name]
new_env = {}
for (p, v) in zip(f['params'], args):
new_env[p] = _Eval(v, env)
return _Eval(f, new_env)
else:
return _Eval(exp, env)
_Error('You shouldn\'t be here! ' + exp.__str__())
def Eval(json_dict):
return _Eval(json_dict['main'], json_dict)
def main():
with open('main.jsol', 'r') as f:
j = json.load(f)
print Eval(j)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import json
def add(args, env):
args = map(lambda x: eval(x, env), args)
return sum(args)
def sub(args, env):
args = map(lambda x: eval(x, env), args)
return reduce(lambda x, y: x - y, args)
def mult(args, env):
args = map(lambda x: eval(x, env), args)
return reduce(lambda x, y: x * y, args)
def div(args, env):
args = map(lambda x: eval(x, env), args)
return reduce(lambda x, y: x / y, args)
def print_(args, env):
args = map(lambda x: eval(x, env), args)
for i in args:
print i,
print
return 0
def lt(args, env):
return eval(args[0], env) < eval(args[1], env)
def gt(args, env):
return eval(args[0], env) > eval(args[1], env)
def eq(args, env):
return eval(args[0], env) == eval(args[1], env)
OPS = {
'+': add,
'-': sub,
'*': mult,
'/': div,
'<': lt,
'>': gt,
'=': eq,
'print': print_
}
def Error(message):
print message
exit(0)
def ExecuteStatements(statements, env):
for statement in statements[:-1]:
eval(statement, env)
return eval(statements[-1], env)
def IfBlock(exp, env):
if eval(exp[1], env):
return ExecuteStatements(exp[2], env)
index = 3
while exp[index] == 'elif':
if eval(exp[index + 1], env):
return ExecuteStatements(exp[index + 2], env)
index += 3
return ExecuteStatements(exp[-1], env)
def ForBlock(exp, env):
eval(exp[1], env)
while eval(exp[2], env):
ret = ExecuteStatements(exp[-1], env)
eval(exp[3], env)
return ret
def eval(exp, env):
if type(exp) == dict and 'def' in exp:
return ExecuteStatements(exp['def'], env)
if type(exp) in [int, long, float, bool]:
return exp
if type(exp) in [str, unicode]:
if exp in OPS:
Error('%s is a keyword.' % exp)
if exp not in env:
Error('Variable %s not bound.' % exp)
return env[exp]
elif type(exp) == dict:
ret = 0
for var in exp:
ret = env[var] = eval(exp[var], env)
return ret
elif type(exp) == list:
name = exp[0]
args = exp[1:]
if name in OPS:
return OPS[name](exp[1:], env)
if name == 'if':
return IfBlock(exp, env)
if name == 'for':
return ForBlock(exp, env)
if name not in env:
Error('Function %s not in environment.' % exp)
f = env[name]
new_env = {}
for (p, v) in zip(f['params'], args):
new_env[p] = eval(v, env)
return eval(f, new_env)
else:
return eval(exp, env)
Error('You shouldn\'t be here! ' + exp.__str__())
def main():
with open('main.jsol', 'r') as f:
j = json.load(f)
print eval(j['main'], j)
if __name__ == '__main__':
main()
| Python | 0.000001 |
1f4ef496f932ec2a12d348b0c90b1f57d6ef9e20 | update version number | nutils/__init__.py | nutils/__init__.py | import numpy
from distutils.version import LooseVersion
assert LooseVersion(numpy.version.version) >= LooseVersion('1.8'), 'nutils requires numpy 1.8 or higher, got %s' % numpy.version.version
version = '2.0beta'
_ = numpy.newaxis
__all__ = [ '_', 'numpy', 'core', 'numeric', 'element', 'function',
'mesh', 'plot', 'library', 'topology', 'util', 'matrix', 'parallel', 'log',
'debug', 'cache', 'transform', 'rational' ]
| import numpy
from distutils.version import LooseVersion
assert LooseVersion(numpy.version.version) >= LooseVersion('1.8'), 'nutils requires numpy 1.8 or higher, got %s' % numpy.version.version
version = '1.dev'
_ = numpy.newaxis
__all__ = [ '_', 'numpy', 'core', 'numeric', 'element', 'function',
'mesh', 'plot', 'library', 'topology', 'util', 'matrix', 'parallel', 'log',
'debug', 'cache', 'transform', 'rational' ]
| Python | 0.000002 |
577697301f8682293a00a793807687df9d0ce679 | Fix fetch_ceph_keys to run in python3 | docker/ceph/ceph-mon/fetch_ceph_keys.py | docker/ceph/ceph-mon/fetch_ceph_keys.py | #!/usr/bin/python
# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a stripped down version of an ansible module I wrote in Yaodu to
# achieve the same goals we have for Kolla. I have relicensed it for Kolla
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp
# Basically this module will fetch the admin and mon keyrings as well as the
# monmap file. It then hashes the content, compresses them, and finally it
# converts them to base64 to be safely transported around with ansible
import base64
import hashlib
import json
import os
import sys
import zlib
def json_exit(msg=None, failed=False, changed=False):
if type(msg) is not dict:
msg = {'msg': str(msg)}
msg.update({'failed': failed, 'changed': changed})
print(json.dumps(msg))
sys.exit()
def read_file(filename):
filename_path = os.path.join('/etc/ceph', filename)
if not os.path.exists(filename_path):
json_exit("file not found: {}".format(filename_path), failed=True)
if not os.access(filename_path, os.R_OK):
json_exit("file not readable: {}".format(filename_path), failed=True)
with open(filename_path, 'rb') as f:
raw_data = f.read()
# TODO(mnasiadka): Remove sha1 in U
return {'content': (base64.b64encode(zlib.compress(raw_data))).decode(),
'sha1': hashlib.sha1(raw_data).hexdigest(),
'sha256': hashlib.sha256(raw_data).hexdigest(),
'filename': filename}
def main():
admin_keyring = 'ceph.client.admin.keyring'
mon_keyring = 'ceph.client.mon.keyring'
rgw_keyring = 'ceph.client.radosgw.keyring'
monmap = 'ceph.monmap'
files = [admin_keyring, mon_keyring, rgw_keyring, monmap]
json_exit({filename: read_file(filename) for filename in files})
if __name__ == '__main__':
main()
| #!/usr/bin/python
# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a stripped down version of an ansible module I wrote in Yaodu to
# achieve the same goals we have for Kolla. I have relicensed it for Kolla
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp
# Basically this module will fetch the admin and mon keyrings as well as the
# monmap file. It then hashes the content, compresses them, and finally it
# converts them to base64 to be safely transported around with ansible
import base64
import hashlib
import json
import os
import sys
import zlib
def json_exit(msg=None, failed=False, changed=False):
if type(msg) is not dict:
msg = {'msg': str(msg)}
msg.update({'failed': failed, 'changed': changed})
print(json.dumps(msg))
sys.exit()
def read_file(filename):
filename_path = os.path.join('/etc/ceph', filename)
if not os.path.exists(filename_path):
json_exit("file not found: {}".format(filename_path), failed=True)
if not os.access(filename_path, os.R_OK):
json_exit("file not readable: {}".format(filename_path), failed=True)
with open(filename_path, 'rb') as f:
raw_data = f.read()
# TODO(mnasiadka): Remove sha1 in U
return {'content': base64.b64encode(zlib.compress(raw_data)),
'sha1': hashlib.sha1(raw_data).hexdigest(),
'sha256': hashlib.sha256(raw_data).hexdigest(),
'filename': filename}
def main():
admin_keyring = 'ceph.client.admin.keyring'
mon_keyring = 'ceph.client.mon.keyring'
rgw_keyring = 'ceph.client.radosgw.keyring'
monmap = 'ceph.monmap'
files = [admin_keyring, mon_keyring, rgw_keyring, monmap]
json_exit({filename: read_file(filename) for filename in files})
if __name__ == '__main__':
main()
| Python | 0.000016 |
b4acd028b613a721ffbe5a3136700f190635f7c9 | Fix import. | tests/basics/class_store_class.py | tests/basics/class_store_class.py | # Inspired by urlparse.py from CPython 3.3 stdlib
# There was a bug in MicroPython that under some conditions class stored
# in instance attribute later was returned "bound" as if it was a method,
# which caused class constructor to receive extra argument.
from _collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'foo bar')
class _ResultMixinStr(object):
def encode(self):
return self._encoded_counterpart(*(x.encode() for x in self))
class _ResultMixinBytes(object):
def decode(self):
return self._decoded_counterpart(*(x.decode() for x in self))
class DefragResult(_DefragResultBase, _ResultMixinStr):
pass
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
pass
DefragResult._encoded_counterpart = DefragResultBytes
DefragResultBytes._decoded_counterpart = DefragResult
# Due to differences in type and native subclass printing,
# the best thing we can do here is to just test that no exceptions
# happen
#print(DefragResult, DefragResult._encoded_counterpart)
#print(DefragResultBytes, DefragResultBytes._decoded_counterpart)
o1 = DefragResult("a", "b")
#print(o1, type(o1))
o2 = DefragResultBytes("a", "b")
#print(o2, type(o2))
#print(o1._encoded_counterpart)
_o1 = o1.encode()
print(_o1[0], _o1[1])
#print(_o1, type(_o1))
print("All's ok")
| # Inspired by urlparse.py from CPython 3.3 stdlib
# There was a bug in MicroPython that under some conditions class stored
# in instance attribute later was returned "bound" as if it was a method,
# which caused class constructor to receive extra argument.
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'foo bar')
class _ResultMixinStr(object):
def encode(self):
return self._encoded_counterpart(*(x.encode() for x in self))
class _ResultMixinBytes(object):
def decode(self):
return self._decoded_counterpart(*(x.decode() for x in self))
class DefragResult(_DefragResultBase, _ResultMixinStr):
pass
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
pass
DefragResult._encoded_counterpart = DefragResultBytes
DefragResultBytes._decoded_counterpart = DefragResult
# Due to differences in type and native subclass printing,
# the best thing we can do here is to just test that no exceptions
# happen
#print(DefragResult, DefragResult._encoded_counterpart)
#print(DefragResultBytes, DefragResultBytes._decoded_counterpart)
o1 = DefragResult("a", "b")
#print(o1, type(o1))
o2 = DefragResultBytes("a", "b")
#print(o2, type(o2))
#print(o1._encoded_counterpart)
_o1 = o1.encode()
print(_o1[0], _o1[1])
#print(_o1, type(_o1))
print("All's ok")
| Python | 0 |
7a331edf955d914c82751eb7ec1dd20896e25f83 | Use SequenceEqual because we care about maintaining order. | tests/cases/stats/tests/kmeans.py | tests/cases/stats/tests/kmeans.py | import os
from django.test import TestCase
from avocado.stats import cluster, kmeans
from scipy.cluster import vq
import numpy
from itertools import chain
__all__ = ('KmeansTestCase',)
random_points_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/random_points.txt'))
random_points_3d_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/random_points_3d.txt'))
random_points = [float(x.strip()) for x in random_points_file.xreadlines()]
random_points_3d = [[float(x) for x in l.strip().split(",")] for l in random_points_3d_file.xreadlines()]
class KmeansTestCase(TestCase):
def test_std_dev(self):
numpy_std_dev = numpy.std(numpy.array(random_points))
our_std_dev = kmeans.std_dev(random_points)
self.assertEqual(numpy_std_dev, our_std_dev)
def test_whiten(self):
scipy_whiten = vq.whiten(numpy.array(random_points))
our_whiten = kmeans.whiten(random_points)
self.assertEqual(len(scipy_whiten), len(our_whiten))
comp_whiten = zip(scipy_whiten, our_whiten)
[self.assertEqual(*comp) for comp in comp_whiten]
scipy_whiten = vq.whiten(numpy.array(random_points_3d))
our_whiten = kmeans.whiten(random_points_3d)
self.assertEqual(len(scipy_whiten), len(our_whiten))
comp_whiten = zip(scipy_whiten, our_whiten)
[self.assertSequenceEqual(scipy_list.tolist(), our_list) for scipy_list, our_list in comp_whiten]
| import os
from django.test import TestCase
from avocado.stats import cluster, kmeans
from scipy.cluster import vq
import numpy
from itertools import chain
__all__ = ('KmeansTestCase',)
random_points_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/random_points.txt'))
random_points_3d_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/random_points_3d.txt'))
random_points = [float(x.strip()) for x in random_points_file.xreadlines()]
random_points_3d = [[float(x) for x in l.strip().split(",")] for l in random_points_3d_file.xreadlines()]
class KmeansTestCase(TestCase):
def test_std_dev(self):
numpy_std_dev = numpy.std(numpy.array(random_points))
our_std_dev = kmeans.std_dev(random_points)
self.assertEqual(numpy_std_dev, our_std_dev)
def test_whiten(self):
scipy_whiten = vq.whiten(numpy.array(random_points))
our_whiten = kmeans.whiten(random_points)
self.assertEqual(len(scipy_whiten), len(our_whiten))
comp_whiten = zip(scipy_whiten, our_whiten)
[self.assertEqual(*comp) for comp in comp_whiten]
scipy_whiten = vq.whiten(numpy.array(random_points_3d))
our_whiten = kmeans.whiten(random_points_3d)
self.assertEqual(len(scipy_whiten), len(our_whiten))
comp_whiten = zip(scipy_whiten, our_whiten)
[self.assertListEqual(scipy_list.tolist(), our_list) for scipy_list, our_list in comp_whiten]
| Python | 0 |
268914e7a29231da882457a6e4744c9661526a73 | Add latest version of py-tabulate (#14138) | var/spack/repos/builtin/packages/py-tabulate/package.py | var/spack/repos/builtin/packages/py-tabulate/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTabulate(PythonPackage):
"""Pretty-print tabular data"""
homepage = "https://bitbucket.org/astanin/python-tabulate"
url = "https://pypi.io/packages/source/t/tabulate/tabulate-0.8.6.tar.gz"
version('0.8.6', sha256='5470cc6687a091c7042cee89b2946d9235fe9f6d49c193a4ae2ac7bf386737c8')
version('0.8.3', sha256='8af07a39377cee1103a5c8b3330a421c2d99b9141e9cc5ddd2e3263fea416943')
version('0.7.7', sha256='83a0b8e17c09f012090a50e1e97ae897300a72b35e0c86c0b53d3bd2ae86d8c6')
depends_on('py-setuptools', type='build')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTabulate(PythonPackage):
"""Pretty-print tabular data"""
homepage = "https://bitbucket.org/astanin/python-tabulate"
url = "https://pypi.io/packages/source/t/tabulate/tabulate-0.7.7.tar.gz"
version('0.7.7', sha256='83a0b8e17c09f012090a50e1e97ae897300a72b35e0c86c0b53d3bd2ae86d8c6')
depends_on('py-setuptools', type='build')
| Python | 0 |
b286e10d7d7c43ceea80cd4025105851ebb9bd8f | Comment out save statement | s4v3.py | s4v3.py | from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
ws.cell('{}{}'.format(col_letter, row_index)).value = field # I'm not entirely sure what we're doing here because I haven't worked with these function before, but my guess is that we're writing the values in the field of the data sample into the current cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
# save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties) | from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
ws.cell('{}{}'.format(col_letter, row_index)).value = field # I'm not entirely sure what we're doing here because I haven't worked with these function before, but my guess is that we're writing the values in the field of the data sample into the current cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties) | Python | 0 |
5dd61d20f14ecbe1bc20fe8db3fd73a78707485a | Refactor partition. | lazy.py | lazy.py | import operator as op
import itertools as it
from functools import partial
from collections import deque
class Wrapper(object):
def __init__(self, data):
self.data = data
def __lt__(self, other):
print 'comparing', self.data, other.data
return self.data < other.data
def partition(predicate, iterable):
passing, failing = deque(), deque()
def gen(f, mine, other):
while True:
if mine:
yield mine.popleft()
else:
newval = next(iterable)
if f(newval):
yield newval
else:
other.append(newval)
return (
gen(predicate, passing, failing),
gen(lambda i: not(predicate(i)), failing, passing)
)
def isorted(xs):
xs = iter(xs)
pivot = next(xs)
below, above = partition(lambda y: y < pivot, xs)
for x in isorted(below):
yield x
yield pivot
for x in isorted(above):
yield x
def imin(xs):
return next(isorted(xs))
def insmallest(n, xs):
return it.islice(isorted(xs), 0, n)
| import operator as op
import itertools as it
from functools import partial
class Wrapper(object):
def __init__(self, data):
self.data = data
def __lt__(self, other):
print 'comparing', self.data, other.data
return self.data < other.data
def partition(predicate, iterable):
pack = partial(it.imap, lambda i: (predicate(i), i))
new_pred = op.itemgetter(0)
unpack = partial(it.imap, op.itemgetter(1))
packed = pack(iterable)
first, second = it.tee(packed)
passing = it.ifilter(new_pred, first)
failing = it.ifilterfalse(new_pred, second)
return map(unpack, (passing, failing))
def isorted(xs):
xs = iter(xs)
pivot = next(xs)
below, above = partition(lambda y: y < pivot, i)
for x in isorted(below):
yield x
yield pivot
for x in isorted(above):
yield x
def imin(xs):
return next(isorted(xs))
def insmallest(n, xs):
return it.islice(isorted(xs), 0, n)
| Python | 0 |
14f0afc20c9d6c200c6e9fa52a4121c98d349be7 | Set version 0.2.5 | pages/__init__.py | pages/__init__.py | # -*- coding: utf-8 -*-
VERSION = (0, 2, 5)
__version__ = '.'.join(map(str, VERSION))
| # -*- coding: utf-8 -*-
VERSION = (0, 2, 4)
__version__ = '.'.join(map(str, VERSION))
| Python | 0.000001 |
d282d5525c4d965dbe0a6ee4967a14f1f412f2b4 | update version number from 1.4 to 1.5 | oauth2/_version.py | oauth2/_version.py | # This is the version of this source code.
manual_verstr = "1.5"
auto_build_num = "143"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
| # This is the version of this source code.
manual_verstr = "1.4"
auto_build_num = "143"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
| Python | 0.000009 |
7bfc2287d15198d9e37b4def4632481c8446a932 | bump version | bread/__init__.py | bread/__init__.py | VERSION = '0.6.0'
| VERSION = '0.5.1'
| Python | 0 |
928c3bb38f4fa24d082ea18db09ff4542b78466c | remove units from x gt 1 example | docs/source/examples/x_greaterthan_1.py | docs/source/examples/x_greaterthan_1.py | from gpkit import Variable, GP
# Decision variable
x = Variable('x')
# Constraint
constraints = [x >= 1]
# Objective (to minimize)
objective = x
# Formulate the GP
gp = GP(objective, constraints)
# Solve the GP
sol = gp.solve()
# Print results table
print sol.table()
| from gpkit import Variable, GP
# Decision variable
x = Variable("x", "m", "A really useful variable called x with units of meters")
# Constraint
constraint = [1/x <= 1]
# Objective (to minimize)
objective = x
# Formulate the GP
gp = GP(objective, constraint)
# Solve the GP
sol = gp.solve()
# Print results table
print sol.table() | Python | 0 |
5d30c02f9adb7de3ce9eebef5178466711d96c64 | Remove unused import: `RelatedField` | rest_framework_json_api/utils.py | rest_framework_json_api/utils.py | from django.utils.encoding import force_text
from django.utils.text import slugify
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework.serializers import ListSerializer
except ImportError:
ListSerializer = type(None)
def get_related_field(field):
if isinstance(field, ManyRelatedField):
return field.child_relation
if isinstance(field, ListSerializer):
return field.child
return field
def is_related_many(field):
if hasattr(field, "many"):
return field.many
if isinstance(field, ManyRelatedField):
return True
if isinstance(field, ListSerializer):
return True
return False
def model_from_obj(obj):
model = getattr(obj, "model", None)
if model is not None:
return model
queryset = getattr(obj, "queryset", None)
if queryset is not None:
return queryset.model
return None
def model_to_resource_type(model):
'''Return the verbose plural form of a model name, with underscores
Examples:
Person -> "people"
ProfileImage -> "profile_image"
'''
if model is None:
return "data"
return force_text(model._meta.verbose_name_plural)
#
# String conversion
#
def camelcase(string):
'''Return a string in lowerCamelCase
Examples:
"people" -> "people"
"profile images" -> "profileImages"
'''
out = slug(string).replace('-', ' ').title().replace(' ', '')
return out[0].lower() + out[1:]
def slug(string):
'''Return a string where words are connected with hyphens'''
return slugify(force_text(string))
def snakecase(string):
'''Return a string where words are connected with underscores
Examples:
"people" -> "people"
"profile images" -> "profile_images"
'''
return slug(string).replace('-', '_')
| from django.utils.encoding import force_text
from django.utils.text import slugify
from rest_framework.serializers import RelatedField
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework.serializers import ListSerializer
except ImportError:
ListSerializer = type(None)
def get_related_field(field):
if isinstance(field, ManyRelatedField):
return field.child_relation
if isinstance(field, ListSerializer):
return field.child
return field
def is_related_many(field):
if hasattr(field, "many"):
return field.many
if isinstance(field, ManyRelatedField):
return True
if isinstance(field, ListSerializer):
return True
return False
def model_from_obj(obj):
model = getattr(obj, "model", None)
if model is not None:
return model
queryset = getattr(obj, "queryset", None)
if queryset is not None:
return queryset.model
return None
def model_to_resource_type(model):
'''Return the verbose plural form of a model name, with underscores
Examples:
Person -> "people"
ProfileImage -> "profile_image"
'''
if model is None:
return "data"
return force_text(model._meta.verbose_name_plural)
#
# String conversion
#
def camelcase(string):
'''Return a string in lowerCamelCase
Examples:
"people" -> "people"
"profile images" -> "profileImages"
'''
out = slug(string).replace('-', ' ').title().replace(' ', '')
return out[0].lower() + out[1:]
def slug(string):
'''Return a string where words are connected with hyphens'''
return slugify(force_text(string))
def snakecase(string):
'''Return a string where words are connected with underscores
Examples:
"people" -> "people"
"profile images" -> "profile_images"
'''
return slug(string).replace('-', '_')
| Python | 0 |
07ac69ef3f722ae57bc0cc61c30a2378c8c53c2e | Fix mutable default argument problem | live.py | live.py | """ Parses http://www.live-footballontv.com for info about live matches """
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
url = 'http://www.live-footballontv.com'
headers = {'User-Agent': 'Football Push Notifications'}
def convert_date(date):
"""Returns datetime object
This will allow the script to calculate timedeltas and reformat the date easily"""
regex_date = re.compile(r'(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)+ \d{1,31}(th|rd|nd|st) +\w* \d\d\d\d')
if not regex_date.match(date):
raise Exception('Date was not the correct format')
date = date.split(' ')
date[1] = date[1][:-2]
if len(date[1]) == 1:
date[1] = '0'+date[1]
date = ' '.join(date)
date_format = '%A %d %B %Y'
date_object = datetime.strptime(date, date_format)
return date_object
def register_match(match, date):
"""Parses the match item into a simple dict"""
kotime = match[2].text
if kotime == 'TBC':
kotime = '12:00'
kotime = kotime.split(':')
# Date of match plus the kick off time
kotime = date + timedelta(hours=int(kotime[0]), minutes=int(kotime[1]))
match_dict = {
"matchfixture": match[0].text,
"competition": match[1].text,
"kickofftime": kotime,
"channels": match[3].text
}
return match_dict
def search_matches(match_list, search_list, ignore_list=None):
"""Return list of football matches that match search"""
if ignore_list is None:
ignore_list = []
search = re.compile('|'.join(search_list))
my_matches = [matches for matches in match_list if search.search(matches['matchfixture'])]
if ignore_list:
ignore = re.compile('|'.join(ignore_list))
my_matches = [match for match in my_matches if not ignore.search(match["matchfixture"])]
return my_matches
def gather_data():
"""Returns the list of matches"""
soup = BeautifulSoup(requests.get(url, headers=headers).text, "html.parser")
# Get rid of <hr> cruft
for node in soup.findAll('hr'):
node.replaceWithChildren()
# Get the date nodes
result = soup.find_all('div', class_='span12 matchdate')
dates = []
for item in result:
dates.append(item.parent)
# Holds the list of dictionaries
matches = []
for item in dates:
date = convert_date(item.text)
cursor = item.findNextSibling()
while True:
try:
if cursor.next.attrs == {u'class': [u'span12', u'matchdate']}:
break
else:
matches.append(register_match(cursor.contents, date))
cursor = cursor.findNextSibling()
except Exception:
break
return matches
| """ Parses http://www.live-footballontv.com for info about live matches """
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
url = 'http://www.live-footballontv.com'
headers = {'User-Agent': 'Football Push Notifications'}
def convert_date(date):
"""Returns datetime object
This will allow the script to calculate timedeltas and reformat the date easily"""
regex_date = re.compile(r'(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)+ \d{1,31}(th|rd|nd|st) +\w* \d\d\d\d')
if not regex_date.match(date):
raise Exception('Date was not the correct format')
date = date.split(' ')
date[1] = date[1][:-2]
if len(date[1]) == 1:
date[1] = '0'+date[1]
date = ' '.join(date)
date_format = '%A %d %B %Y'
date_object = datetime.strptime(date, date_format)
return date_object
def register_match(match, date):
"""Parses the match item into a simple dict"""
kotime = match[2].text
if kotime == 'TBC':
kotime = '12:00'
kotime = kotime.split(':')
# Date of match plus the kick off time
kotime = date + timedelta(hours=int(kotime[0]), minutes=int(kotime[1]))
match_dict = {
"matchfixture": match[0].text,
"competition": match[1].text,
"kickofftime": kotime,
"channels": match[3].text
}
return match_dict
def search_matches(match_list, search_list, ignore_list=[]):
"""Return list of football matches that match search"""
search = re.compile('|'.join(search_list))
my_matches = [matches for matches in match_list if search.search(matches['matchfixture'])]
if ignore_list:
ignore = re.compile('|'.join(ignore_list))
my_matches = [match for match in my_matches if not ignore.search(match["matchfixture"])]
return my_matches
def gather_data():
"""Returns the list of matches"""
soup = BeautifulSoup(requests.get(url, headers=headers).text, "html.parser")
# Get rid of <hr> cruft
for node in soup.findAll('hr'):
node.replaceWithChildren()
# Get the date nodes
result = soup.find_all('div', class_='span12 matchdate')
dates = []
for item in result:
dates.append(item.parent)
# Holds the list of dictionaries
matches = []
for item in dates:
date = convert_date(item.text)
cursor = item.findNextSibling()
while True:
try:
if cursor.next.attrs == {u'class': [u'span12', u'matchdate']}:
break
else:
matches.append(register_match(cursor.contents, date))
cursor = cursor.findNextSibling()
except Exception:
break
return matches
| Python | 0.000007 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.