commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9b3e0c7eb28a67e2383cad6cbfa97fc4fd575756 | Add error classification | classify_logs.py | classify_logs.py | import re
import yaml
error_types = ["no package found",
"unclassified"]
def classify_build_log(log_file):
"""
Takes a build log file object as an input and returns
a tupe `(category, sub-category, sub-category)`
- missing dependency:
- Build Dependency
- Test Dependency
- Runtime error (other than missing dependency)
"""
log = log_file.readlines()
if no_packages_found(log):
return "no package found"
if has_missing_test_dependency(log):
return "missing test dependency"
return "unclassified"
pass
def has_missing_test_dependency(log):
"""
Return: (Status, missing packages)
"""
None
def no_packages_found(log):
p = re.compile(r"Error: No packages found")
return any([re.match(p, line) for line in log])
def classify_all_logs():
packages = yaml.load(file('packages.yaml', 'r'))
log_dir = "./logs/"
for package in packages:
if package['build'] is False:
log_file_name = log_dir + "%s_build.log" % (package['name'])
log_file = open(log_file_name, 'r')
error_type = classify_build_log(log_file)
else:
error_type = None
package['build_error_type'] = error_type
open('packages.yaml', 'w').writelines(yaml.dump(packages))
if __name__ == "__main__":
classify_all_logs()
| Python | 0.000002 | |
aafa99714eff3c5021594ae5021bdd47b41c9c6b | save tpl environs after invoke shell constructor | assets/save_tpl_envs.py | assets/save_tpl_envs.py | # -*- coding:utf-8 -*-
import os
import sys
import json
def save_tpl_envs(path):
envs = {}
for key, value in os.environ.items():
if key.startswith('TPL_'):
envs[key[4:]] = value
with open(path, 'w') as fd:
fd.write(json.dumps(envs))
if __name__ == '__main__':
path = sys.argv[1]
save_tpl_envs(path)
| Python | 0 | |
af9b0ee39d18ca174b19143bdda0d478c4d5a834 | add a driver for hourly reporting | scripts/iemre/rerun_hourly.py | scripts/iemre/rerun_hourly.py | import mx.DateTime
import stage4_hourlyre
sts = mx.DateTime.DateTime(2010,5,1)
ets = mx.DateTime.DateTime(2010,5,13)
interval = mx.DateTime.RelativeDateTime(hours=1)
now = sts
while now < ets:
print now
stage4_hourlyre.merge( now )
now += interval
| Python | 0 | |
0920a23a72e1e14179b75b4d2a50e956ee9deec0 | add skeleton generation file | disaggregator/generate.py | disaggregator/generate.py | from appliance import ApplianceTrace
from appliance import ApplianceInstance
from appliance import ApplianceSet
from appliance import ApplianceType
import fhmm
| Python | 0.000005 | |
8ccab210054c2776a36b7e3648fa1e27eb49a27b | add deeplearning cross-validation NOPASS. PUBDEV-1696. | h2o-py/tests/testdir_algos/deeplearning/pyunit_NOPASS_cv_carsDeepLearning.py | h2o-py/tests/testdir_algos/deeplearning/pyunit_NOPASS_cv_carsDeepLearning.py | import sys
sys.path.insert(1, "../../../")
import h2o
import random
def cv_carsDL(ip,port):
# Connect to h2o
h2o.init(ip,port)
# read in the dataset and construct training set (and validation set)
cars = h2o.import_frame(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
# choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
# 2:multinomial
problem = random.sample(range(3),1)[0]
# pick the predictors and the correct response column
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
response_col = "cylinders"
cars[response_col] = cars[response_col].asfactor()
else :
response_col = "economy"
print "Response column: {0}".format(response_col)
## cross-validation
## basic
nfolds = random.randint(3,10)
dl = h2o.deeplearning(y=cars[response_col], x=cars[predictors], nfolds=nfolds)
## boundary case
# nfolds = 0
dl = h2o.deeplearning(y=cars[response_col], x=cars[predictors], nfolds=0)
## error cases
# 1. nfolds == 1 or < 0
# TODO: PUBDEV-1696
try:
dl = h2o.deeplearning(y=cars[response_col], x=cars[predictors], nfolds=random.randint(-10000,-1))
dl = h2o.deeplearning(y=cars[response_col], x=cars[predictors], nfolds=1)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. cross-validation and regular validation attempted
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
try:
dl = h2o.deeplearning(y=train[response_col], x=train[predictors], nfolds=random.randint(3,10),
validation_y=valid[1], validation_x=valid[predictors])
assert False, "Expected model-build to fail when both cross-validation and regular validation is attempted"
except EnvironmentError:
assert True
# TODO: what should the model metrics look like? add cross-validation metric check to pyunit_metric_json_check.
if __name__ == "__main__":
h2o.run_test(sys.argv, cv_carsDL) | Python | 0 | |
1483f6cece70cb5de115ea1edc630e98292a8170 | Add Sorting/Selection.py & Selection() | Sorting/Selection.py | Sorting/Selection.py | # @auther Besir Kurtulmus
# coding: utf-8
'''
The MIT License (MIT)
Copyright (c) 2014 Ahmet Besir Kurtulmus
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from random import choice
from MergeSort import RandomList
def Selection(l, k):
"""
Description:
Args:
Examples:
"""
v = choice(l)
sL = []
sR = []
sV = []
for i in l:
if i < v:
sL.append(i)
elif i == v:
sV.append(i)
elif i > v:
sR.append(i)
if k <= len(sL):
Selection(sL, k)
elif k <= (len(sL) + len(sV)):
return v
elif k > (len(sL) + len(sV)):
Selection(sR, k - len(sL) - len(sV))
else:
return v
| Python | 0 | |
973696b0c50f235cfcef9e0cb30c6fc2f1028058 | add an index for the story_storytags table | storyboard/db/migration/alembic_migrations/versions/063_index_story_storytags.py | storyboard/db/migration/alembic_migrations/versions/063_index_story_storytags.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""index story_storytags
Revision ID: a6e048164572
Revises: 062
Create Date: 2018-06-25 17:13:43.992561
"""
# revision identifiers, used by Alembic.
revision = '063'
down_revision = '062'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('story_storytags_idx',
'story_storytags', ['story_id'])
def downgrade(active_plugins=None, options=None):
op.drop_index('story_storytags_idx')
| Python | 0.000006 | |
bd4153ff3c0824f7e901dd25e77cdaaeea2072c0 | add tests for basic outdoor pois | test/662-basic-outdoor-pois.py | test/662-basic-outdoor-pois.py | #http://www.openstreetmap.org/node/1387024181
assert_has_feature(
16, 10550, 25297, 'pois',
{ 'kind': 'bbq', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/3497698404
assert_has_feature(
16, 10471, 25343, 'pois',
{ 'kind': 'bicycle_repair_station', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/2910259124
assert_has_feature(
16, 10798, 25903, 'pois',
{ 'kind': 'dive_centre', 'min_zoom': 16 })
#http://www.openstreetmap.org/node/2844159164
assert_has_feature(
16, 18308, 23892, 'pois',
{ 'kind': 'life_ring', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/4083762008
assert_has_feature(
16, 10805, 25927, 'pois',
{ 'kind': 'lifeguard_tower', 'min_zoom': 17 })
#http://www.openstreetmap.org/node/696801847
assert_has_feature(
16, 10597, 25151, 'pois',
{ 'kind': 'picnic_table', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/1128776802
assert_has_feature(
16, 10466, 25372, 'pois',
{ 'kind': 'shower', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/2287784170
assert_has_feature(
16, 10514, 25255, 'pois',
{ 'kind': 'waste_disposal', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/2640323071
assert_has_feature(
16, 10502, 25290, 'pois',
{ 'kind': 'watering_place', 'min_zoom': 18 })
#https://www.openstreetmap.org/node/3954505509
assert_has_feature(
16, 10174, 23848, 'pois',
{ 'kind': 'water_point', 'min_zoom': 18 })
#https://www.openstreetmap.org/node/3984333433
assert_has_feature(
16, 12348, 25363, 'pois',
{ 'kind': 'water_point', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/1978323412
assert_has_feature(
16, 10878, 25000, 'pois',
{ 'kind': 'pylon', 'min_zoom': 17 })
#http://www.openstreetmap.org/node/2398019418
assert_has_feature(
16, 10566, 25333, 'pois',
{ 'kind': 'power_pole', 'min_zoom': 18 })
#http://www.openstreetmap.org/node/1378418272
assert_has_feature(
16, 10480, 25352, 'pois',
{ 'kind': 'power_tower', 'min_zoom': 16 })
#http://www.openstreetmap.org/node/2890101480
assert_has_feature(
16, 11080, 26141, 'pois',
{ 'kind': 'petroleum_well', 'min_zoom': 17 }) | Python | 0 | |
3c4fd0477c7d6f9d0f30654271e73466d192d1e1 | Add data type for vectors | drudge/vec.py | drudge/vec.py | """Vectors and utilities."""
import collections.abc
from sympy import sympify
class Vec:
"""Vectors.
Vectors are the basic non-commutative quantities. Its objects consist of
an base and some indices. The base is allowed to be any Python object,
although small hashable objects, like string, are advised. The indices
are always sympified into SymPy expressions.
Its objects can be created directly by giving the base and indices,
or existing vector objects can be subscripted to get new ones. The
semantics is similar to Haskell functions.
Note that users cannot directly assign to the attributes of this class.
This class can be used by itself, it can also be subclassed for special
use cases.
"""
__slots__ = ['_base', '_indices']
def __init__(self, base, indices=()):
"""Initialize a vector.
Atomic indices are added as the only index. Iterable values will
have all of its entries added.
"""
self._base = base
if not isinstance(indices, collections.abc.Iterable):
indices = (indices,)
self._indices = tuple(sympify(i) for i in indices)
@property
def base(self):
"""Get the base of the vector."""
return self._base
@property
def indices(self):
"""Get the indices of the vector."""
return self._indices
def __getitem__(self, item):
"""Append the given indices to the vector.
When multiple new indices are to be given, they have to be given as a
tuple.
"""
if not isinstance(item, tuple):
item = (item,)
new_indices = tuple(sympify(i) for i in item)
# Pay attention to subclassing.
return type(self)(self.base, self.indices + new_indices)
def __repr__(self):
"""Form repr string form the vector."""
return ''.join([
type(self).__name__, '(', repr(self.base), ', (',
', '.join(repr(i) for i in self.indices),
'))'
])
def __str__(self):
"""Form a more readable string representation."""
return ''.join([
str(self.base), '[', ', '.join(str(i) for i in self.indices), ']'
])
def __hash__(self):
"""Compute the hash value of a vector."""
return hash((self.base, self.indices))
def __eq__(self, other):
"""Compares the equality of two vectors."""
return (
(isinstance(self, type(other)) or isinstance(other, type(self))) and
self.base == other.base and self.indices == other.indices
)
| Python | 0 | |
d0d4688a8768dceeeb5d609a05de72fc24ac6b75 | Create pwned.py | pwned/src/pwned.py | pwned/src/pwned.py | import hashlib, sys, urllib.request
def main():
hash = hashlib.sha1(bytes(sys.argv[1], "utf-8"))
digest = hash.hexdigest().upper()
url = f"https://api.pwnedpasswords.com/range/{digest[:5]}"
request = urllib.request.Request(url, headers={"User-Agent":"API-Programming-Exercise"})
page = urllib.request.urlopen(request)
data = (page.read().decode('utf-8').split())
for i in data:
tmp = i.split(":")
if digest[:5] + tmp[0] == digest:
print(f"{sys.argv[1]} was found")
print(f"Hash {digest}, {tmp[1]} occurrences")
if __name__ == "__main__":
main()
| Python | 0.000001 | |
aca14378e6f7091abed8f25183b36a36170caa76 | Fix State ID number to 8 chars limit | users/models.py | users/models.py | from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
# User-related models
class User(models.Model):
'''
Represents an user. Both organizers and participants are considered as
users. This allows usage of the same accounts for the users that became
organizers later.
'''
# Fields accessible from AuthUser:
# username
# first_name
# last_name
# email
# password
# is_staff
# is_active
# is_superuser
# last_login
# date_joined
authuser = models.OneToOneField(AuthUser)
# personal info
date_of_birth = models.DateTimeField(blank=True)
sex = models.CharField(max_length=1, blank=True, choices=(('M', 'male'),
('F', 'female')))
social_security_number = models.CharField(max_length=11, blank=True)
state_id_number = models.CharField(max_length=8, blank=True)
competes = models.ManyToManyField('competitions.Competition',
through='competitions.CompetitionUserRegistration')
# address information
phone_number = models.CharField(max_length=30, blank=True)
parent_phone_number = models.CharField(max_length=30, blank=True)
address = models.ForeignKey('schools.Address')
# school related info
school = models.ForeignKey('schools.School', blank=True)
school_class = models.CharField(max_length=20, blank=True)
classlevel = models.CharField(max_length=2, blank=True,
choices=(('Z2', 'Z2'),
('Z3', 'Z3'),
('Z4', 'Z4'),
('Z5', 'Z5'),
('Z6', 'Z6'),
('Z7', 'Z7'),
('Z8', 'Z8'),
('Z9', 'Z9'),
('S1', 'S1'),
('S2', 'S2'),
('S3', 'S3'),
('S4', 'S4')))
# Fields added via foreign keys:
# camp_set
# campuserinvitation_set
# competitionuserregistration_set
# event_set
# eventuserregistration_set
# usersolution_set
# Fields added via inheritance:
# organizer
def __unicode__(self):
return self.login
class Organizer(User):
'''
Represents an organizer. Organizer can organize multiple competitions
or events.
'''
motto = models.CharField(max_length=50)
# TODO: there are 2 data descriptors added via many-to-many relationship
# in this case it's organized_event_set (custom name due to
# the fact that optional parameter related_name was defined)
# and eventorgregistration_set
# Investigate!
# Fields added via foreign keys:
# competitionorgregistration_set
# eventorgregistration_set
# organized_event_set
# registeredorg
# orgsolution_set
# post_set
# Fields added via inheritance:
# user_ptr
# Register to the admin site
admin.site.register(User)
admin.site.register(Organizer)
| from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
# User-related models
class User(models.Model):
'''
Represents an user. Both organizers and participants are considered as
users. This allows usage of the same accounts for the users that became
organizers later.
'''
# Fields accessible from AuthUser:
# username
# first_name
# last_name
# email
# password
# is_staff
# is_active
# is_superuser
# last_login
# date_joined
authuser = models.OneToOneField(AuthUser)
# personal info
date_of_birth = models.DateTimeField(blank=True)
sex = models.CharField(max_length=1, blank=True, choices=(('M', 'male'),
('F', 'female')))
social_security_number = models.CharField(max_length=11, blank=True)
state_id_number = models.CharField(max_length=7, blank=True)
competes = models.ManyToManyField('competitions.Competition',
through='competitions.CompetitionUserRegistration')
# address information
phone_number = models.CharField(max_length=30, blank=True)
parent_phone_number = models.CharField(max_length=30, blank=True)
address = models.ForeignKey('schools.Address')
# school related info
school = models.ForeignKey('schools.School', blank=True)
school_class = models.CharField(max_length=20, blank=True)
classlevel = models.CharField(max_length=2, blank=True,
choices=(('Z2', 'Z2'),
('Z3', 'Z3'),
('Z4', 'Z4'),
('Z5', 'Z5'),
('Z6', 'Z6'),
('Z7', 'Z7'),
('Z8', 'Z8'),
('Z9', 'Z9'),
('S1', 'S1'),
('S2', 'S2'),
('S3', 'S3'),
('S4', 'S4')))
# Fields added via foreign keys:
# camp_set
# campuserinvitation_set
# competitionuserregistration_set
# event_set
# eventuserregistration_set
# usersolution_set
# Fields added via inheritance:
# organizer
def __unicode__(self):
return self.login
class Organizer(User):
'''
Represents an organizer. Organizer can organize multiple competitions
or events.
'''
motto = models.CharField(max_length=50)
# TODO: there are 2 data descriptors added via many-to-many relationship
# in this case it's organized_event_set (custom name due to
# the fact that optional parameter related_name was defined)
# and eventorgregistration_set
# Investigate!
# Fields added via foreign keys:
# competitionorgregistration_set
# eventorgregistration_set
# organized_event_set
# registeredorg
# orgsolution_set
# post_set
# Fields added via inheritance:
# user_ptr
# Register to the admin site
admin.site.register(User)
admin.site.register(Organizer)
| Python | 0.001402 |
a81e65eaabb0f3e99721854d2dcaa7dd1f8b0a21 | Create SVM.py | 02.Algorithms/SVM.py | 02.Algorithms/SVM.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 19 13:23:12 2017
@author: rmatam
"""
# -*- coding: utf-8 -*-
# 2015/01/11
# Script passed in py2 & py3 with Ubuntu 14.04 env.
# Prerequirement: pip install numpy scipy scikit-learn
# furthermore info http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html.
# furthermore info http://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm
# There have a lot of descriptions of setting variables on the website, please check it if you need the further setting.
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer as tfidf
vec =tfidf(smooth_idf =False)
svc = svm.SVC(kernel='poly') # further settings on website: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# training set, "List" type.
trainset =["good good good good good great great great", # corpus 1
"bad bad bad bad bad bad dirty dirty dirty", # corpus 2
]
trainTag =["pos", "neg"] # corpus's tags.
# test set, "List" type.
testset =["good good good good good great great great",
"good good good good good great great great bad",
"good good good good good great great great bad bad",
"good good good good good great great great bad bad bad",
"good good good good good great great great dirty",
"good good good good good great great great dirty dirty",
"good good good good good great great great dirty dirty dirty",
"bad bad bad bad bad bad dirty dirty dirty",
"bad bad bad bad bad bad dirty dirty dirty good",
"bad bad bad bad bad bad dirty dirty dirty good good",
"bad bad bad bad bad bad dirty dirty dirty good good good",
"bad bad bad bad bad bad dirty dirty dirty great",
"bad bad bad bad bad bad dirty dirty dirty great great",
"bad bad bad bad bad bad dirty dirty dirty great great great",
]
testTag =["pos", "pos", "pos", "pos", "pos", "pos", "pos",
"neg", "neg", "neg", "neg", "neg", "neg", "neg",
]
# training set is converting to the tfidf array.
trainRs =vec.fit_transform(trainset).toarray()
# test set is converting to the tfidf array.
testRs =vec.fit_transform(testset).toarray()
# the tfidf array result of training & test set.
print("Training set tfidf result.")
print(trainRs.shape)
print(trainRs)
print("----------------------------------------")
print("Test set tfidf result.")
print(testRs.shape)
print(testRs)
# training...
svc.fit(trainRs, trainTag) # further settings on website: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# accuracy of the model.
print("----------------------------------------")
accuracy =svc.score(testRs, testTag)
print("SVM model accuracy:")
print(accuracy)
# predicting test set result.
print("----------------------------------------")
predict =svc.predict(testRs)
print("SVM model predict result:")
print(predict)
'''
Console Print:::
Training set tfidf result.
(2, 4)
[[ 0. 0. 0.85749293 0.51449576]
[ 0.89442719 0.4472136 0. 0. ]]
----------------------------------------
Test set tfidf result.
(14, 4)
[[ 0. 0. 0.85749293 0.51449576]
[ 0.16903085 0. 0.84515425 0.50709255]
[ 0.32444284 0. 0.81110711 0.48666426]
[ 0.45749571 0. 0.76249285 0.45749571]
[ 0. 0.16903085 0.84515425 0.50709255]
[ 0. 0.32444284 0.81110711 0.48666426]
[ 0. 0.45749571 0.76249285 0.45749571]
[ 0.89442719 0.4472136 0. 0. ]
[ 0.88465174 0.44232587 0.14744196 0. ]
[ 0.85714286 0.42857143 0.28571429 0. ]
[ 0.81649658 0.40824829 0.40824829 0. ]
[ 0.88465174 0.44232587 0. 0.14744196]
[ 0.85714286 0.42857143 0. 0.28571429]
[ 0.81649658 0.40824829 0. 0.40824829]]
----------------------------------------
SVM model accuracy:
1.0
----------------------------------------
SVM model predict result:
['pos' 'pos' 'pos' 'pos' 'pos' 'pos' 'pos' 'neg' 'neg' 'neg' 'neg' 'neg'
'neg' 'neg']
| Python | 0.000007 | |
806594afc5468d3cee183defba24501516b791f0 | add cities borders | belarus_city_borders.py | belarus_city_borders.py | from _helpers import cursor_wrap, dump
@cursor_wrap
def main(cursor):
sql = """
SELECT ct.osm_id, c.name AS country, '' AS region, '' AS subregion, ct.name AS city, ST_AsGeoJSON(ct.way)
FROM osm_polygon c
LEFT JOIN osm_polygon ct ON ST_Contains(c.way, ct.way)
WHERE c.osm_id = -59065 AND ct.admin_level = '4'
AND ct.tags->'place' IN ('city', 'town')
UNION
SELECT ct.osm_id, c.name AS country, r.name AS region, '' AS subregion, ct.name AS city, ST_AsGeoJSON(ct.way)
FROM osm_polygon c
LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)
LEFT JOIN osm_polygon ct ON ST_Contains(r.way, ct.way)
WHERE c.osm_id = -59065 AND r.admin_level = '4' AND ct.admin_level = '6'
AND ct.tags->'place' IN ('city', 'town')
UNION
SELECT ct.osm_id, c.name AS country, r.name AS region, s.name AS subregion, ct.name AS city, ST_AsGeoJSON(ct.way)
FROM osm_polygon c
LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)
LEFT JOIN osm_polygon s ON ST_Contains(r.way, s.way)
LEFT JOIN osm_polygon ct ON ST_Contains(s.way, ct.way)
WHERE c.osm_id = -59065 AND r.admin_level = '4' AND s.admin_level = '6'
AND ct.tags->'place' IN ('city', 'town')
"""
cursor.execute(sql)
dump(__file__, sorted(cursor.fetchall(), key=lambda item: item[1:5]),
('osmid', 'country', 'region', 'subregion', 'city', 'geojson'))
if __name__ == '__main__':
main()
| Python | 0.999998 | |
fdb901a59e8dd61892f5033efe49e3bbbdae097f | Create CNlab1.py | CNlab1.py | CNlab1.py | #To check the validity of ip address
import sys
import textwrap
def valid(ip):
if ip.count('.')!=3:
print("Invalid")
sys.exit(0)
ipl=[]
ipl=ip.split('.')
for i in ipl:
if not i.isdigit():
print("Invalid")
sys.exit(0)
if int(i)>255:
print("Invalid")
sys.exit(0)
else:
print("Valid")
#To calculate bit mask
inp=raw_input("Enter the ip address\n")
li=inp.split('/')
ipv=li[0]
valid(li[0])
n=int(li[1])
h=32-int(li[1])
mask= '1'* n + '0'*h
maskd= '.'.join(str(int(i,2)) for i in textwrap.wrap(mask, 8))
print "Mask : ", maskd
maskd_list= maskd.split('.')
ipv_list=ipv.split('.')
#To calculate network id
k=0
net_id=[]
for i in range(0,4):
net_id.append(str(int(maskd_list[k]) & int(ipv_list[k])))
k+=1
print "Network id : " , '.'.join(net_id)
#To calculate brodcast address
zoo=[]
for i in net_id:
zoo.append("{0:08b}".format(int(i)))
zoos = ''.join(zoo)
broad=[]
for i in textwrap.wrap(zoos[:n] + str(int(zoos[n:],2) | int( '1'* h)), 8):
broad.append(str(int(i,2)))
print('Broadcast address : ', '.'.join(broad))
#To calculate no. of subnets
print "Number of subnets", 2 ** (n)
#To calculate nu. of hosts
print "Number of hosts", (2 ** (32-n)) - 2
#To print first address
print "First address : " + '.'.join(net_id[:3])+ '.' + str(int(net_id[3]) + 1)
#To print last address
print "Last address : " + '.'.join(broad[:3]) + '.' + str(int(broad[3]) - 1)
| Python | 0.000002 | |
f16dcf6fe2d53be0444faad9f265781282201d95 | Use consistent quotes | colorama/ansi.py | colorama/ansi.py | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
| # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + "2;" + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + "J"
def clear_line(mode=2):
return CSI + str(mode) + "K"
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + "A"
def DOWN(self, n=1):
return CSI + str(n) + "B"
def FORWARD(self, n=1):
return CSI + str(n) + "C"
def BACK(self, n=1):
return CSI + str(n) + "D"
def POS(self, x=1, y=1):
return CSI + str(y) + ";" + str(x) + "H"
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
| Python | 0 |
4a261bd97de5868ff6065ac69345d3bef38563f1 | Check history_object in historical records | simple_history/tests/tests.py | simple_history/tests/tests.py | from datetime import datetime, timedelta
from django.test import TestCase
from .models import Poll, Choice
today = datetime(2021, 1, 1, 10, 0)
tomorrow = today + timedelta(days=1)
class HistoricalRecordsTest(TestCase):
def assertDatetimesEqual(self, time1, time2):
self.assertAlmostEqual(time1, time2, delta=timedelta(seconds=2))
def assertRecordValues(self, record, values_dict):
for key, value in values_dict.items():
self.assertEqual(getattr(record, key), value)
self.assertEqual(record.history_object.__class__, Poll)
for key, value in values_dict.items():
if key != 'history_type':
self.assertEqual(getattr(record.history_object, key), value)
def test_create(self):
p = Poll(question="what's up?", pub_date=today)
p.save()
history = p.history.all()
record, = history
self.assertRecordValues(record, {
'question': "what's up?",
'pub_date': today,
'id': p.id,
'history_type': "+"
})
self.assertDatetimesEqual(record.history_date, datetime.now())
def test_update(self):
Poll.objects.create(question="what's up?", pub_date=today)
p = Poll.objects.get()
p.pub_date = tomorrow
p.save()
history = p.history.all()
update_record, create_record = history
self.assertRecordValues(create_record, {
'question': "what's up?",
'pub_date': today,
'id': p.id,
'history_type': "+"
})
self.assertRecordValues(update_record, {
'question': "what's up?",
'pub_date': tomorrow,
'id': p.id,
'history_type': "~"
})
def test_delete(self):
p = Poll.objects.create(question="what's up?", pub_date=today)
poll_id = p.id
p.delete()
history = Poll.history.all()
delete_record, create_record = history
self.assertRecordValues(create_record, {
'question': "what's up?",
'pub_date': today,
'id': poll_id,
'history_type': "+"
})
self.assertRecordValues(delete_record, {
'question': "what's up?",
'pub_date': today,
'id': poll_id,
'history_type': "-"
})
class RegisterTest(TestCase):
def test_register_no_args(self):
self.assertEqual(len(Choice.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
self.assertEqual(len(choice.history.all()), 1)
class HistoryManagerTest(TestCase):
def test_most_recent(self):
poll = Poll.objects.create(question="what's up?", pub_date=today)
poll.question = "how's it going?"
poll.save()
poll.question = "why?"
poll.save()
poll.question = "how?"
most_recent = poll.history.most_recent()
self.assertEqual(most_recent.__class__, Poll)
self.assertEqual(most_recent.question, "why?")
def test_as_of(self):
poll = Poll.objects.create(question="what's up?", pub_date=today)
poll.question = "how's it going?"
poll.save()
poll.question = "why?"
poll.save()
poll.question = "how?"
most_recent = poll.history.most_recent()
self.assertEqual(most_recent.question, "why?")
times = [r.history_date for r in poll.history.all()]
question_as_of = lambda time: poll.history.as_of(time).question
self.assertEqual(question_as_of(times[0]), "why?")
self.assertEqual(question_as_of(times[1]), "how's it going?")
self.assertEqual(question_as_of(times[2]), "what's up?")
| from datetime import datetime, timedelta
from django.test import TestCase
from .models import Poll, Choice
today = datetime(2021, 1, 1, 10, 0)
tomorrow = today + timedelta(days=1)
class HistoricalRecordsTest(TestCase):
def assertDatetimesEqual(self, time1, time2):
self.assertAlmostEqual(time1, time2, delta=timedelta(seconds=2))
def assertRecordValues(self, record, values_dict):
for key, value in values_dict.items():
self.assertEqual(getattr(record, key), value)
def test_create(self):
p = Poll(question="what's up?", pub_date=today)
p.save()
history = p.history.all()
record, = history
self.assertRecordValues(record, {
'question': "what's up?",
'pub_date': today,
'id': p.id,
'history_type': "+"
})
self.assertDatetimesEqual(record.history_date, datetime.now())
def test_update(self):
Poll.objects.create(question="what's up?", pub_date=today)
p = Poll.objects.get()
p.pub_date = tomorrow
p.save()
history = p.history.all()
update_record, create_record = history
self.assertRecordValues(create_record, {
'question': "what's up?",
'pub_date': today,
'id': p.id,
'history_type': "+"
})
self.assertRecordValues(update_record, {
'question': "what's up?",
'pub_date': tomorrow,
'id': p.id,
'history_type': "~"
})
def test_delete(self):
p = Poll.objects.create(question="what's up?", pub_date=today)
poll_id = p.id
p.delete()
history = Poll.history.all()
delete_record, create_record = history
self.assertRecordValues(create_record, {
'question': "what's up?",
'pub_date': today,
'id': poll_id,
'history_type': "+"
})
self.assertRecordValues(delete_record, {
'question': "what's up?",
'pub_date': today,
'id': poll_id,
'history_type': "-"
})
class RegisterTest(TestCase):
def test_register_no_args(self):
self.assertEqual(len(Choice.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
self.assertEqual(len(choice.history.all()), 1)
class HistoryManagerTest(TestCase):
def test_most_recent(self):
poll = Poll.objects.create(question="what's up?", pub_date=today)
poll.question = "how's it going?"
poll.save()
poll.question = "why?"
poll.save()
poll.question = "how?"
most_recent = poll.history.most_recent()
self.assertEqual(most_recent.__class__, Poll)
self.assertEqual(most_recent.question, "why?")
def test_as_of(self):
poll = Poll.objects.create(question="what's up?", pub_date=today)
poll.question = "how's it going?"
poll.save()
poll.question = "why?"
poll.save()
poll.question = "how?"
most_recent = poll.history.most_recent()
self.assertEqual(most_recent.question, "why?")
times = [r.history_date for r in poll.history.all()]
question_as_of = lambda time: poll.history.as_of(time).question
self.assertEqual(question_as_of(times[0]), "why?")
self.assertEqual(question_as_of(times[1]), "how's it going?")
self.assertEqual(question_as_of(times[2]), "what's up?")
| Python | 0 |
bf6f58d5958275070c1018174217873ea08db904 | Add test pull task | nodeconductor/structure/tests/tasks.py | nodeconductor/structure/tests/tasks.py | from celery import shared_task
from nodeconductor.core import utils as core_utils
@shared_task
def pull_instance(serialized_instance, pulled_disk):
""" Test-only task that allows to emulate pull operation """
instance = core_utils.deserialize_instance(serialized_instance)
instance.disk = pulled_disk
instance.save()
| Python | 0.000074 | |
7ce8c06c5447d89f941d482c84693e432384def6 | rename `file` to `filename` for clarity. | pysellus/loader.py | pysellus/loader.py | import os
import sys
from inspect import isfunction
from importlib import import_module
def load(path):
if _is_python_file(path):
sys.path.insert(0, os.path.dirname(path))
module = import_module(_get_module_name_from_path(path))
return _get_checks_from_module(module)
functions = []
for module in _get_modules(path):
functions += _get_checks_from_module(module)
return functions
def _get_module_name_from_path(path):
return _remove_extension(path.split('/')[-1])
def _get_checks_from_module(module):
"""
Gets all setup functions from the given module.
Setup functions are required to start with 'pscheck_'
"""
functions = []
for name in dir(module):
value = getattr(module, name)
if isfunction(value) and name.startswith('pscheck_'):
functions.append(value)
return functions
def _get_modules(directory):
sys.path.insert(0, directory)
return [
import_module(filename)
for filename in _get_python_files(directory)
]
def _get_python_files(directory):
return [
_remove_extension(filename)
for filename in os.listdir(directory)
if not filename.startswith('__') and _is_python_file(filename)
]
def _is_python_file(filename):
return filename.endswith('.py')
def _remove_extension(filename):
return filename[:-3]
| import os
import sys
from inspect import isfunction
from importlib import import_module
def load(path):
if _is_python_file(path):
sys.path.insert(0, os.path.dirname(path))
module = import_module(_get_module_name_from_path(path))
return _get_checks_from_module(module)
functions = []
for module in _get_modules(path):
functions += _get_checks_from_module(module)
return functions
def _get_module_name_from_path(path):
return _remove_file_extension(path.split('/')[-1])
def _get_checks_from_module(module):
"""
Gets all setup functions from the given module.
Setup functions are required to start with 'pscheck_'
"""
functions = []
for name in dir(module):
value = getattr(module, name)
if isfunction(value) and name.startswith('pscheck_'):
functions.append(value)
return functions
def _get_modules(directory):
sys.path.insert(0, directory)
return [
import_module(filename)
for filename in _get_python_files(directory)
]
def _get_python_files(directory):
return [
_remove_file_extension(file)
for file in os.listdir(directory)
if not file.startswith('__') and _is_python_file(file)
]
def _is_python_file(filename):
return filename.endswith('.py')
def _remove_file_extension(filename):
return filename[:-3]
| Python | 0 |
cc5f55fa6eb6d0ecaaef1c1e269fb40c2731fef5 | Add test helpers | src/lib/test_helpers.py | src/lib/test_helpers.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""
Utility classes for page objects used in tests.
Details:
Most of the tests require a sequence of primitive methods of the page
object. If the sequence repeats itself among tests, it should be shared in
this module.
"""
import uuid
from lib import base
from lib.constants.test import create_new_program
class LhnMenu(base.Test):
@staticmethod
def create_new_program():
pass
class ModalNewProgramPage(base.Test):
"""Methods for simulating common user actions"""
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the modal
Args:
modal (lib.page.modal.new_program.NewProgramModal)
"""
unique_id = str(uuid.uuid4())
modal.enter_title(create_new_program.TITLE + unique_id)
modal.enter_description(
create_new_program.DESCRIPTION_SHORT)
modal.enter_notes(
create_new_program.NOTES_SHORT)
modal.enter_code(create_new_program.CODE + unique_id)
modal.filter_and_select_primary_contact("example")
modal.filter_and_select_secondary_contact("example")
modal.enter_program_url(
create_new_program.PROGRAM_URL)
modal.enter_reference_url(
create_new_program.REFERENCE_URL)
modal.enter_effective_date_start_month()
modal.enter_stop_date_end_month()
| Python | 0.000001 | |
8e7350cbfc96541d9a3ddc970309c60793bb4126 | fix TermsFacet | corehq/apps/es/facets.py | corehq/apps/es/facets.py | class FacetResult(object):
def __init__(self, raw, facet):
self.facet = facet
self.raw = raw
self.result = raw.get(self.facet.name, {}).get(self.facet.type, {})
class Facet(object):
name = None
type = None
params = None
result_class = FacetResult
def __init__(self):
raise NotImplementedError()
def parse_result(self, result):
return self.result_class(result, self)
class TermsResult(FacetResult):
def counts_by_term(self):
return {d['term']: d['count'] for d in self.result}
class TermsFacet(Facet):
type = "terms"
result_class = TermsResult
def __init__(self, term, name, size=None):
assert(name.isalnum(), "name must be a valid python variable name")
self.name = name
self.params = {
"field": term,
}
if size is not None:
self.params["size"] = size
class DateHistogram(Facet):
type = "date_histogram"
def __init__(self, name, datefield, interval):
self.name = name
self.params = {
"field": datefield,
"interval": interval
}
| class FacetResult(object):
def __init__(self, raw, facet):
self.facet = facet
self.raw = raw
self.result = raw.get(self.facet.name, {}).get(self.facet.type, {})
class Facet(object):
name = None
type = None
params = None
result_class = FacetResult
def __init__(self):
raise NotImplementedError()
def parse_result(self, result):
return self.result_class(result, self)
class TermsResult(FacetResult):
def counts_by_term(self):
return {d['term']: d['count'] for d in self.result}
class TermsFacet(Facet):
type = "terms"
result_class = TermsResult
def __init__(self, name, term, size=None):
assert(name.isalnum(), "name must be a valid python variable name")
self.name = name
self.params = {
"field": term,
}
if size is not None:
self.params["size"] = size
class DateHistogram(Facet):
type = "date_histogram"
def __init__(self, name, datefield, interval):
self.name = name
self.params = {
"field": datefield,
"interval": interval
}
| Python | 0.000001 |
04ded12c05b20fc3a25956712f8e0fb1723c3edb | Add a snippet (python/warnings). | python/warnings.py | python/warnings.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
import warnings
def custom_formatwarning(message, category, filename, lineno, line=""):
"""Ignore everything except the message."""
return "Warning: " + str(message) + "\n"
def main():
"""Main function"""
warnings.formatwarning = custom_formatwarning
warnings.warn("Foo", UserWarning)
if __name__ == '__main__':
main()
| Python | 0.000004 | |
630413b6bdc385095fe8da549b691d54fc6a4504 | Add ITWeek.py | ITWeek.py | ITWeek.py | import requests
from bs4 import BeautifulSoup
def main():
url = 'https://ex-portal3.reed.jp/list/SODECS2017_ja.html'
res = requests.get(url)
soup = BeautifulSoup(res.content, 'html.parser')
companies = soup.find_all('tr')
for company in companies:
print(company.text)
if __name__ == '__main__':
main()
| Python | 0 | |
1f71153cf814f7d34835cea6eafe44683035d874 | Add compare_files.py | compare_files.py | compare_files.py | import difflib
def compare_files(filename1, filename2):
f = open(filename1, "r")
filelines1 = f.readlines()
f.close()
f = open(filename2, "r")
filelines2 = f.readlines()
f.close()
diffs = difflib.context_diff(filelines1,
filelines2,
fromfile=filename1,
tofile=filename2)
count = 0
for line in diffs:
print line,
count += 1
return count == 0
| Python | 0.000002 | |
3ce2e0b8825c7abc219a812c5abda45184fbfdec | add wot wikia plugin | plugins/wotwikia.py | plugins/wotwikia.py | """ WoT Wikia Plugin (botwot plugins.wiki) """
# Copyright 2015 Ray Schulz <https://rascul.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import requests
from pyaib.plugins import keyword, plugin_class
@plugin_class
class WotWikia(object):
def __init__(self, context, config):
pass
@keyword("wot")
def keyword_wot(self, context, msg, trigger, args, kargs):
"""
<query> - Search the WoT Wikia for <query>
"""
target_user = ""
query = ""
if len(args) >= 3 and args[-2] == "|":
target_user = args[-1]
query = " ".join(args[:-2])
else:
query = " ".join(args)
url = "http://wot.wikia.com/api/v1/Search/List"
payload = {'query': query, 'limit': 1}
r = requests.get(url, params=payload)
j = json.loads(r.text)
if j and 'items' in j:
if target_user:
msg.reply("%s: %s" % (target_user, j['items'][0]['url']))
else:
msg.reply(j['items'][0]['url'])
| Python | 0 | |
46818f540d48bd967e8e0e5d846f0757f2ca6c1c | Add test for set_shard() | deepchem/data/tests/test_setshard.py | deepchem/data/tests/test_setshard.py | import deepchem as dc
import numpy as np
def test_setshard_with_X_y():
"""Test setharding on a simple example"""
X = np.random.rand(10, 3)
y = np.random.rand(10,)
dataset = dc.data.DiskDataset.from_numpy(X, y)
assert dataset.get_shape()[0][0] == 10
assert dataset.get_shape()[1][0] == 10
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
X = X[1:]
y = y[1:]
w = w[1:]
ids = ids[1:]
dataset.set_shard(i, X, y, w, ids)
assert dataset.get_shape()[0][0] == 9
assert dataset.get_shape()[1][0] == 9
| Python | 0.000001 | |
a8bbbb77e2036b66a5083bd2a1393b0de588af0c | Rename to alg_count_changes.py & count_changes() | alg_count_changes.py | alg_count_changes.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Count Changes.
Count how many distinct ways you can make change that amount.
Assume that you have an infinite number of each kind of coin.
"""
def count_changes_recur(amount, coins, n):
"""Count changes by recursion.
Time complexity: O(2^n).
Space complexity: O(1).
"""
if amount < 0:
return 0
if amount == 0:
return 1
# When number of coins is 0 but there is still amount remaining.
if n < 0 and amount >= 1:
return 0
# Compute ways with coin n included plus that with coin excluded.
count_in = count_changes_recur(amount - coins[n], coins, n)
count_ex = count_changes_recur(amount, coins, n - 1)
count = count_in + count_ex
return count
def _count_changes_memo(amount, coins, T, n):
"""Helper function for count_changes_memo()."""
if amount == 0:
return 1
if amount < 0:
return 0
if n < 0 and amount >= 1:
return 0
count_in = _count_changes_memo(amount - coins[n - 1], coins, T, n)
count_ex = _count_changes_memo(amount, coins, T, n - 1)
T[n - 1][amount] = count_in + count_ex
return T[n - 1][amount]
def count_changes_memo(amount, coins, n):
"""Count changes by top-bottom dynamic programming:
recursion + memoization.
Time complexity: O(a * c), where a is amount, and c is number of coins.
Space complexity: O(a * c).
"""
T = [[0] * (amount + 1) for c in range(n + 1)]
for c in range(n + 1):
T[c][0] = 1
return _count_changes_memo(amount, coins, T, n)
def count_changes_dp(amount, coins):
"""Count changes by bottom-up dynamic programming.
Time complexity: O(a * c), where a is amount, and c is number of coins.
Space complexity: O(a * c).
"""
n = len(coins)
T = [[0] * (amount + 1) for c in range(n)]
for c in range(n):
T[c][0] = 1
for c in range(n):
for a in range(1, amount + 1):
if a >= coins[c]:
count_in = T[c][a - coins[c]]
else:
count_in = 0
if c >= 1:
count_ex = T[c - 1][a]
else:
count_ex = 0
T[c][a] = count_in + count_ex
return T[-1][-1]
def main():
import time
amount = 5
coins = [1, 2, 3] # Ans = 5.
n = len(coins) - 1
start_time = time.time()
print('Make change by recursion: {}'
.format(count_changes_recur(amount, coins, n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('Make change by memo: {}'
.format(count_changes_memo(amount, coins, n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('Make change by DP: {}'
.format(count_changes_dp(amount, coins)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| Python | 0.000015 | |
6ad081e91e337e1627b70674109f45ba35248f8c | Add missing migration file to the repo | zou/migrations/versions/e839d6603c09_add_person_id_to_shot_history.py | zou/migrations/versions/e839d6603c09_add_person_id_to_shot_history.py | """add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
| Python | 0 | |
0538523f617ec1d410861b52a647c788c06c267a | Fix llg tests. | pyoommf/test_llg.py | pyoommf/test_llg.py | from llg import LLG
def test_llg_mif():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
name = 'llgtest'
llg = LLG(t, m_init, Ms, alpha, gamma, name)
mif_string = llg.get_mif()
lines = mif_string.split('\n')
assert 'Specify Oxs_RungeKuttaEvolve {' in lines[0]
line2 = lines[1].split()
assert float(line2[1]) == alpha
line3 = lines[2].split()
assert float(line3[1]) == gamma
line8 = lines[8].split()
assert float(line8[1]) == t
line11 = lines[11].split()
assert float(line11[1]) == Ms
line13 = lines[13].split()
assert float(line13[1][1:]) == m_init[0]
assert float(line13[2]) == m_init[1]
assert float(line13[3][:-1]) == m_init[2]
def test_llg_formatting():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
name = 'llgtest'
llg = LLG(t, m_init, Ms, alpha, gamma, name)
mif_string = llg.get_mif()
assert mif_string[0] == 'S'
assert mif_string[-1] == '\n'
assert mif_string[-2] == '\n'
| from llg import LLG
def test_llg_mif():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
llg = LLG(t, m_init, Ms, alpha, gamma)
mif_string = llg.get_mif()
lines = mif_string.split('\n')
assert 'Specify Oxs_RungeKuttaEvolve {' in lines[0]
line2 = lines[1].split()
assert float(line2[1]) == alpha
line3 = lines[2].split()
assert float(line3[1]) == gamma
line8 = lines[8].split()
assert float(line8[1]) == t
line11 = lines[11].split()
assert float(line11[1]) == Ms
line13 = lines[13].split()
assert float(line13[1][1:]) == m_init[0]
assert float(line13[2]) == m_init[1]
assert float(line13[3][:-1]) == m_init[2]
def test_llg_formatting():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
llg = LLG(t, m_init, Ms, alpha, gamma)
mif_string = llg.get_mif()
assert mif_string[0] == 'S'
assert mif_string[-1] == '\n'
assert mif_string[-2] == '\n'
| Python | 0.000001 |
22252d6978f237a2a46415dcf54d4adbed92b1ce | Add LLG tests. | pyoommf/test_llg.py | pyoommf/test_llg.py | from llg import LLG
def test_llg_mif():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
llg = LLG(t, m_init, Ms, alpha, gamma)
mif_string = llg.get_mif()
lines = mif_string.split('\n')
assert 'Specify Oxs_RungeKuttaEvolve {' in lines[0]
line2 = lines[1].split()
assert float(line2[1]) == alpha
line3 = lines[2].split()
assert float(line3[1]) == gamma
line8 = lines[8].split()
assert float(line8[1]) == t
line11 = lines[11].split()
assert float(line11[1]) == Ms
line13 = lines[13].split()
assert float(line13[1][1:]) == m_init[0]
assert float(line13[2]) == m_init[1]
assert float(line13[3][:-1]) == m_init[2]
def test_llg_formatting():
t = 1.5e-9
m_init = (0, 1, 0)
Ms = 1e6
alpha = 0.01
gamma = 2.21e5
llg = LLG(t, m_init, Ms, alpha, gamma)
mif_string = llg.get_mif()
assert mif_string[0] == 'S'
assert mif_string[-1] == '\n'
assert mif_string[-2] == '\n'
| Python | 0 | |
b21fbb09b33e40a33ad3ea33b0394fed421c8a6e | add num02 | pythonTest/num02.py | pythonTest/num02.py | def reverse(x):
changeTuple=tuple(x)
reverseTuple=changeTuple[::-1]
print(''.join(reverseTuple))
test = "this is test string"
reverse(test)
| Python | 0.999971 | |
edc35e4aefe336eb1bf02dbf7104925389276fa6 | Add shellcheck for sh filetype | pythonx/lints/sh.py | pythonx/lints/sh.py | # -*- coding: utf-8 -*-
from validator import Validator
class Sh(Validator):
__filetype__ = "sh"
checker = "shellcheck"
args = "-x -f gcc"
regex = r"""
.+:
(?P<lnum>\d+):
(?P<col>\d+):
.*
\s
(
(?P<error>error)
|
(?P<warning>warning)
):
\s
(?P<text>.*)"""
| Python | 0 | |
c25cebf31648466111cb3d576e0a398bb4220ccf | Add test for sabnzbd cleanupfilename.py | sabnzbd/test_cleanupfilename.py | sabnzbd/test_cleanupfilename.py | import unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
9dbc755a17fbea3fbec52191d1e7bac60e5995e9 | test links in the docs | test_links.py | test_links.py | #!/usr/bin/env python
import os
import time
import yaml
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
current = os.path.split(os.path.realpath(__file__))[0]
yaml_file = "{0}/mkdocs.yml".format(current)
mkdocs = yaml.load(open(yaml_file))['pages']
host='http://127.0.0.1:8000'
page_filters = [
'index.md',
'resources.md',
'updates.md',
'javadoc.md',
'iOSdoc.md',
'server/sdk',
'server/old'
]
link_filters = [
'mailto:',
'im_android_api_docs',
'jmessage_ios_appledoc_html',
'www.mkdocs.org',
# 'www.jiguang.cn',
# 'blog.jiguang.cn',
'sdkfiledl.jiguang.cn',
# 'developer.apple.com',
'developer.android.com',
'google.com',
'wikipedia.org',
# 'github.com',
'api.jpush.cn',
'report.jpush.cn',
'device.jpush.cn',
'admin.jpush.cn',
'api.im.jpush.cn',
'report.im.jpush.cn',
'api.sms.jpush.cn',
]
def extract_value_from_list(mkdocs):
pages = []
for product in mkdocs:
for item in product.values():
if isinstance(item, str):
pages.append(item)
else:
page = extract_value_from_list(item)
pages.extend(page)
return pages
def is_valid_page(item):
return _is_valid(page_filters, item)
def is_valid_link(item):
if not item or item.startswith('#'):
return False
return _is_valid(link_filters, item)
def _is_valid(filters, item):
for filter in filters:
if filter in item:
return False
return True
def build_link(base, path):
if path.startswith('../') or path.startswith('./'):
return urljoin(base, path)
return path
def get_links(url):
links = []
html = requests.get(url)
bs = BeautifulSoup(html.content, 'html.parser')
for link in bs.find('div', { 'id': 'content' }).findAll('a'):
if 'href' in link.attrs and is_valid_link(link.attrs['href']):
links.append({'text': link.get_text(), 'href': link.attrs['href']})
return links
if __name__ == '__main__':
pages = extract_value_from_list(mkdocs)
for page in pages:
msg = "\nworking on " + page + ":\n"
if is_valid_page(page):
url = host+'/'+page.replace('.md', '/')
msg += 'url: ' + url + "\n"
links = get_links(url)
if links:
for link in links:
l = build_link(url, link['href'])
r = requests.get(l)
if r.status_code != 200:
print(msg + link['href'] + ' => ' + l)
print(link)
print(r.status_code)
# time.sleep(1)
else:
pass
# print("This page doesn't have valid links")
else:
pass
# print("skip...")
| Python | 0 | |
d56b1623a278d61ff8b113b95534ce4dd6682e25 | fix bug 1018349 - migration | alembic/versions/1baef149e5d1_bug_1018349_add_coalesce_to_max_sort_.py | alembic/versions/1baef149e5d1_bug_1018349_add_coalesce_to_max_sort_.py | """bug 1018349 - add COALESCE to max(sort) when adding a new product
Revision ID: 1baef149e5d1
Revises: 26521f842be2
Create Date: 2014-06-25 15:04:37.934064
"""
# revision identifiers, used by Alembic.
revision = '1baef149e5d1'
down_revision = '26521f842be2'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['add_new_product.sql'])
def downgrade():
load_stored_proc(op, ['add_new_product.sql'])
| Python | 0 | |
32420b44500caf48ade628bc4799fe91ad39e2b8 | add unit test for integer arithmetic | tests/core.py | tests/core.py | #! /usr/bin/env python3
#
# Copyright (c) 2015 Joost van Zwieten
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import pylang.core, pylang.ee
import operator
def operator_rtzdiv(l, r):
if isinstance(l, pylang.core.Expression):
assert isinstance(r, pylang.core.Expression)
value = pylang.core.rtzdiv._operator_call(l, r)
if value == NotImplemented:
raise TypeError
else:
return value
else:
assert not isinstance(r, pylang.core.Expression)
sign = 1 if l*r > 0 else -1
return sign*(abs(l)//abs(r))
def eval_expression(expression):
module = pylang.core.Module()
test, entry = module.define_function('test', expression.dtype)
entry.ret(expression)
mod = pylang.ee.compile_and_load(module)
return mod.test()
def compare(dtype, op, *args):
return op(*args) == eval_expression(op(*map(dtype, args)))
class TestSignedIntegerArithmetic(unittest.TestCase):
dtypes = pylang.core.int8_t, pylang.core.int16_t, pylang.core.int32_t, \
pylang.core.int64_t
def test_neg(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.neg, 2))
def test_add(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.add, 2, 3))
def test_sub(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.sub, 2, 3))
def test_mul(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.mul, 2, 3))
def test_truediv(self):
for dtype in self.dtypes:
with self.assertRaises(TypeError):
compare(dtype, operator.truediv, 2, 3)
def test_floordiv(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.floordiv, 2, 3))
self.assertTrue(compare(dtype, operator.floordiv, 3, 3))
self.assertTrue(compare(dtype, operator.floordiv, 7, 3))
self.assertTrue(compare(dtype, operator.floordiv, -1, 3))
self.assertTrue(compare(dtype, operator.floordiv, -3, 3))
self.assertTrue(compare(dtype, operator.floordiv, -1, -3))
self.assertTrue(compare(dtype, operator.floordiv, -3, -3))
self.assertTrue(compare(dtype, operator.floordiv, 1, -3))
self.assertTrue(compare(dtype, operator.floordiv, 3, -3))
def test_rtzdiv(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator_rtzdiv, 2, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, 3, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, 7, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, -1, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, -3, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, -1, -3))
self.assertTrue(compare(dtype, operator_rtzdiv, -3, -3))
self.assertTrue(compare(dtype, operator_rtzdiv, 1, -3))
self.assertTrue(compare(dtype, operator_rtzdiv, 3, -3))
class TestUnsignedIntegerArithmetic(unittest.TestCase):
dtypes = pylang.core.uint8_t, pylang.core.uint16_t, pylang.core.uint32_t, \
pylang.core.uint64_t
def test_neg(self):
for dtype in self.dtypes:
with self.assertRaises(TypeError):
compare(dtype, operator.neg, 2)
def test_add(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.add, 2, 3))
def test_sub(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.sub, 3, 2))
def test_mul(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.mul, 2, 3))
def test_truediv(self):
for dtype in self.dtypes:
with self.assertRaises(TypeError):
compare(dtype, operator.truediv, 2, 3)
def test_floordiv(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator.floordiv, 2, 3))
self.assertTrue(compare(dtype, operator.floordiv, 3, 3))
self.assertTrue(compare(dtype, operator.floordiv, 7, 3))
def test_rtzdiv(self):
for dtype in self.dtypes:
self.assertTrue(compare(dtype, operator_rtzdiv, 2, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, 3, 3))
self.assertTrue(compare(dtype, operator_rtzdiv, 7, 3))
# vim: ts=4:sts=4:sw=4:et
| Python | 0.000508 | |
d836571a8dff59371d156dffea7290228305ca17 | add tests for reading shapefiles via ogr | tests/python_tests/ogr_test.py | tests/python_tests/ogr_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from utilities import execution_path
import os, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'ogr' in mapnik.DatasourceCache.instance().plugin_names():
# Shapefile initialization
def test_shapefile_init():
s = mapnik.Ogr(file='../../demo/data/boundaries.shp',layer_by_index=0)
e = s.envelope()
assert_almost_equal(e.minx, -11121.6896651, places=7)
assert_almost_equal(e.miny, -724724.216526, places=6)
assert_almost_equal(e.maxx, 2463000.67866, places=5)
assert_almost_equal(e.maxy, 1649661.267, places=3)
# Shapefile properties
def test_shapefile_properties():
s = mapnik.Ogr(file='../../demo/data/boundaries.shp',layer_by_index=0,encoding='latin1')
f = s.features_at_point(s.envelope().center()).features[0]
eq_(f['CGNS_FID'], u'6f733341ba2011d892e2080020a0f4c9')
eq_(f['COUNTRY'], u'CAN')
eq_(f['F_CODE'], u'FA001')
eq_(f['NAME_EN'], u'Quebec')
# this seems to break if icu data linking is not working
eq_(f['NOM_FR'], u'Qu\xe9bec')
eq_(f['NOM_FR'], u'Québec')
eq_(f['Shape_Area'], 1512185733150.0)
eq_(f['Shape_Leng'], 19218883.724300001)
# Check that the deprecated interface still works,
# remove me once the deprecated code is cleaned up
eq_(f.properties['Shape_Leng'], 19218883.724300001)
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
| Python | 0 | |
bf8b19d19ea2a5f39cba90ca815560a89e476c6c | Create Output.py | Output.py | Output.py | import os, time, sys
from threading import Thread
pipe_name = '/Users/stevenrelin/Documents/pipe_eye.txt'
def child( ):
pipeout = os.open(pipe_name, os.O_WRONLY)
counter = 0
while True:
time.sleep(1)
os.write(pipeout, 'Number %03d\n' % counter)
counter = (counter+1) % 5
if not os.path.exists(pipe_name):
os.mkfifo(pipe_name)
t = Thread(target=child)
t.start()
| Python | 0.000108 | |
319d2115ad1130247caa5734572b7676e5bb0a6d | add offline plot of nexrad climo | scripts/feature/nexrad/climo.py | scripts/feature/nexrad/climo.py | import matplotlib.pyplot as plt
from pyiem.plot import maue
import datetime
import numpy as np
avgs = np.zeros((24, 53), 'f')
cnts = np.zeros((24, 53), 'f')
def make_y(ts):
if ts.hour >= 5:
return ts.hour - 5
return ts.hour + 19
maxv = 0
for line in open('nexrad35.txt'):
tokens = line.split(",")
ts = datetime.datetime.strptime(tokens[0], '%Y%m%d%H%M')
coverage = float(tokens[1])
if coverage > maxv:
print line
maxv = coverage
if ts.year > 1007:
avgs[make_y(ts), int(ts.strftime("%j"))/7-1] += coverage
cnts[make_y(ts), int(ts.strftime("%j"))/7-1] += 1.0
pixels = 6000 * 2400
(fig, ax) = plt.subplots(1, 1)
cmap = maue()
x, y = np.meshgrid(np.arange(53), np.arange(24))
m = ax.imshow(avgs / cnts / 100. * pixels, aspect='auto', interpolation='bicubic',
cmap=plt.get_cmap("gist_ncar"), extent=[0,53,24,0])
plt.colorbar(m, label='square miles, Iowa = 56,000')
ax.set_ylim(0, 24)
ax.set_yticks((0, 4, 8, 12, 16, 20))
ax.set_xticks(range(0, 55, 7))
ax.set_xticklabels(('Jan 1', 'Feb 19', 'Apr 8', 'May 27', 'Jul 15',
'Sep 2', 'Oct 21', 'Dec 9'))
ax.set_yticks(range(0, 24, 4))
ax.set_yticklabels(("Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM"))
ax.set_ylabel("Central Daylight Time")
box = ax.get_position()
ax.set_position([box.x0, box.y0,
box.width * 0.95, box.height])
ax2 = ax.twinx()
ax2.set_yticks(range(0, 24, 4))
ax2.set_yticklabels(("5", "9", "13", "17", "21", "1"))
ax2.set_ylabel("UTC")
ax2.set_ylim(0, 24)
box = ax2.get_position()
ax2.set_position([box.x0, box.y0,
box.width * 0.95, box.height])
#ax.bar(np.arange(1, 366), avgs[:-1] / cnts[:-1] / 100. * pixels, fc='b', ec='b')
#ax.set_xticks((1,32,60,91,121,152,182,213,244,274,305,335,365))
#ax.set_xticklabels( ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec') )
ax.grid(True, color='white')
#ax.set_xlim(0, 366)
#ax.set_ylabel("Areal Coverage of 35+ dbZ [sq miles], Iowa=56,000")
ax.set_title(("Climatology of 35+ dbZ Returns over CONUS\n"
"Based on 1996-2015 IEM Composites of NWS NEXRAD"))
ax.set_xlabel("Partitioned by Week of Year, Smoothed")
fig.savefig('test.png')
| Python | 0 | |
5f20962d300850200ed796f941bf98662736d4da | Add server.py to serve files in the user's specified share dir | sandwich/server.py | sandwich/server.py | from os import curdir, sep, path
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import config
class StaticServeHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not config.shared_directory:
self.send_error(404, 'User not sharing files')
return
try:
f = open(path.expanduser(config.shared_directory) + self.path, 'rb')
self.send_response(200)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
class SandwichServer(object):
def __init__(self, ):
pass
def run(self, port):
try:
self.port = port
self.server = HTTPServer(('', self.port), StaticServeHandler)
print 'started httpserver...'
self.server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
self.server.socket.close()
if __name__ == '__main__':
ss = SandwichServer()
ss.run(8000)
| Python | 0 | |
d774bb7caa9637e4d453e19fcc43ee7b9b17702c | add script for computing WWA % times | scripts/sbw/wfo_time_percent.py | scripts/sbw/wfo_time_percent.py | import iemdb
import numpy
import network
nt = network.Table("WFO")
POSTGIS = iemdb.connect('postgis', bypass=True)
pcursor = POSTGIS.cursor()
import mx.DateTime
sts = mx.DateTime.DateTime(2005,10,1)
ets = mx.DateTime.DateTime(2013,1,1)
interval = mx.DateTime.RelativeDateTime(hours=3)
bins = (ets - sts).minutes
for wfo in nt.sts.keys():
wfo = wfo[-3:]
counts = numpy.zeros( (int(bins)), 'f')
pcursor.execute("""SELECT distinct issue, expire from warnings where wfo = '%s'
and issue > '2005-10-01' and expire < '2013-01-01' and gtype = 'C'
and phenomena = 'SC' """ % (wfo,))
for row in pcursor:
issue = mx.DateTime.strptime(row[0].strftime("%Y%m%d%H%M"), "%Y%m%d%H%M")
expire = mx.DateTime.strptime(row[1].strftime("%Y%m%d%H%M"), "%Y%m%d%H%M")
idx1 = int((issue - sts).minutes)
idx2 = int((expire - sts).minutes)
counts[idx1:idx2] = 1
print "%s,%.4f" % (wfo, numpy.sum( counts ) / float(bins))
pcursor.execute("""INSERT into ferree3(wfo, percentage) values (%s,%s)""",
(wfo, float(numpy.sum( counts ) / float(bins))))
POSTGIS.commit()
pcursor.close()
POSTGIS.commit()
POSTGIS.close() | Python | 0 | |
bcb65eb61c711b184114910c8d8c641278db5130 | Add frozen/equilibrium wake model helpers | bem/models.py | bem/models.py | import numpy as np
class FrozenWakeAerodynamics:
"""Calculate induced flows once in given initial conditions"""
def __init__(self, bem_model, initial_wind_speed,
initial_rotor_speed, initial_pitch_angle):
self.bem_model = bem_model
# Find the frozen wake state
self.wake_state = bem_model.solve_wake(initial_wind_speed,
initial_rotor_speed,
initial_pitch_angle)
def forces(self, wind_speed, rotor_speed, pitch_angle, rho):
shape_test = (np.asarray(wind_speed) *
np.asarray(rotor_speed) *
np.asarray(pitch_angle))
if shape_test.ndim == 0:
# Single value
factors = self.wake_state / [wind_speed, rotor_speed]
factors[:, 1] /= self.bem_model.radii
forces = self.bem_model.forces(wind_speed, rotor_speed,
pitch_angle, rho, factors)
elif shape_test.ndim == 1:
# Multiple values
inputs = np.zeros((len(shape_test), 3))
inputs[:, 0] = wind_speed
inputs[:, 1] = rotor_speed
inputs[:, 2] = pitch_angle
forces = np.zeros((inputs.shape[0], self.wake_state.shape[0], 2))
for i in range(forces.shape[0]):
factors = self.wake_state / inputs[i, :2]
factors[:, 1] /= self.bem_model.radii
forces[i] = self.bem_model.forces(*inputs[i], rho=rho,
factors=factors)
else:
raise ValueError("Bad input shapes: {}".format(shape_test.shape))
return forces
class EquilibriumWakeAerodynamics:
"""Calculate induced flow for each requested set of conditions"""
def __init__(self, bem_model):
self.bem_model = bem_model
def forces(self, wind_speed, rotor_speed, pitch_angle, rho):
shape_test = (np.asarray(wind_speed) *
np.asarray(rotor_speed) *
np.asarray(pitch_angle))
if shape_test.ndim == 0:
# Single value
wake_state = self.bem_model.solve_wake(wind_speed,
rotor_speed,
pitch_angle)
factors = wake_state / [wind_speed, rotor_speed]
factors[:, 1] /= self.bem_model.radii
forces = self.bem_model.forces(wind_speed, rotor_speed,
pitch_angle, rho, factors)
elif shape_test.ndim == 1:
# Multiple values
inputs = np.zeros((len(shape_test), 3))
inputs[:, 0] = wind_speed
inputs[:, 1] = rotor_speed
inputs[:, 2] = pitch_angle
forces = np.zeros((inputs.shape[0], self.wake_state.shape[0], 2))
for i in range(forces.shape[0]):
wake_state = self.bem_model.solve_wake(*inputs[i])
factors = wake_state / inputs[i, :2]
factors[:, 1] /= self.bem_model.radii
forces[i] = self.bem_model.forces(*inputs[i], rho=rho,
factors=factors)
else:
raise ValueError("Bad input shapes: {}".format(shape_test.shape))
return forces
| Python | 0 | |
e56c3be6dc3ab8bf31b7ce9a3d3db275b18207f0 | Create sql-all.py | Django/sql-all.py | Django/sql-all.py | $ ./manage.py sqlall name-app
'''
CommandError: App 'name-app' has migrations.
Only the sqlmigrate and sqlflush commands can be used when an app has migrations.
'''
So there before migrate to see it.
| Python | 0.000706 | |
a8ee7f46ffd4611a153538e05749bd99b4a98cbc | add checkPID | check-pid.py | check-pid.py | # โค้ดเซ็คความถูกต้องของบัตรประชาชน
# เขียนโดย วรรณพงษ์ ภัททิยไพบูลย์
# wannaphong@yahoo.com
# https://python3.wannaphong.com
def checkPID(pid):
if(len(pid) != 13): # ถ้า pid ไม่ใช่ 13 ให้คืนค่า False
return False
num=0 # ค่าสำหรับอ้างอิง index list ข้อมูลบัตรประชาชน
num2=13 # ค่าประจำหลัก
listdata=list(pid) # list ข้อมูลบัตรประชาชน
sum=0 # ผลลัพธ์
while num<12:
sum+=int(listdata[num])*(num2-num) # นำค่า num เป็น index list แต่ละตัว * (num2 - num) แล้วรวมเข้ากับ sum
num+=1 # เพิ่มค่า num อีก 1
digit13 = sum%11 # sum หาร 11 เอาเศษ
if digit13==0: # ถ้าเศษ = 0
digit13=1 # ค่าหลักที่ 13 คือ 1
elif digit13==1: # ถ้าเศษ = 1
digit13=0 # ค่าหลักที่ 13 คือ 0
else:
digit13=11-digit13 # ถ้าเศษไม่ใช่กับอะไร ให้เอา 11 - digit13
if digit13==int(listdata[12]): # ถ้าค่าหลักที่ 13 เท่ากับค่าหลักที่ 13 ที่ป้อนข้อมูลมา คืนค่า True
return True
else: # ถ้าค่าหลักที่ 13 ไม่เท่ากับค่าหลักที่ 13 ที่ป้อนข้อมูลมา คืนค่า False
return False
a=checkPID("เลขบัตรประชาชน")
print(a) # ถ้าถูกต้อง คือ True ถ้าไม่ถูก คือ False | Python | 0 | |
43841114f4403b46e0ef077be6e0832ce690dfb2 | add ipy_workdir | IPython/Extensions/ipy_workdir.py | IPython/Extensions/ipy_workdir.py | #!/usr/bin/env python
import IPython.ipapi
ip = IPython.ipapi.get()
import os
workdir = None
def workdir_f(line):
global workdir
dummy,cmd = line.split(None,1)
if os.path.isdir(cmd):
workdir = cmd
print "Set workdir",workdir
elif workdir is None:
print "Please set workdir first by doing e.g. 'workdir q:/'"
else:
print "Execute command in",workdir
cwd = os.getcwd()
os.chdir(workdir)
try:
ip.runlines(cmd)
finally:
os.chdir(cwd)
ip.defalias("workdir",workdir_f)
| Python | 0.000001 | |
b78ba3220a64e9b01b3fc8c61ada0e85dc1157fc | Implement data dumper | oeplatform/dumper.py | oeplatform/dumper.py | import oeplatform.securitysettings as sec
import sqlalchemy as sqla
from subprocess import call
import os
excluded_schemas = [
"information_schema",
"public",
"topology",
"reference",
]
def connect():
engine = _get_engine()
return sqla.inspect(engine)
def _get_engine():
engine = sqla.create_engine(
'postgresql://{0}:{1}@{2}:{3}/{4}'.format(
sec.dbuser,
sec.dbpasswd,
sec.dbhost,
sec.dbport,
sec.dbname))
return engine
insp = connect()
for schema in insp.get_schema_names():
if schema not in excluded_schemas:
if not os.path.exists(sec.datarepowc + schema):
os.mkdir(sec.datarepowc + schema)
for table in insp.get_table_names(schema=schema):
if not table.startswith('_'):
if not os.path.exists(sec.datarepowc + schema + '/' + table):
os.mkdir(sec.datarepowc + schema + '/' + table)
L = ['pg_dump', '-h', sec.dbhost, '-U', sec.dbuser, '-d',
sec.dbname, '-F', 'd', '-f',
sec.datarepowc + schema + '/' + table, '-t',
schema + '.' + table, '-w', ]
print(L)
call(L)
call(['tar', '-zcf',
sec.datarepowc + schema + '/' + table + '.tar.gz',
sec.datarepowc + schema + '/' + table])
call(['rm', '-r',
sec.datarepowc + schema + '/' + table])
| Python | 0.000016 | |
0254fbea5218e332dc0c54af198aa2b29381878b | Composite two smiley on top of the famous Michael jordan crying face | python/composite.py | python/composite.py | import requests
import json
# Composite two smiley on top of the famous Michael jordan crying face.
# A more sophisticated approach would be to extract the face landmarks using facelandmarks and composite something on the different regions.
# https://pixlab.io/#/cmd?id=merge for more info.
req = requests.post('https://api.pixlab.io/merge',
headers={'Content-Type':'application/json'},
data=json.dumps({
'src':'https://pbs.twimg.com/media/CcEfpp0W4AEQVPf.jpg',
'key':'My_Pix_Key',
'cord':[
{
'img': 'http://www.wowpng.com/wp-content/uploads/2016/10/lol-troll-face-png-image-october-2016-370x297.png',
'x': 30,
'y': 320
},
{
'img': 'http://orig08.deviantart.net/67d1/f/2010/216/6/7/lol_face_by_bloodyhalfdemon.png',
'x': 630,
'y': 95
}]
})
)
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic Link: "+ reply['link'])
| Python | 0.999822 | |
8ce580d1f0890f72ab60efa4219de26b64ece897 | Add example skeleton script | example/example.py | example/example.py | #!/usr/bin/env python
import sys
from argparse import ArgumentParser
from getpass import getpass
class BigFixArgParser(ArgumentParser):
name = "hodor.py [options]"
def __init__(self):
description = "A tool for creating a smarter planet"
usage = """Options:
-h, --help Print this help message and exit
-s, --server SERVER[:PORT] REST API server and port
-u, --user USER[:PASSWORD] REST API user and password
-k, --insecure Don't verify the HTTPS connection to the server
-c, --cacert FILE CA certificate used to verify the server's HTTPS
certificate"""
super(BigFixArgParser, self).__init__(add_help=False,
usage=usage, description=description)
self.add_argument('-k', '--insecure', action='store_true')
self.add_argument('-c', '--cacert')
self.add_argument('-u', '--user', required=True)
self.add_argument('-s', '--server', required=True)
def parse_args(self):
self.usage = "{0}\n\n{1}\n\n{2}".format(self.name,
self.description, self.usage)
if '-h' in sys.argv or '--help' in sys.argv:
print(self.usage)
sys.exit()
args = super(BigFixArgParser, self).parse_args()
if ':' not in args.user:
prompt = "Enter password for user '{0}': ".format(args.user)
args.user = args.user + ':' + getpass(prompt)
return args
parser = BigFixArgParser()
print(parser.parse_args())
| Python | 0.000001 | |
30a4cb3794d52d1743dc482f2c2a83ced1dcbd90 | Make a clean report along with BLEU scores | session2/report.py | session2/report.py | import argparse, codecs, logging
import unicodecsv as csv
from nltk.align.bleu_score import bleu
import numpy as np
def setup_args():
parser = argparse.ArgumentParser()
parser.add_argument('src', 'Source file')
parser.add_argument('target', 'Translated data')
parser.add_argument('gold', 'Gold output file')
parser.add_argument('model', 'Model Name')
args = parser.parse_args()
return args
def main():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
args = setup_args()
logging.info(args)
f = codecs.open('%s-%s.csv'% (args.out, args.suffix), 'w')
csv_f = csv.writer(f, delimiter=',', encoding='utf-8')
src_lines = codecs.open(args.src, 'r', 'utf-8').readlines()
src_lines_nounk = codecs.open(args.src + '.nounk', 'r', 'utf-8').readlines()
target_lines = codecs.open(args.target, 'r', 'utf-8').readlines()
target_lines_nounk = codecs.open(args.target + '.nounk', 'r', 'utf-8').readlines()
gold_lines = codecs.open(args.gold, 'r', 'utf-8').readlines()
gold_lines_nounk = codecs.open(args.gold + '.nounk', 'r', 'utf-8').readlines()
data = ['Src', 'Src_UNK', 'Target_UNK', 'Target', 'Gold_UNK', 'Gold', 'BLEU']
num_lines = len(gold_lines)
logging.info('Num Lines: %d'% num_lines)
bleu_scores = []
for index in range(num_lines):
data = []
data.append(src_lines_nounk[index].strip())
data.append(src_lines[index].strip())
data.append(target_lines[index].strip())
data.append(target_lines_nounk[index].strip())
data.append(gold_lines[index].strip())
data.append(gold_lines_nounk[index].strip())
bleu_score = bleu(target_lines[index].split(), [gold_lines[index].split()], [1])
bleu_scores.append(bleu_score)
data.append(str(bleu_score))
csv_f.writerow(data)
logging.info('Average BLEU Score: %f'% np.mean(bleu_scores))
if __name__ == '__main__':
main() | Python | 0.000001 | |
31bb487a2f75268cb0b60ef4539935df83b68a84 | Add auto solver for "W3-Radix Sorts". | quiz/3-radixsort.py | quiz/3-radixsort.py | #!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
| Python | 0 | |
85e7d3b4f69919b274e597b7e8f73377e7d28698 | Add another script for testing purposes | process_datasets.py | process_datasets.py | """
For testing purposes: Process a specific page on the Solr index.
"""
import os
import sys
import datetime
import json
import uuid
import pandas
import xml.etree.ElementTree as ET
import urllib
from d1graphservice.people import processing
from d1graphservice import settings
from d1graphservice import dataone
from d1graphservice import util
from d1graphservice import validator
from d1graphservice import store
from d1graphservice import multi_store
from d1graphservice.people import processing
from d1graphservice.people.formats import eml
from d1graphservice.people.formats import dryad
from d1graphservice.people.formats import fgdc
if __name__ == "__main__":
# query = "https://cn.dataone.org/cn/v1/query/solr/?fl=author,identifier,title,authoritativeMN&q=author:*Jones*Matthew*&rows=1000&start=0"
# query = "https://cn.dataone.org/cn/v1/query/solr/?fl=author,identifier,title,authoritativeMN&q=author:*Jones*&rows=20&start=0"
query = "https://cn.dataone.org/cn/v1/query/solr/?fl=author,identifier,title,authoritativeMN&q=author:Jeremy*Jones*&rows=20&start=0"
cache_dir = "/Users/mecum/src/d1dump/documents/"
formats_map = util.loadFormatsMap()
namespaces = {
"foaf": "http://xmlns.com/foaf/0.1/",
"dcterms": "http://purl.org/dc/terms/",
"datacite": "http://purl.org/spar/datacite/",
"owl": "http://www.w3.org/2002/07/owl#",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"glview": "http://schema.geolink.org/dev/view/",
"d1people": "https://dataone.org/person/",
"d1org": "https://dataone.org/organization/",
"d1resolve": "https://cn.dataone.org/cn/v1/resolve/",
"prov": "http://www.w3.org/ns/prov#",
"d1node": "https://cn.dataone.org/cn/v1/node/",
"d1landing": "https://search.dataone.org/#view/",
"d1repo": "https://cn.dataone.org/cn/v1/node/"
}
# Load triple stores
stores = {
'people': store.Store("http://localhost:3030/", 'ds', namespaces),
'organizations': store.Store("http://localhost:3131/", 'ds', namespaces),
'datasets': store.Store("http://localhost:3232/", 'ds', namespaces)
}
for store_name in stores:
stores[store_name].delete_all()
stores = multi_store.MultiStore(stores, namespaces)
vld = validator.Validator()
page_xml = util.getXML(query)
documents = page_xml.findall(".//doc")
for doc in documents:
identifier = doc.find(".//str[@name='identifier']").text
print identifier
scimeta = dataone.getScientificMetadata(identifier, cache=True)
if scimeta is None:
continue
records = processing.extractCreators(identifier, scimeta)
# Add records and organizations
people = [p for p in records if 'type' in p and p['type'] == 'person']
organizations = [o for o in records if 'type' in o and o['type'] == 'organization']
# Always do organizations first, so peoples' organization URIs exist
for organization in organizations:
organization = vld.validate(organization)
stores.addOrganization(organization)
for person in people:
person = vld.validate(person)
stores.addPerson(person)
stores.addDataset(doc, scimeta, formats_map)
stores.save()
| Python | 0 | |
d959587c168424ed0d8e91a4a20ea36076a646b7 | add forgotten __init__.py | dhcpcanon/__init__.py | dhcpcanon/__init__.py | __version__ = "0.1"
__author__ = "juga"
| Python | 0.00035 | |
98fe743217ebd7868d11d8518f25430539eae5a0 | add regrresion example | example/simple_regression_example.py | example/simple_regression_example.py | from sklearn import datasets, metrics, preprocessing
from stacked_generalization.lib.stacking import StackedRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
boston = datasets.load_boston()
X = preprocessing.StandardScaler().fit_transform(boston.data)
breg = LinearRegression()
regs = [RandomForestRegressor(n_estimators=50, random_state=1),
GradientBoostingRegressor(n_estimators=25, random_state=1),
ExtraTreesRegressor(),
Ridge(random_state=1)]
sr = StackedRegressor(breg,
regs,
n_folds=3,
verbose=0)
sr.fit(X, boston.target)
score = metrics.mean_squared_error(sr.predict(X), boston.target)
print ("MSE: %f" % score) | Python | 0 | |
896270bcd99b26e4128fd35dd3821a59807ae850 | Add the model.py file declarative generated from mysql. | doc/model/model_decla.py | doc/model/model_decla.py | #autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
| Python | 0 | |
7aab44f006a6412d8f169c3f9a801f41a6ea0a95 | Remove start dates for the second time from draft dos2 briefs | migrations/versions/880_remove_invalid_draft_dos2_brief_dates_again.py | migrations/versions/880_remove_invalid_draft_dos2_brief_dates_again.py | """Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propogated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
| Python | 0.000002 | |
77ccb8db873c31ad2bd8318118410abab3141312 | add __version__.py | europilot/__version__.py | europilot/__version__.py | __title__ = 'europilot'
__description__ = 'End to end driving simulation inside Euro Truck Simulator 2'
__version__ = '0.0.1'
| Python | 0.001013 | |
8c82465a08f5a601e6a43a8eb675136fc3678954 | Create lc960.py | LeetCode/lc960.py | LeetCode/lc960.py | def createArray(dims) :
if len(dims) == 1:
return [0 for _ in range(dims[0])]
return [createArray(dims[1:]) for _ in range(dims[0])]
def f(A, x, y):
m = len(A)
for i in range(m):
if A[i][x] > A[i][y]:
return 0
return 1
class Solution(object):
def minDeletionSize(self, A):
"""
:type A: List[str]
:rtype: int
"""
n = len(A[0])
g = createArray([n, n])
for i in range(n):
for j in range(i+1, n):
g[i][j] = f(A, i, j)
dp = createArray([n])
for i in range(0, n):
dp[i] = 1
for j in range(0, i):
if g[j][i] == 1:
if dp[i] < dp[j] + 1:
dp[i] = dp[j] + 1
return n - max(dp)
| Python | 0.000001 | |
3ebae0f57ae3396213eb28b6fc7a23ff3e3c4980 | Create file and add pseudocode | uml-to-cpp.py | uml-to-cpp.py | # Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names | Python | 0.000001 | |
068b33dc89350615a96e9d3856df4b6ca30ca6c5 | add distributed training | sres_multi_gpu_train.py | sres_multi_gpu_train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import sres
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/sres_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the model.
Args:
scope: unique prefix string identifying the tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images
real_images = sres.distorted_inputs()
# Build inference Graph.
fake_images = sres.generator(real_images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = sres.loss(real_images, fake_images)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % sres.TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def train():
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Create an optimizer that performs gradient descent.
opt = tf.train.AdamOptimizer(sres.INITIAL_LEARNING_RATE, beta1=0.5)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (sres.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the model. This function
# constructs the entire model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Calculate the gradients for the batch of data on this tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
train_op = opt.apply_gradients(grads, global_step=global_step)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.3f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
sres.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| Python | 0 | |
66e707a280c193a88ddd472758c5adf6d09d9e94 | Update to clean_tenants.py to speed up tenant deletion | utils/clean_tenants.py | utils/clean_tenants.py | #!/usr/bin/env python
from __future__ import (print_function, unicode_literals, division,
absolute_import)
import sys
import threading
from builtins import input
try:
import Queue as queue
except ImportError:
import queue
import dfs_sdk
from dfs_sdk import scaffold
def _deleter(args, api, q):
while True:
tenant = q.get()
tpath = tenant['path']
apps = api.app_instances.list(tenant=tpath)
inits = api.initiators.list(tenant=tpath)
if ((args.non_empty or len(apps) == 0) and
args.clean):
for ai in apps:
ai.set(admin_state='offline',
force=True,
tenant=tpath)
ai.delete(tenant=tpath)
for init in inits:
if init.tenant == tenant:
try:
init.delete(tenant=tpath, force=True)
except dfs_sdk.exceptions.ApiInvalidRequestError as e:
print(e)
except dfs_sdk.exceptions.ApiNotFoundError as e:
print(e)
if args.openstack_only and tenant['name'].startswith("OS-"):
print("Openstack Tenant: ", tpath)
try:
tenant.delete(tenant=tpath)
except dfs_sdk.exceptions.ApiInvalidRequestError as e:
print(e)
elif not args.openstack_only:
print("Tenant", tpath)
try:
tenant.delete(tenant=tpath)
except dfs_sdk.exceptions.ApiInvalidRequestError as e:
print(e)
q.task_done()
def main(args):
api = scaffold.get_api()
to_delete = queue.Queue()
for tenant in api.tenants.list():
if tenant['path'].endswith('root'):
continue
if args.openstack_only and not tenant['name'].startswith('OS-'):
continue
to_delete.put(tenant)
yes = False
if args.yes:
yes = True
else:
newq = queue.Queue()
while True:
try:
tenant = to_delete.get(block=False)
except queue.Empty:
break
print(tenant['path'])
newq.put(tenant)
to_delete = newq
resp = input("Are you sure you want to delete these? [Y/N]\n")
if resp.strip() in ("Y", "yes"):
yes = True
if yes:
print("Deleting")
for _ in args.workers:
thread = threading.Thread(target=_deleter,
args=(args, api, to_delete))
thread.daemon = True
thread.start()
to_delete.join()
sys.exit(0)
else:
print("Cancelling")
sys.exit(1)
if __name__ == "__main__":
parser = scaffold.get_argparser()
parser.add_argument("-c", "--clean", action='store_true',
help="Clean empty tenants")
parser.add_argument("-o", "--openstack-only", action='store_true',
help="Clean only openstack tenants")
parser.add_argument("-y", "--yes", action='store_true',
help="DANGER!!! Bypass confirmation prompt")
parser.add_argument("-n", "--non-empty", action='store_true',
help="Clean non-empty tenants as well")
parser.add_argument("-w", "--workers", default=5)
args = parser.parse_args()
main(args)
sys.exit(0)
| #!/usr/bin/env python
from __future__ import (print_function, unicode_literals, division,
absolute_import)
import sys
from builtins import input
from dfs_sdk import scaffold
def main(args):
api = scaffold.get_api()
to_delete = []
for tenant in api.tenants.list():
if tenant['path'].endswith('root'):
continue
tpath = tenant['path']
if ((not api.app_instances.list(tenant=tpath) or args.non_empty) and
args.clean):
if args.openstack_only:
if tenant['name'].startswith("OS-"):
print("Openstack Tenant: ", tpath)
to_delete.append(tenant)
else:
print("Tenant", tpath)
to_delete.append(tenant)
yes = False
if args.yes:
yes = True
else:
resp = input("Are you sure you want to delete these? [Y/N]\n")
if resp.strip() in ("Y", "yes"):
yes = True
if yes:
print("Deleting")
for t in to_delete:
for ai in api.app_instances.list(tenant=t['path']):
ai.set(admin_state="offline", tenant=t['path'], force=True)
ai.delete(tenant=t['path'])
t.delete()
sys.exit(0)
else:
print("Cancelling")
sys.exit(1)
if __name__ == "__main__":
parser = scaffold.get_argparser()
parser.add_argument("-c", "--clean", action='store_true',
help="Clean empty tenants")
parser.add_argument("-o", "--openstack-only", action='store_true',
help="Clean only openstack tenants")
parser.add_argument("-y", "--yes", action='store_true',
help="DANGER!!! Bypass confirmation prompt")
parser.add_argument("-n", "--non-empty", action='store_true',
help="Clean non-empty tenants as well")
args = parser.parse_args()
main(args)
sys.exit(0)
| Python | 0 |
e2ed635fb3289a5b45f5f15cd1eb543d87fb93d7 | Add test for posting a review through the view | wafer/talks/tests/test_review_views.py | wafer/talks/tests/test_review_views.py | """Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
| Python | 0 | |
466410249867b3eadbe5e2b59c46c95ecd288c6c | Add script for word counts | python_scripts/solr_query_fetch_all.py | python_scripts/solr_query_fetch_all.py | #!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
| Python | 0.000001 | |
3f69fae4f15efff515b82f216de36dd6d57807e9 | add ci_test.py file for ci | settings/ci_test.py | settings/ci_test.py | __author__ = 'quxl'
from base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| Python | 0.000001 | |
50f0e040f363e52a390efc6acd1bc0bc0ddcabcc | Add test funcs in report_reader for DB reading | report_reader.py | report_reader.py | import pymongo as pm
def connectDB():
conn = pm.MongoClient('localhost', 27017)
db = conn.get_database('report_db')
return db
def getColList(db):
return db.collection_names()
def getDocNum(col):
return col.find().count()
def match(col, matchDict):
return list(col.find(matchDict))
def main():
db = connectDB()
print(getColList(db))
col = db['col20170503']
print(getDocNum(col))
print(match(col, {'school':'HFUT'}))
if __name__ == '__main__':
main() | Python | 0.000001 | |
1c9d398be7f99f15fb550adca31f3366870930e3 | Set debug to false in prod, otherwise true | wazimap_np/settings.py | wazimap_np/settings.py | # pull in the default wazimap settings
from wazimap.settings import * # noqa
DEBUG = False if (os.environ.get('APP_ENV', 'dev') == 'prod') else True
# install this app before Wazimap
INSTALLED_APPS = ['wazimap_np'] + INSTALLED_APPS
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://wazimap_np:wazimap_np@localhost/wazimap_np')
DATABASES['default'] = dj_database_url.parse(DATABASE_URL)
DATABASES['default']['ATOMIC_REQUESTS'] = True
SCHEME = 'http' if (os.environ.get('APP_ENV', 'dev') == 'dev') else 'https'
URL = SCHEME+'://'+'nepalmap.org'
# Localise this instance of Wazimap
WAZIMAP['name'] = 'NepalMap'
# NB: this must be https if your site supports HTTPS.
WAZIMAP['url'] = URL
WAZIMAP['country_code'] = 'NP'
WAZIMAP['profile_builder'] = 'wazimap_np.profiles.get_census_profile'
WAZIMAP['levels'] = {
'country': {
'plural': 'countries',
'children': ['district']
},
'district': {
'plural': 'districts',
'children': ['vdc']
},
'vdc': {
'plural': 'vdcs',
'children': []
}
}
WAZIMAP['comparative_levels'] = ['country', 'district', 'vdc']
WAZIMAP['geometry_data'] = {
'country': 'geo/country.topojson',
'district': 'geo/district.topojson',
'vdc': 'geo/vdc.topojson'
}
WAZIMAP['ga_tracking_id'] = os.environ.get('GA_TRACKING_ID')
WAZIMAP['twitter'] = '@codefornepal'
WAZIMAP['map_centre'] = [28.229651, 83.8165328]
WAZIMAP['map_zoom'] = 7
# Custom Settings
WAZIMAP['email'] = 'nepalmap@codefornepal.org'
WAZIMAP['github'] = 'https://github.com/Code4Nepal/nepalmap_app'
WAZIMAP['tagline'] = 'Explore and understand Nepal using data'
WAZIMAP['facebook'] = 'codefornepal'
WAZIMAP['twittercard'] = True
| # pull in the default wazimap settings
from wazimap.settings import * # noqa
# install this app before Wazimap
INSTALLED_APPS = ['wazimap_np'] + INSTALLED_APPS
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://wazimap_np:wazimap_np@localhost/wazimap_np')
DATABASES['default'] = dj_database_url.parse(DATABASE_URL)
DATABASES['default']['ATOMIC_REQUESTS'] = True
SCHEME = 'http' if (os.environ.get('APP_ENV', 'dev') == 'dev') else 'https'
URL = SCHEME+'://'+'nepalmap.org'
# Localise this instance of Wazimap
WAZIMAP['name'] = 'NepalMap'
# NB: this must be https if your site supports HTTPS.
WAZIMAP['url'] = URL
WAZIMAP['country_code'] = 'NP'
WAZIMAP['profile_builder'] = 'wazimap_np.profiles.get_census_profile'
WAZIMAP['levels'] = {
'country': {
'plural': 'countries',
'children': ['district']
},
'district': {
'plural': 'districts',
'children': ['vdc']
},
'vdc': {
'plural': 'vdcs',
'children': []
}
}
WAZIMAP['comparative_levels'] = ['country', 'district', 'vdc']
WAZIMAP['geometry_data'] = {
'country': 'geo/country.topojson',
'district': 'geo/district.topojson',
'vdc': 'geo/vdc.topojson'
}
WAZIMAP['ga_tracking_id'] = os.environ.get('GA_TRACKING_ID')
WAZIMAP['twitter'] = '@codefornepal'
WAZIMAP['map_centre'] = [28.229651, 83.8165328]
WAZIMAP['map_zoom'] = 7
# Custom Settings
WAZIMAP['email'] = 'nepalmap@codefornepal.org'
WAZIMAP['github'] = 'https://github.com/Code4Nepal/nepalmap_app'
WAZIMAP['tagline'] = 'Explore and understand Nepal using data'
WAZIMAP['facebook'] = 'codefornepal'
WAZIMAP['twittercard'] = True
| Python | 0.004374 |
22578771d9812a21361ec959d16e3eaacba998e3 | Add APData Info collector | APData/APInfo.py | APData/APInfo.py | #
#
#
#
class APInfo:
"""..."""
# Protected members
__IPAddress = ""
__MACAddress = ""
__Channel = 0
__Region = 0
__Localization = ""
__TxPowerList = []
__CurrentPowerIndex = -1
__UnderloadLimit = -1
__OverloadLimit = -1
__Reachable = False
__Enabled = False
__EMailSent = False
__SupportedOS = ""
# Public members mirrors
# Class initialization
def __init__(self):
#
# Set the AP transmission power (Tx)
def updateTxPowerIndex(self, newTxPower):
#
if newTxPower < 0:
self.CurrentPowerIndex = len(self.TxPowerList) - 1
else:
for powerTxIndex in self.TxPowerList:
if newTxPower > self.TxPowerList[powerTxIndex]:
break
self.CurrentPowerIndex = powerTxIndex
# Heroku: User: juvenal-jr@ig.com.br / Senha: w4cpvX3DWw
# wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sh
| Python | 0 | |
bd1e135a6ffd9186451ec02fcbcaab7f9066e40f | Add breakpad fetch recipe. | recipes/breakpad.py | recipes/breakpad.py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Breakpad(recipe_util.Recipe):
@staticmethod
def fetch_spec(props):
url = 'https://chromium.googlesource.com/breakpad/breakpad.git'
solution = {
'name': 'src',
'url': url,
'managed': False,
'custom_deps': {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
if props.get('target_os_only'):
spec['target_os_only'] = props['target_os_only']
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Breakpad().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| Python | 0.001108 | |
4996ddddc14ad0d20759abbcf4d54e6132b7b028 | Add the dj_redis_url file | dj_redis_url.py | dj_redis_url.py | # -*- coding: utf-8 -*-
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
# Register database schemes in URLs.
urlparse.uses_netloc.append("redis")
DEFAULT_ENV = "REDIS_URL"
def config(env=DEFAULT_ENV, default=None, **overrides):
"""Returns configured REDIS dictionary from REDIS_URL."""
config = {}
s = os.environ.get(env, default)
if s:
config = parse(s)
overrides = dict([(k.upper(), v) for k, v in overrides.items()])
config.update(overrides)
return config
def parse(url):
"""Parses a database URL."""
config = {}
url = urlparse.urlparse(url)
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
# Update with environment configuration.
config.update({
"DB": int(path or 0),
"PASSWORD": url.password,
"HOST": url.hostname,
"PORT": url.port,
})
return config
| Python | 0.000001 | |
1487722c0431fce19d54b1b020c3af0ab411cc8a | Add sample config.py file | rename_to_config.py | rename_to_config.py | account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "XXXXXXXXXXXXXXXX"
from_number = "+441111222333"
to_number = "+447777222333"
| Python | 0.000001 | |
74dcd072efabe20137e32fcfa0560a41a532a2ba | reverse Integer | python/math/reverseInteger.py | python/math/reverseInteger.py | class Solution:
# @return an integer
def reverse(self, x):
INT_MAX = 2147483647
INT_MIN = -2147483648
result = 0
negative = 1
if x < 0:
negative = -1
x = 0 - x
temp = x / 10
ten = 1
while temp > 0:
temp = temp / 10
ten = ten * 10
i = 0
while ten > 0:
curr = x / ten
if negative == 1:
if INT_MAX / (10 ** i) < curr or (INT_MAX - result - curr * 10 ** i) < 0:
return 0
else:
if (0-INT_MIN) / (10 ** i) < curr or (0 - INT_MIN - result - curr * 10 ** i) < 0:
return 0
result = result + curr * (10 ** i)
x = x % ten
ten = ten / 10
i += 1
return negative * result
if __name__ == "__main__":
solution = Solution()
print solution.reverse(123)
print solution.reverse(1563847412)
| Python | 0.99998 | |
0cc3aafced65d2f128a8036aad62edb5ee19f566 | Add brume main script | scripts/brume.py | scripts/brume.py | #!/usr/bin/env python
import os
import click
import yaml
from glob import glob
from subprocess import check_output
from brume.template import CfnTemplate
from brume.stack import Stack
def load_configuration(config_file='brume.yml'):
"""Return the YAML configuration for a project based on the `config_file` template."""
from jinja2 import Template
def env(key):
"""Return the value of the `key` environment variable."""
return os.getenv(key, None)
def git_commit():
"""Return the SHA1 of the latest Git commit (HEAD)."""
return check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
def git_branch():
"""Return the name of the current Git branch."""
return check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
template = Template(open(config_file, 'r').read())
return yaml.load(template.render(env=env, git_commit=git_commit(), git_branch=git_branch()))
conf = load_configuration()
s3_config = conf['templates']
cf_config = conf['stack']
def collect_templates():
return [CfnTemplate(t) for t in glob('*.cform')]
@click.command()
def config():
"""Print the current stack confguration."""
print(yaml.dump(conf))
@click.command()
def create():
"""Create a new CloudFormation stack."""
stack = Stack(cf_config)
stack.create()
stack.tail()
@click.command()
def update():
"""Update an existing CloudFormation stack."""
stack = Stack(cf_config)
stack.update()
stack.tail()
@click.command()
def deploy():
"""Create or update a CloudFormation stack."""
stack = Stack(cf_config)
stack.create_or_update()
stack.tail()
@click.command()
def delete():
"""Delete a CloudFormation stack."""
stack = Stack(cf_config)
stack.delete()
stack.tail()
@click.command()
def validate():
"""Validate CloudFormation templates."""
templates = collect_templates()
return map(lambda t: t.validate(), templates)
@click.command()
def events():
"""Tail the events of the stack."""
Stack(cf_config).tail()
@click.command()
@click.option('--bucket', required=True, help='Name of the bucket')
@click.option('--prefix', required=True, help='Prefix to the file name')
def upload(templates, bucket, path_prefix):
"""Upload CloudFormation templates to S3."""
[t.upload(bucket, path_prefix) for t in templates]
return templates
@click.group()
def cli():
pass
cli.add_command(create)
cli.add_command(update)
cli.add_command(deploy)
cli.add_command(upload)
cli.add_command(delete)
cli.add_command(validate)
cli.add_command(config)
cli.add_command(events)
if __name__ == '__main__':
cli()
| Python | 0 | |
03bfd2059cfaa7043cbcd941465df6b790f84726 | add `branch.py` script for initial email burst | branch.py | branch.py | """
This is a one-off script to populate the new `emails` table using the addresses
we have in `participants` and `elsewhere`.
"""
from __future__ import division, print_function, unicode_literals
import uuid
from aspen.utils import utcnow
import gratipay.wireup
env = gratipay.wireup.env()
db = gratipay.wireup.db(env)
gratipay.wireup.mail(env)
INITIAL_EMAIL = dict(
subject="Connect to {username} on Gratipay?",
html="""\
<div style="text-align: center; font: normal 14px/21px Arial, sans-serif; color: #333;">
We're working on adding email notifications to Gratipay (formerly Gittip)
and we're sending this email to confirm that you (<b>{email}</b>)
<br>
are the owner of the <b><a href="https://gratipay.com/{username}">{username}</a></b>
account on Gratipay. Sound familiar?
<br>
<br>
<a href="{link}" style="color: #fff; text-decoration:none; display:inline-block; padding: 0 15px; background: #396; font: normal 14px/40px Arial, sans-serif; white-space: nowrap; border-radius: 3px">Yes, proceed!</a>
</div>
""",
text="""\
We're working on adding email notifications to Gratipay (formerly Gittip)
and we're sending this email to confirm that you ({email}) are the owner
of the {username} account on Gratipay. Sound familiar? Follow this link
to finish connecting your email:
{link}
""",
)
def add_email(self, email):
nonce = str(uuid.uuid4())
ctime = utcnow()
db.run("""
INSERT INTO emails
(address, nonce, ctime, participant)
VALUES (%s, %s, %s, %s)
""", (email, nonce, ctime, self.username))
username = self.username_lower
link = "https://gratipay.com/{username}/verify-email.html?email={email}&nonce={nonce}"
self.send_email(INITIAL_EMAIL,
email=email,
link=link.format(**locals()),
username=self.username,
include_unsubscribe=False)
participants = db.all("""
UPDATE participants p
SET email = (e.email, false)
FROM (
SELECT DISTINCT ON (participant)
participant, email
FROM elsewhere
WHERE email IS NOT NULL AND email <> ''
ORDER BY participant, platform = 'github' DESC
) e
WHERE e.participant = p.username
AND p.email IS NULL
AND NOT p.is_closed
AND p.is_suspicious IS NOT true
AND p.claimed_time IS NOT NULL;
SELECT p.*::participants
FROM participants p
WHERE email IS NOT NULL
AND NOT is_closed
AND is_suspicious IS NOT true
AND claimed_time IS NOT NULL;
""")
total = len(participants)
for i, p in enumerate(participants, 1):
print('sending email to %s (%i/%i)' % (p.username, i, total))
add_email(p, p.email.address)
| Python | 0.000001 | |
bf02019c8b97d8dc35e3e186b31cb57adac6a8ec | Create a measurement | shrugd-create.py | shrugd-create.py | import ripe.atlas.cousteau
from atlaskeys import create_key
# DNS query properties
query_argument = "wide.ad.jp"
query_type = "AAAA"
dnssec_ok = True
set_nsid_bit = True
# IP addresses to start from
dns_server_ips = [
"199.7.91.13", "2001:500:2d::d", # D.ROOT-SERVERS.NET
"192.203.230.10", # E.ROOT-SERVERS.NET
]
def ip_address_family(ip_addr):
"""Return whether an IP address is IPv4 or IPv6"""
if ':' in ip_addr:
return 6
else:
return 4
dns_measurements = []
for ip_addr in dns_server_ips:
dns_query = ripe.atlas.cousteau.Dns(
target=ip_addr,
af=ip_address_family(ip_addr),
query_argument=query_argument,
query_type=query_type,
query_class="IN",
set_nsid_bit=set_nsid_bit,
udp_payload_size=4096,
description="shrugd " + query_argument + "/"
)
dns_measurements.append(dns_query)
break
# XXX: possibly should at least pick good IPv6 servers when querying over IPv6
source = ripe.atlas.cousteau.AtlasSource(type="area", value="WW", requested=1)
atlas_request = ripe.atlas.cousteau.AtlasCreateRequest(
key=create_key,
measurements=dns_measurements,
sources=[source],
is_oneoff=True
)
(is_success, response) = atlas_request.create()
if is_success:
print("worked, IDs: %s" % response)
else:
print("did not work")
| Python | 0.998877 | |
69d358fa08652e44dc37974bb735cfdc40ccf1db | increase UPDATE_INTERVAL (#18429) | homeassistant/components/edp_redy.py | homeassistant/components/edp_redy.py | """
Support for EDP re:dy.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edp_redy/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START)
from homeassistant.core import callback
from homeassistant.helpers import discovery, dispatcher, aiohttp_client
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'edp_redy'
EDP_REDY = 'edp_redy'
DATA_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
UPDATE_INTERVAL = 60
REQUIREMENTS = ['edp_redy==0.0.2']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the EDP re:dy component."""
from edp_redy import EdpRedySession
session = EdpRedySession(config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
aiohttp_client.async_get_clientsession(hass),
hass.loop)
hass.data[EDP_REDY] = session
platform_loaded = False
async def async_update_and_sched(time):
update_success = await session.async_update()
if update_success:
nonlocal platform_loaded
# pylint: disable=used-before-assignment
if not platform_loaded:
for component in ['sensor', 'switch']:
await discovery.async_load_platform(hass, component,
DOMAIN, {}, config)
platform_loaded = True
dispatcher.async_dispatcher_send(hass, DATA_UPDATE_TOPIC)
# schedule next update
async_track_point_in_time(hass, async_update_and_sched,
time + timedelta(seconds=UPDATE_INTERVAL))
async def start_component(event):
_LOGGER.debug("Starting updates")
await async_update_and_sched(dt_util.utcnow())
# only start fetching data after HA boots to prevent delaying the boot
# process
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_component)
return True
class EdpRedyDevice(Entity):
"""Representation a base re:dy device."""
def __init__(self, session, device_id, name):
"""Initialize the device."""
self._session = session
self._state = None
self._is_available = True
self._device_state_attributes = {}
self._id = device_id
self._unique_id = device_id
self._name = name if name else device_id
async def async_added_to_hass(self):
"""Subscribe to the data updates topic."""
dispatcher.async_dispatcher_connect(
self.hass, DATA_UPDATE_TOPIC, self._data_updated)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _data_updated(self):
"""Update state, trigger updates."""
self.async_schedule_update_ha_state(True)
def _parse_data(self, data):
"""Parse data received from the server."""
if "OutOfOrder" in data:
try:
self._is_available = not data['OutOfOrder']
except ValueError:
_LOGGER.error(
"Could not parse OutOfOrder for %s", self._id)
self._is_available = False
| """
Support for EDP re:dy.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edp_redy/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START)
from homeassistant.core import callback
from homeassistant.helpers import discovery, dispatcher, aiohttp_client
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'edp_redy'
EDP_REDY = 'edp_redy'
DATA_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
UPDATE_INTERVAL = 30
REQUIREMENTS = ['edp_redy==0.0.2']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the EDP re:dy component."""
from edp_redy import EdpRedySession
session = EdpRedySession(config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
aiohttp_client.async_get_clientsession(hass),
hass.loop)
hass.data[EDP_REDY] = session
platform_loaded = False
async def async_update_and_sched(time):
update_success = await session.async_update()
if update_success:
nonlocal platform_loaded
# pylint: disable=used-before-assignment
if not platform_loaded:
for component in ['sensor', 'switch']:
await discovery.async_load_platform(hass, component,
DOMAIN, {}, config)
platform_loaded = True
dispatcher.async_dispatcher_send(hass, DATA_UPDATE_TOPIC)
# schedule next update
async_track_point_in_time(hass, async_update_and_sched,
time + timedelta(seconds=UPDATE_INTERVAL))
async def start_component(event):
_LOGGER.debug("Starting updates")
await async_update_and_sched(dt_util.utcnow())
# only start fetching data after HA boots to prevent delaying the boot
# process
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_component)
return True
class EdpRedyDevice(Entity):
"""Representation a base re:dy device."""
def __init__(self, session, device_id, name):
"""Initialize the device."""
self._session = session
self._state = None
self._is_available = True
self._device_state_attributes = {}
self._id = device_id
self._unique_id = device_id
self._name = name if name else device_id
async def async_added_to_hass(self):
"""Subscribe to the data updates topic."""
dispatcher.async_dispatcher_connect(
self.hass, DATA_UPDATE_TOPIC, self._data_updated)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _data_updated(self):
"""Update state, trigger updates."""
self.async_schedule_update_ha_state(True)
def _parse_data(self, data):
"""Parse data received from the server."""
if "OutOfOrder" in data:
try:
self._is_available = not data['OutOfOrder']
except ValueError:
_LOGGER.error(
"Could not parse OutOfOrder for %s", self._id)
self._is_available = False
| Python | 0 |
bbdc1961271acf0dd0ad8818d41b84eea4a5aec4 | Use entity_id attribute | homeassistant/components/influxdb.py | homeassistant/components/influxdb.py | """
homeassistant.components.influxdb
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
InfluxDB component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_ON, STATE_OFF,
STATE_UNLOCKED, STATE_LOCKED, STATE_UNKNOWN)
from homeassistant.components.sun import (STATE_ABOVE_HORIZON,
STATE_BELOW_HORIZON)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "influxdb"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
REQUIREMENTS = ['influxdb==2.11.0']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
def setup(hass, config):
""" Setup the InfluxDB component. """
from influxdb import InfluxDBClient, exceptions
if not validate_config(config, {DOMAIN: ['host']}, _LOGGER):
return False
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
database = util.convert(conf.get(CONF_DB_NAME), str, DEFAULT_DATABASE)
username = util.convert(conf.get(CONF_USERNAME), str)
password = util.convert(conf.get(CONF_PASSWORD), str)
ssl = util.convert(conf.get(CONF_SSL), bool, DEFAULT_SSL)
verify_ssl = util.convert(conf.get(CONF_VERIFY_SSL), bool,
DEFAULT_VERIFY_SSL)
try:
influx = InfluxDBClient(host=host, port=port, username=username,
password=password, database=database,
ssl=ssl, verify_ssl=verify_ssl)
influx.query("select * from /.*/ LIMIT 1;")
except exceptions.InfluxDBClientError as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file and that"
" the database exists and is READ/WRITE.", exc)
return False
def influx_event_listener(event):
""" Listen for new messages on the bus and sends them to Influx. """
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, ''):
return
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON):
_state = 1
elif state.state in (STATE_OFF, STATE_UNLOCKED, STATE_BELOW_HORIZON):
_state = 0
else:
try:
_state = float(state.state)
except ValueError:
_state = state.state
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
measurement = state.entity_id
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {
'value': _state,
}
}
]
try:
influx.write_points(json_body)
except exceptions.InfluxDBClientError:
_LOGGER.exception('Error saving event "%s" to InfluxDB', json_body)
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
return True
| """
homeassistant.components.influxdb
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
InfluxDB component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_ON, STATE_OFF,
STATE_UNLOCKED, STATE_LOCKED, STATE_UNKNOWN)
from homeassistant.components.sun import (STATE_ABOVE_HORIZON,
STATE_BELOW_HORIZON)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "influxdb"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
REQUIREMENTS = ['influxdb==2.11.0']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
def setup(hass, config):
""" Setup the InfluxDB component. """
from influxdb import InfluxDBClient, exceptions
if not validate_config(config, {DOMAIN: ['host']}, _LOGGER):
return False
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
database = util.convert(conf.get(CONF_DB_NAME), str, DEFAULT_DATABASE)
username = util.convert(conf.get(CONF_USERNAME), str)
password = util.convert(conf.get(CONF_PASSWORD), str)
ssl = util.convert(conf.get(CONF_SSL), bool, DEFAULT_SSL)
verify_ssl = util.convert(conf.get(CONF_VERIFY_SSL), bool,
DEFAULT_VERIFY_SSL)
try:
influx = InfluxDBClient(host=host, port=port, username=username,
password=password, database=database,
ssl=ssl, verify_ssl=verify_ssl)
influx.query("select * from /.*/ LIMIT 1;")
except exceptions.InfluxDBClientError as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file and that"
" the database exists and is READ/WRITE.", exc)
return False
def influx_event_listener(event):
""" Listen for new messages on the bus and sends them to Influx. """
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, ''):
return
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON):
_state = 1
elif state.state in (STATE_OFF, STATE_UNLOCKED, STATE_BELOW_HORIZON):
_state = 0
else:
try:
_state = float(state.state)
except ValueError:
_state = state.state
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
measurement = '{}.{}'.format(state.domain, state.object_id)
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {
'value': _state,
}
}
]
try:
influx.write_points(json_body)
except exceptions.InfluxDBClientError:
_LOGGER.exception('Error saving event "%s" to InfluxDB', json_body)
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
return True
| Python | 0.000002 |
323176a9749d37d05e87339fe34b50b90cc6b663 | add solution for Maximum Product Subarray | src/maximumProductSubarray.py | src/maximumProductSubarray.py | class Solution:
# @param A, a list of integers
# @return an integer
def maxProduct(self, A):
if not A:
return 0
if len(A) == 1:
return A[0]
maxV, minV = A[0], A[0]
res = maxV
for val in A[1:]:
if val > 0:
maxV, minV = max(val, maxV * val), min(val, minV * val)
else:
maxV, minV = max(val, minV * val), min(val, maxV * val)
res = max(res, maxV)
return res
| Python | 0 | |
abd23cbc80149d4f2985eb8aef5d893714cca717 | add a script to reset the db | scripts/reset_db.py | scripts/reset_db.py | from scraper import clean
def run():
if raw_input("Are you sure? Then write 'yes'") == "yes":
clean()
| Python | 0 | |
43e5727d4091e0b6cb11e0e13ea9f7daf69628fc | Add corpusPreProcess. | corpusPreProcess.py | corpusPreProcess.py | #! /usr/share/env python
# -*- coding=utf-8 -*-
resultFile = open('corpus/BigCorpusPre.txt', 'w')
with open('corpus/BigCorpus.txt', 'r') as f:
for line in f:
line = line[line.find(':')+1:]
resultFile.write(line.strip()+'\n')
resultFile.close()
| Python | 0 | |
82089ad5e5c0d597cfdd16575b4fa5a9a09415ff | introduce plumbery from the command line -- no python coding, yeah! | plumbery/__main__.py | plumbery/__main__.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runs plumbery from the command line
Example::
$ python -m plumbery fittings.yaml build web
To get some help, you can type::
$ python -m plumbery -h
"""
import sys
import argparse
from engine import PlumberyEngine
parser = argparse.ArgumentParser(
prog='plumbery',
description='Plumbing infrastructure with Apache Libcloud.')
parser.add_argument(
'fittings',
nargs=1,
help='File that is containing fittings plan')
parser.add_argument(
'action',
nargs=1,
help="Either 'build', 'start', 'polish', 'stop' or 'destroy'")
parser.add_argument(
'blueprint',
nargs='?',
help='Name of the selected blueprint. '
'If omitted, all blueprints will be considered',
default=None)
args = parser.parse_args()
engine = PlumberyEngine(args.fittings[0])
verb = args.action[0].lower()
if verb == 'build':
if args.blueprint is None:
engine.build_all_blueprints()
else:
engine.build_blueprint(args.blueprint)
elif verb == 'start':
if args.blueprint is None:
engine.start_all_nodes()
else:
engine.start_nodes(args.blueprint)
elif verb == 'polish':
if args.blueprint is None:
engine.polish_all_blueprints()
else:
engine.polish_blueprint(args.blueprint)
elif verb == 'stop':
if args.blueprint is None:
engine.stop_all_nodes()
else:
engine.stop_node(args.blueprint)
elif verb == 'destroy':
if args.blueprint is None:
engine.destroy_all_blueprints()
else:
engine.destroy_blueprint(args.blueprint)
else:
print("{}: error: unrecognised action '{}'".format('plumbery', args.action[0]))
parser.print_help()
sys.exit(2)
| Python | 0 | |
7182af317116db7eb3f7a278b3487ad91a3b3331 | Add example for a clunky 3D high resolution loupe for napari | high-res-slider.py | high-res-slider.py | import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
| Python | 0.000001 | |
1e104af5dc1ef5cbec4bfad62a1691bd0c784caf | Add lstm with zoneout (slow to converge). | rhn/lstm_zoneout.py | rhn/lstm_zoneout.py | # LSTM implementation using zoneout as described in
# Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations
# https://arxiv.org/abs/1606.01305
from keras import backend as K
from keras.layers import LSTM, time_distributed_dense
from keras import initializations, activations, regularizers
from keras.engine import InputSpec
class LSTM_zoneout(LSTM):
def __init__(self, output_dim, zoneout_h=0., zoneout_c=0., **kwargs):
self.zoneout_h = zoneout_h
self.zoneout_c = zoneout_c
if self.zoneout_h or self.zoneout_c:
self.uses_learning_phase = True
super(LSTM_zoneout, self).__init__(output_dim, **kwargs)
def zoneout(self, v, prev_v, pr=0.):
diff = v - prev_v
diff = K.in_train_phase(K.dropout(diff, pr, noise_shape=(self.output_dim,)), diff)
# In testing, return v * (1-pr) + pr * prev_v
# In training when dropout returns 0, return prev_v
# when dropout diff/(1-pr), return v
return prev_v + diff * (1-pr)
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
else:
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
if self.zoneout_c:
c = self.zoneout(c, c_tm1, pr=self.zoneout_c)
h = o * self.activation(c)
if self.zoneout_h:
h = self.zoneout(h, h_tm1, pr=self.zoneout_h)
return h, [h, c]
| Python | 0 | |
72f32099411644a3fed6103430f7dd78fb0929a5 | Add new content parser class (based upon code in Konstruktuer) | konstrukteur/ContentParser.py | konstrukteur/ContentParser.py | #
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
| Python | 0 | |
010028c9170e153d3bc9e3618e02feb2d5e0fb7a | Add stress tester for testing glbackend | scripts/stresser.py | scripts/stresser.py | #!/usr/bin/env python
import json
from twisted.internet import protocol, defer, reactor
from twisted.web.iweb import IBodyProducer
from zope.interface import implements
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
base_url = 'http://127.0.0.1:8082'
class StringProducer(object):
implements(IBodyProducer)
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class BodyReceiver(protocol.Protocol):
def __init__(self, finished, content_length=None):
self.finished = finished
self.data = ""
self.bytes_remaining = content_length
def dataReceived(self, b):
self.data += b
if self.bytes_remaining:
if self.bytes_remaining == 0:
self.connectionLost(None)
else:
self.bytes_remaining -= len(b)
def connectionLost(self, reason):
self.finished.callback(self.data)
def failed(exc, method, url, data, response=None, response_body=None):
print "[!] %s - %s" % (method, url)
print "payload: %s" % data
print "response_body: %s" % response_body
print "response: %s" % response
@defer.inlineCallbacks
def request(method, url, data=None, session_id=None):
agent = Agent(reactor)
headers = {
'Cookie': ['XSRF-TOKEN=antani;'],
'X-XSRF-TOKEN': ['antani']
}
if session_id:
headers['X-Session'] = [str(session_id)]
bodyProducer = None
if data:
bodyProducer = StringProducer(json.dumps(data))
try:
response = yield agent.request(method, str(base_url + url),
Headers(headers), bodyProducer)
except Exception as exc:
failed(exc, method, url, data)
raise exc
try:
content_length = response.headers.getRawHeaders('content-length')
except IndexError:
content_length = None
finished = defer.Deferred()
response.deliverBody(BodyReceiver(finished, content_length))
response_body = yield finished
try:
d = json.loads(response_body)
except Exception as exc:
failed(exc, method, url, data, response, response_body)
defer.returnValue(d)
class Submission(object):
def __init__(self, context):
self.data = {
'context_gus': context['context_gus'],
'files': '',
'finalize': False,
'receivers': context['receivers'],
'wb_fields': {}
}
self.fields = context['fields']
self.id = None
@defer.inlineCallbacks
def create(self):
response = yield request('POST', '/submission', self.data)
receivers = self.data['receivers']
self.data = response
self.data['receivers'] = receivers
def randomFill(self):
for field in self.fields:
self.data['wb_fields'][field['key']] = 'I am an evil stress tester...'
@defer.inlineCallbacks
def finalize(self):
self.data['finalize'] = True
response = yield request('PUT', '/submission/' + self.data['id'], self.data)
defer.returnValue(response)
@defer.inlineCallbacks
def authenticate(password, role, username=''):
response = yield request('POST', '/authentication', {
'password': password,
'role': role,
'username': username
})
defer.returnValue(response)
class WBTip(object):
def __init__(self, receipt):
self.receipt = receipt
self.session_id = None
@defer.inlineCallbacks
def authenticate(self):
session = yield authenticate(self.receipt, 'wb')
self.session_id = session['session_id']
def comment(self, text):
tid = '88197484-655c-e805-6420-9c39e6834721'
d = request('POST', '/tip/'+tid+'/comments',
{'content': text, 'tip_id': tid},
session_id=self.session_id)
@d.addErrback
def eb(err):
print err
@defer.inlineCallbacks
def doStuff():
contexts = yield request('GET', '/contexts')
sub = Submission(contexts[0])
yield sub.create()
sub.randomFill()
submission = yield sub.finalize()
print "Receipt: %s" % submission['receipt']
tip = WBTip(submission['receipt'])
yield tip.authenticate()
yield tip.comment("HELLO")
print "Fin."
@defer.inlineCallbacks
def submissionWorkflow(context, request_delay, idx):
idx -= 1
sub = Submission(context)
yield sub.create()
sub.randomFill()
submission = yield sub.finalize()
if idx == 0:
print "I am now done"
else:
reactor.callLater(request_delay, submissionWorkflow, request_delay, context, idx)
@defer.inlineCallbacks
def submissionFuzz(request_delay, submission_count):
print "Using %s - %s" % (request_delay, submission_count)
contexts = yield request('GET', '/contexts')
submissionWorkflow(contexts[0], request_delay, submission_count)
submissionFuzz(1, 10)
reactor.run()
| Python | 0 | |
1d20bec9306904a6d676c4e1e34a07a842a7a600 | Add the IGMP file which got left out. | pcs/packets/igmp.py | pcs/packets/igmp.py | import pcs
from socket import AF_INET, inet_ntop
import struct
import inspect
import time
import igmpv2
import igmpv3
#import dvmrp
#import mtrace
IGMP_HOST_MEMBERSHIP_QUERY = 0x11
IGMP_v1_HOST_MEMBERSHIP_REPORT = 0x12
IGMP_DVMRP = 0x13
IGMP_v2_HOST_MEMBERSHIP_REPORT = 0x16
IGMP_HOST_LEAVE_MESSAGE = 0x17
IGMP_v3_HOST_MEMBERSHIP_REPORT = 0x22
IGMP_MTRACE_REPLY = 0x1e
IGMP_MTRACE_QUERY = 0x1f
map = {
IGMP_HOST_MEMBERSHIP_QUERY: igmpv2.igmpv2,
IGMP_v1_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
#IGMP_DVMRP: dvmrp.dvmrp,
IGMP_v2_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
IGMP_HOST_LEAVE_MESSAGE: igmpv2.igmpv2,
#IGMP_MTRACE_REPLY: mtrace.reply,
#IGMP_MTRACE_QUERY: mtrace.query,
IGMP_v3_HOST_MEMBERSHIP_REPORT: igmpv3.report
}
descr = {
IGMP_HOST_MEMBERSHIP_QUERY: "IGMPv2 Query",
IGMP_v1_HOST_MEMBERSHIP_REPORT: "IGMPv1 Report",
IGMP_DVMRP: "DVMRP",
IGMP_v2_HOST_MEMBERSHIP_REPORT: "IGMPv2 Report",
IGMP_HOST_LEAVE_MESSAGE: "IGMPv2 Leave",
IGMP_MTRACE_REPLY: "MTRACE Reply",
IGMP_MTRACE_QUERY: "MTRACE Query",
IGMP_v3_HOST_MEMBERSHIP_REPORT: "IGMPv3 Report"
}
class igmp(pcs.Packet):
"""IGMP"""
_layout = pcs.Layout()
_map = map
_descr = descr
def __init__(self, bytes = None, timestamp = None):
""" Define the common IGMP encapsulation; see RFC 2236. """
type = pcs.Field("type", 8, discriminator=True)
code = pcs.Field("code", 8)
checksum = pcs.Field("checksum", 16)
pcs.Packet.__init__(self, [type, code, checksum], bytes = bytes)
# Description MUST be set after the PCS layer init
self.description = inspect.getdoc(self)
if timestamp == None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if bytes != None:
offset = self.sizeof()
if self.type == IGMP_HOST_MEMBERSHIP_QUERY and \
len(bytes) >= igmpv3.IGMP_V3_QUERY_MINLEN:
self.data = igmpv3.query(bytes[offset:len(bytes)],
timestamp = timestamp)
else:
self.data = self.next(bytes[offset:len(bytes)],
timestamp = timestamp)
else:
self.data = None
def __str__(self):
"""Walk the entire packet and pretty print the values of the fields."""
retval = self._descr[self.type] + "\n"
for field in self._layout:
retval += "%s %s\n" % (field.name, field.value)
return retval
| Python | 0 | |
2cadad76c2756852b94948088e92b9191abebbb7 | make one pickle file with all metadata (for faster loading) | generate_metadata_pkl.py | generate_metadata_pkl.py | import argparse
from dicom.sequence import Sequence
import glob
import re
from log import print_to_file
import cPickle as pickle
def read_slice(path):
return pickle.load(open(path))['data']
def convert_to_number(value):
value = str(value)
try:
if "." in value:
return float(value)
else:
return int(value)
except:
pass
return value
def clean_metadata(metadatadict):
# Do cleaning
keys = sorted(list(metadatadict.keys()))
for key in keys:
value = metadatadict[key]
if key == 'PatientAge':
metadatadict[key] = int(value[:-1])
if key == 'PatientSex':
metadatadict[key] = 1 if value == 'F' else -1
else:
if isinstance(value, Sequence):
#convert to list
value = [i for i in value]
if isinstance(value, (list,)):
metadatadict[key] = [convert_to_number(i) for i in value]
else:
metadatadict[key] = convert_to_number(value)
return metadatadict
def read_metadata(path):
d = pickle.load(open(path))['metadata'][0]
metadata = clean_metadata(d)
return metadata
def get_patient_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + r'/*.pkl'),
key=lambda x: int(re.search(r'/*_(\d+)\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(.*_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
patient_data.append({'metadata': metadata,
'slice_id': slice_id})
return patient_data, pid
def get_metadata(data_path):
patient_paths = sorted(glob.glob(data_path + '/*/study'))
metadata_dict = {}
for p in patient_paths:
patient_data, pid = get_patient_data(p)
print "patient", pid
metadata_dict[pid] = dict()
for pd in patient_data:
metadata_dict[pid][pd['slice_id']] = pd['metadata']
filename = data_path.split('/')[-1] + '_metadata.pkl'
with open(filename, 'w') as f:
pickle.dump(metadata_dict, f)
print 'saved to ', filename
return metadata_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
#required.add_argument('-c', '--config',
# help='configuration to run',
# required=True)
args = parser.parse_args()
data_paths = ['/mnt/storage/data/dsb15_pkl/pkl_train', '/mnt/storage/data/dsb15_pkl/pkl_validate']
with print_to_file("/mnt/storage/metadata/kaggle-heart/logs/generate_metadata.log"):
for d in data_paths:
get_metadata(d)
print "log saved to '%s'" % ("/mnt/storage/metadata/kaggle-heart/logs/generate_metadata.log")
| Python | 0 | |
8939e873f4ea61169f9384eded5b8c603cfde988 | Add crypto pre-submit that will add the openssl builder to the default try-bot list. | crypto/PRESUBMIT.py | crypto/PRESUBMIT.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
| Python | 0.000006 | |
27ed31c7a21c4468bc86aaf220e30315e366c425 | add message to SearxParameterException - fixes #1722 | searx/exceptions.py | searx/exceptions.py | '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2017- by Alexandre Flament, <alex@al-f.net>
'''
class SearxException(Exception):
pass
class SearxParameterException(SearxException):
def __init__(self, name, value):
if value == '' or value is None:
message = 'Empty ' + name + ' parameter'
else:
message = 'Invalid value "' + value + '" for parameter ' + name
super(SearxParameterException, self).__init__(message)
self.message = message
self.parameter_name = name
self.parameter_value = value
| '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2017- by Alexandre Flament, <alex@al-f.net>
'''
class SearxException(Exception):
pass
class SearxParameterException(SearxException):
def __init__(self, name, value):
if value == '' or value is None:
message = 'Empty ' + name + ' parameter'
else:
message = 'Invalid value "' + value + '" for parameter ' + name
super(SearxParameterException, self).__init__(message)
self.parameter_name = name
self.parameter_value = value
| Python | 0 |
a230bb1b2f1c96c7f9764ee2bf759ea9fe39e801 | add populations tests | isochrones/tests/test_populations.py | isochrones/tests/test_populations.py | import unittest
from pandas.testing import assert_frame_equal
from scipy.stats import uniform, norm
from isochrones import get_ichrone
from isochrones.priors import ChabrierPrior, FehPrior, GaussianPrior, SalpeterPrior, DistancePrior, AVPrior
from isochrones.populations import StarFormationHistory, StarPopulation, BinaryDistribution, deredden
class PopulationTest(unittest.TestCase):
def setUp(self):
mist = get_ichrone("mist")
sfh = StarFormationHistory() # Constant SFR for 10 Gyr; or, e.g., dist=norm(3, 0.2)
imf = SalpeterPrior(bounds=(0.4, 10)) # bounds on solar masses
binaries = BinaryDistribution(fB=0.4, gamma=0.3)
feh = GaussianPrior(-0.2, 0.2)
distance = DistancePrior(max_distance=3000) # pc
AV = AVPrior(bounds=[0, 2])
pop = StarPopulation(
mist, sfh=sfh, imf=imf, feh=feh, distance=distance, binary_distribution=binaries, AV=AV
)
self.pop = pop
self.mist = mist
self.df = pop.generate(1000)
self.dereddened_df = deredden(mist, self.df)
def test_mags(self):
"""Check no total mags are null
"""
mags = [f"{b}_mag" for b in self.mist.bands]
assert self.df[mags].isnull().sum().sum() == 0
def test_dereddening(self):
"""Check mass, age, feh the same when dereddened
"""
cols = ["initial_mass", "initial_feh", "requested_age"]
assert_frame_equal(self.df[cols], self.dereddened_df[cols])
# Check de-reddening vis-a-vis A_x
for b in self.mist.bands:
diff = (self.dereddened_df[f"{b}_mag"] + self.df[f"A_{b}"]) - self.df[f"{b}_mag"]
is_binary = self.df.mass_B > 0
assert diff.loc[~is_binary].std() < 0.0001
| Python | 0 | |
6ebed7a2a6488a857fc6878c2d39d26ce9bc72f5 | Add release 9.1.0 recognition to the Dyninst API package file. | var/spack/repos/builtin/packages/dyninst/package.py | var/spack/repos/builtin/packages/dyninst/package.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dyninst(Package):
"""API for dynamic binary instrumentation. Modify programs while they
are executing without recompiling, re-linking, or re-executing."""
homepage = "https://paradyn.org"
url = "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1.2/DyninstAPI-8.1.2.tgz"
list_url = "http://www.dyninst.org/downloads/dyninst-8.x"
version('9.1.0', '5c64b77521457199db44bec82e4988ac',
url="http://www.paradyn.org/release9.1.0/DyninstAPI-9.1.0.tgz")
version('8.2.1', 'abf60b7faabe7a2e4b54395757be39c7',
url="http://www.paradyn.org/release8.2/DyninstAPI-8.2.1.tgz")
version('8.1.2', 'bf03b33375afa66fe0efa46ce3f4b17a',
url="http://www.paradyn.org/release8.1.2/DyninstAPI-8.1.2.tgz")
version('8.1.1', 'd1a04e995b7aa70960cd1d1fac8bd6ac',
url="http://www.paradyn.org/release8.1/DyninstAPI-8.1.1.tgz")
depends_on("libelf")
depends_on("libdwarf")
depends_on("boost@1.42:")
# new version uses cmake
def install(self, spec, prefix):
libelf = spec['libelf'].prefix
libdwarf = spec['libdwarf'].prefix
with working_dir('spack-build', create=True):
cmake('..',
'-DBoost_INCLUDE_DIR=%s' % spec['boost'].prefix.include,
'-DBoost_LIBRARY_DIR=%s' % spec['boost'].prefix.lib,
'-DBoost_NO_SYSTEM_PATHS=TRUE',
'-DLIBELF_INCLUDE_DIR=%s' % join_path(libelf.include, 'libelf'),
'-DLIBELF_LIBRARIES=%s' % join_path(libelf.lib, 'libelf.so'),
'-DLIBDWARF_INCLUDE_DIR=%s' % libdwarf.include,
'-DLIBDWARF_LIBRARIES=%s' % join_path(libdwarf.lib, 'libdwarf.so'),
*std_cmake_args)
make()
make("install")
@when('@:8.1')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
| ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dyninst(Package):
"""API for dynamic binary instrumentation. Modify programs while they
are executing without recompiling, re-linking, or re-executing."""
homepage = "https://paradyn.org"
url = "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1.2/DyninstAPI-8.1.2.tgz"
list_url = "http://www.dyninst.org/downloads/dyninst-8.x"
version('8.2.1', 'abf60b7faabe7a2e4b54395757be39c7',
url="http://www.paradyn.org/release8.2/DyninstAPI-8.2.1.tgz")
version('8.1.2', 'bf03b33375afa66fe0efa46ce3f4b17a',
url="http://www.paradyn.org/release8.1.2/DyninstAPI-8.1.2.tgz")
version('8.1.1', 'd1a04e995b7aa70960cd1d1fac8bd6ac',
url="http://www.paradyn.org/release8.1/DyninstAPI-8.1.1.tgz")
depends_on("libelf")
depends_on("libdwarf")
depends_on("boost@1.42:")
# new version uses cmake
def install(self, spec, prefix):
libelf = spec['libelf'].prefix
libdwarf = spec['libdwarf'].prefix
with working_dir('spack-build', create=True):
cmake('..',
'-DBoost_INCLUDE_DIR=%s' % spec['boost'].prefix.include,
'-DBoost_LIBRARY_DIR=%s' % spec['boost'].prefix.lib,
'-DBoost_NO_SYSTEM_PATHS=TRUE',
'-DLIBELF_INCLUDE_DIR=%s' % join_path(libelf.include, 'libelf'),
'-DLIBELF_LIBRARIES=%s' % join_path(libelf.lib, 'libelf.so'),
'-DLIBDWARF_INCLUDE_DIR=%s' % libdwarf.include,
'-DLIBDWARF_LIBRARIES=%s' % join_path(libdwarf.lib, 'libdwarf.so'),
*std_cmake_args)
make()
make("install")
@when('@:8.1')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
| Python | 0 |
06df583e3821470856852b10f0703fccce81e2d6 | Add planex-pin command | planex/pin.py | planex/pin.py | """
planex-pin: Generate a new override spec file for a given package
"""
import argparse
import os
import sys
import re
import logging
from planex.util import run
def describe(repo, treeish="HEAD"):
dotgitdir = os.path.join(repo, ".git")
if not os.path.exists(dotgitdir):
raise Exception("Pin target is not a git repository: '%s'" % repo)
# First, get the hash of the commit
cmd = ["git", "--git-dir=%s" % dotgitdir, "rev-parse", treeish]
sha = run(cmd)['stdout'].strip()
# Now lets describe that hash
cmd = ["git", "--git-dir=%s" % dotgitdir, "describe", "--tags", sha]
description = run(cmd, check=False)['stdout'].strip()
# if there are no tags, use the number of commits
if description == "":
cmd = ["git", "--git-dir=%s" % dotgitdir, "log", "--oneline", sha]
commits = run(cmd)['stdout'].strip()
description = str(len(commits.splitlines()))
# replace '-' with '+' in description to not confuse rpm
match = re.search("[^0-9]*", description)
matchlen = len(match.group())
return description[matchlen:].replace('-', '+')
def archive(repo, commit_hash, pin_version, target_dir):
dotgitdir = os.path.join(repo, ".git")
prefix = "%s-%s" % (os.path.basename(repo), pin_version)
path = os.path.join(target_dir, "%s.tar" % prefix)
run(["git", "--git-dir=%s" % dotgitdir, "archive", commit_hash,
"--prefix=%s/" % prefix, "-o", path])
run(["gzip", "--no-name", "-f", path])
return path + ".gz"
def pinned_spec_of_spec(spec_path, pin_version, source_path):
spec_in = open(spec_path)
spec_contents = spec_in.readlines()
spec_in.close()
source_url = "file://" + os.path.abspath(source_path)
pinned_spec = []
for line in spec_contents:
# replace the source url
match = re.match(r'^([Ss]ource\d*:\s+)(.+)\n', line)
if match:
line = match.group(1) + source_url + "\n"
# replace the use of the version macro in the spec contents
line = line.replace("%{version}", pin_version)
pinned_spec.append(line)
return "".join(pinned_spec)
def parse_args_or_exit(argv=None):
"""
Parse command line options
"""
parser = argparse.ArgumentParser(
description='Pin a package to a specific version')
parser.add_argument('spec', help='RPM Spec file')
parser.add_argument('pin', help='Specific version, local path or git url')
parser.add_argument('output_dir', help='Path to write output spec file')
parser.add_argument('--remove', '-r', help='Remove pin for this package',
action='store_true')
parser.add_argument('--verbose', '-v', help='Be verbose',
action='store_true')
return parser.parse_args(argv)
def main(argv):
"""
Main function
"""
args = parse_args_or_exit(argv)
if args.verbose:
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
if os.path.exists(args.output_dir):
if not os.path.isdir(args.output_dir):
raise Exception(
"Output directory exists and is not a directory: '%s'" %
args.output_dir)
else:
os.makedirs(args.output_dir)
# we're assuming for now that the target is a git repository
repo, _, hash = args.pin.partition('#')
pin_version = describe(repo, hash) if hash else describe(repo)
source_path = archive(repo, hash, pin_version, args.output_dir)
spec_filename = os.path.basename(args.spec)
output_spec_path = os.path.join(args.output_dir, spec_filename)
with open(output_spec_path, 'w') as f:
f.write(pinned_spec_of_spec(args.spec, pin_version, source_path))
def _main():
"""
Entry point for setuptools CLI wrapper
"""
main(sys.argv[1:])
# Entry point when run directly
if __name__ == "__main__":
_main()
| Python | 0 | |
d98d45ec500c2850a507ddd248abff62a9253add | Add Albany package. (#8332) | var/spack/repos/builtin/packages/albany/package.py | var/spack/repos/builtin/packages/albany/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Albany(CMakePackage):
"""Albany is an implicit, unstructured grid, finite element code for the
solution and analysis of multiphysics problems. The Albany repository
on the GitHub site contains hundreds of regression tests and examples
that demonstrate the code's capabilities on a wide variety of problems
including fluid mechanics, solid mechanics (elasticity and plasticity),
ice-sheet flow, quantum device modeling, and many other applications."""
homepage = "http://gahansen.github.io/Albany"
url = "https://github.com/gahansen/Albany/tarball/master"
maintainers = ['gahansen']
version('develop', git='https://github.com/gahansen/Albany.git', branch='master')
variant('lcm', default=True,
description='Enable LCM')
variant('aeras', default=False,
description='Enable AERAS')
variant('qcad', default=False,
description='Enable QCAD')
variant('hydride', default=False,
description='Enable HYDRIDE')
variant('lcm_spec', default=False,
description='Enable LCM_SPECULATIVE')
variant('lame', default=False,
description='Enable LAME')
variant('debug', default=False,
description='Enable DEBUGGING')
variant('fpe', default=False,
description='Enable CHECK_FPE')
variant('scorec', default=False,
description='Enable SCOREC')
variant('felix', default=False,
description='Enable FELIX')
variant('mor', default=False,
description='Enable MOR')
variant('confgui', default=False,
description='Enable Albany configuration (CI) GUI')
variant('ascr', default=False,
description='Enable ALBANY_ASCR')
variant('perf', default=False,
description='Enable PERFORMANCE_TESTS')
variant('64bit', default=True,
description='Enable 64BIT')
# Add dependencies
depends_on('mpi')
depends_on('trilinos~superlu-dist+isorropia+tempus+rythmos+teko+intrepid+intrepid2+minitensor+phalanx+pnetcdf+nox+piro+rol+shards+stk+superlu@master,develop')
def cmake_args(self):
spec = self.spec
trilinos_dir = spec['trilinos'].prefix
options = []
options.extend([
'-DALBANY_TRILINOS_DIR:FILEPATH={0}'.format(trilinos_dir),
'-DINSTALL_ALBANY:BOOL=ON'
])
options.extend([
'-DENABLE_LCM:BOOL=%s' % (
'ON' if '+lcm' in spec else 'OFF'),
'-DENABLE_AERAS:BOOL=%s' % (
'ON' if '+aeras' in spec else 'OFF'),
'-DENABLE_QCAD:BOOL=%s' % (
'ON' if '+qcad' in spec else 'OFF'),
'-DENABLE_HYDRIDE:BOOL=%s' % (
'ON' if '+hydride' in spec else 'OFF'),
'-DENABLE_LCM_SPECULATIVE:BOOL=%s' % (
'ON' if '+lcm_spec' in spec else 'OFF'),
'-DENABLE_LAME:BOOL=%s' % (
'ON' if '+lame' in spec else 'OFF'),
'-DENABLE_DEBUGGING:BOOL=%s' % (
'ON' if '+debug' in spec else 'OFF'),
'-DENABLE_CHECK_FPE:BOOL=%s' % (
'ON' if '+fpe' in spec else 'OFF'),
'-DENABLE_SCOREC:BOOL=%s' % (
'ON' if '+scorec' in spec else 'OFF'),
'-DENABLE_FELIX:BOOL=%s' % (
'ON' if '+felix' in spec else 'OFF'),
'-DENABLE_MOR:BOOL=%s' % (
'ON' if '+mor' in spec else 'OFF'),
'-DENABLE_ALBANY_CI:BOOL=%s' % (
'ON' if '+ci' in spec else 'OFF'),
'-DENABLE_ASCR:BOOL=%s' % (
'ON' if '+ascr' in spec else 'OFF'),
'-DENABLE_PERFORMANCE_TESTS:BOOL=%s' % (
'ON' if '+perf' in spec else 'OFF'),
'-DENABLE_64BIT_INT:BOOL=%s' % (
'ON' if '+64bit' in spec else 'OFF')
])
return options
| Python | 0 | |
f982cd78ae79f77c2ca59440de20de37002d6658 | Add a pcakge: libzip. (#3656) | var/spack/repos/builtin/packages/libzip/package.py | var/spack/repos/builtin/packages/libzip/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libzip(AutotoolsPackage):
"""libzip is a C library for reading, creating,
and modifying zip archives."""
homepage = "https://nih.at/libzip/index.html"
url = "https://nih.at/libzip/libzip-1.2.0.tar.gz"
version('1.2.0', '5c3372ab3a7897295bfefb27f745cf69')
| Python | 0.001174 | |
9877c21c502b27460f70e6687ed3fd6a2d3fd0d5 | add new package at v8.3.0 (#27446) | var/spack/repos/builtin/packages/racket/package.py | var/spack/repos/builtin/packages/racket/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Racket(Package):
"""The Racket programming language."""
homepage = "https://www.racket-lang.org"
url = "https://download.racket-lang.org/releases/8.3/installers/racket-src.tgz"
maintainers = ['arjunguha']
version('8.3.0', 'c4af1a10b957e5fa0daac2b5ad785cda79805f76d11482f550626fa68f07b949')
depends_on('libffi', type=('build', 'link', 'run'))
depends_on('patchutils')
phases = ['configure', 'build', 'install']
def configure(self, spec, prefix):
with working_dir('src'):
configure = Executable('./configure')
configure("--prefix", prefix)
def build(self, spec, prefix):
with working_dir('src'):
make()
def install(self, spec, prefix):
with working_dir('src'):
make('install')
| Python | 0 | |
bd49a4c82e011d7c5025abc15324220b1496f8c8 | add deepspeech.py to support DeepSpeech | deepspeech.py | deepspeech.py | import subprocess
class DeepSpeechRecognizer():
def __init__(self, model=None, alphabet=None, lm=None, trie=None):
self.model = model
self.alphabet = alphabet
self.lm = lm
self.trie = trie
def recognize(self, audio_file):
"""recognize audio file
args:
audio_file (str)
return:
result (str/False)
"""
output = subprocess.getoutput("deepspeech --model {} --alphabet {} --lm {} --trie {} --audio {}".format(self.model, self.alphabet, self.lm, self.trie, audio_file))
for index, line in enumerate(output.split("\n")):
if line.startswith("Inference took "):
return output.split("\n")[index + 1]
return None
if __name__=="__main__":
recognizer = DeepSpeechRecognizer(r"models/output_graph.pbmm", r"models/alphabet.txt", r"models/lm.binary", r"models/trie")
result = recognizer.recognize("audio/8455-210777-0068.wav")
print(result)
| Python | 0.000001 | |
ca09dc0b9d555f10aafb17380a9a8592727d0a0f | Add dp/SPOJ-ROCK.py | dp/SPOJ-ROCK.py | dp/SPOJ-ROCK.py | def compute_zero_counts(rock_desc):
zero_counts = [0 for i in xrange(N+1)]
for i in xrange(1, N+1):
zero_counts[i] = zero_counts[i-1]
if rock_desc[i-1] == '0':
zero_counts[i] += 1
return zero_counts
def score(zero_counts, start, end):
length = end - start + 1
zeroes = zero_counts[end] - zero_counts[start-1]
ones = length - zeroes
if ones > zeroes:
return length
return 0
t = int(raw_input())
for case in xrange(t):
N = int(raw_input())
rock_desc = raw_input()
zero_counts = compute_zero_counts(rock_desc)
dp = [0 for i in xrange(N+1)]
for i in xrange(1,N+1):
for j in xrange(0,i):
dp[i] = max(dp[i], dp[j] + score(zero_counts, j+1, i))
print dp[N]
| Python | 0.000001 | |
50d05aabc2eb1d5bcb20d457dd05d2882b983afa | Add installation script for profiler. | install_and_run.py | install_and_run.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
| Python | 0 | |
734967196c8f0577b218802c16d9eab31c9e9054 | Add problem 36, palindrome binaries | problem_36.py | problem_36.py | from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
| Python | 0.999978 | |
ad9a9df8e144c41456aeded591081a3a339853f3 | Create RLU_forward_backward.py | Neural-Networks/RLU_forward_backward.py | Neural-Networks/RLU_forward_backward.py |
from numpy import *
from RLU_neural_forward import *
from RLU_back_propagation import *
def forwardBackward(xi, x, y, MT, time_queue, good_queue, DELTA_queue):
A = neural_forward(xi, x, MT)
check = argmax(A[-xi[-1]:])
# send back some progress statistic
if y[check]-1 == 0:
good = good_queue.get()
good += 1
good_queue.put(good)
good_queue.task_done()
time = time_queue.get()
time += 1
time_queue.put(time)
time_queue.task_done()
DELTA = DELTA_queue.get()
DELTA = DELTA + back_propagation(y, A, MT, xi)
DELTA_queue.put(DELTA)
DELTA_queue.task_done()
| Python | 0.000019 | |
1ecb4a0711304af13f41ae1aae67792057783334 | Create ScaperUtils.py | data/ScaperUtils.py | data/ScaperUtils.py | class ScraperUtil (object) :
class Base :
def __init__(self,data_get,data_parse, data_formatter=None) :
self.get_data = data_get
self.parse_data = data_parse
self.data_formatter = data_formatter
class Yahoo(Base) :
def __init__(self,data_get,data_format,data_parse) :
ScraperUtil.Base.__init__( self,data_get,data_parse,data_format)
def __call__(self,symbol) :
ret = self.get_data(symbol)
if self.data_formatter is not None :
ret = self.data_formatter(ret)
for token in self.parse_data(ret) :
yield token
class Nasdaq(Base) :
def __init__(self,data_get,data_parse,data_formatter,exchange_list=None,unwanted_keys_list=None) :
ScraperUtil.Base.__init__( self,data_get,data_parse,data_formatter)
self.exchanges=["nyse", "nasdaq"]
self.unwanted_keys=['Summary Quote','MarketCap','LastSale','IPOyear','Unnamed: 8']
if exchange_list is not None : self.exchanges = exchange_list
if unwanted_keys_list is not None : self.unwanted_keys = unwanted_keys_list
def __call__(self,exchange_list=None,unwanted_keys_list=None) :
exchanges = self.exchanges
unwanted_keys = self.unwanted_keys
if exchange_list is not None : exchanges = exchange_list
if unwanted_keys_list is not None : unwanted_keys = unwanted_keys_list
ret = None
for exchange in exchanges :
if ret is None : ret = self.get_data(exchange)
else : ret = b"".join([ret, self.get_data(exchange)])
ret = self.parse_data(ret)
if self.data_formatter is not None :
ret = self.data_formatter(ret,unwanted_keys,exchange)
return ret.reindex()
class NasdaqService() :
def __init__(self,service) :
self.service = service
self.fresh = None
self.cache = None
def __call__(self) :
if self.cache is None or not self.fresh():
self.cache = self.service()
self.fresh = TimeUtil.ExpireTimer(24*60)
return self.cache
class StockService() :
def __init__(self) :
self.fresh = {}
self.cache = {}
def __call__(self,stock) :
if stock not in self.cache.keys() or not self.fresh[stock]():
y1,y2,r = get_year_parameters()
self.cache[stock] = get_yahoo_historical(stock,y1)
self.fresh[stock] = TimeUtil.ExpireTimer(24*60)
return self.cache[stock]
| Python | 0 | |
6d33ed73adeea4808ed4b3b9bd8642ad83910dfc | add ridgeline example (#1519) | altair/examples/ridgeline_plot.py | altair/examples/ridgeline_plot.py | """
Ridgeline plot (Joyplot) Example
--------------------------------
A `Ridgeline plot <https://serialmentor.com/blog/2017/9/15/goodbye-joyplots>`_
chart is a chart that lets you visualize distribution of a numeric value for
several groups.
Such a chart can be created in Altair by first transforming the data into a
suitable representation.
"""
# category: other charts
import altair as alt
from vega_datasets import data
source = data.seattle_weather.url
step = 20
overlap = 1
ridgeline = alt.Chart(source).transform_timeunit(
Month='month(date)'
).transform_joinaggregate(
mean_temp='mean(temp_max)', groupby=['Month']
).transform_bin(
['bin_max', 'bin_min'], 'temp_max'
).transform_aggregate(
value='count()', groupby=['Month', 'mean_temp', 'bin_min', 'bin_max']
).transform_impute(
impute='value', groupby=['Month', 'mean_temp'], key='bin_min', value=0
).mark_line(
interpolate='monotone',
fillOpacity=0.8,
stroke='lightgray',
strokeWidth=0.5
).encode(
alt.X('bin_min:Q', bin='binned', title='Maximum Daily Temperature (C)'),
alt.Y(
'value:Q',
scale=alt.Scale(range=[step, -step * overlap]),
axis=None
),
alt.Fill(
'mean_temp:Q',
legend=None,
scale=alt.Scale(domain=[30, 5], scheme='redyellowblue')
),
alt.Row(
'Month:T',
title=None,
header=alt.Header(labelAngle=0, labelAlign='right', format='%B')
)
).properties(
bounds='flush', title='Seattle Weather', height=step
).configure_facet(
spacing=0
).configure_view(
stroke=None
).configure_title(
anchor='end'
)
ridgeline
| Python | 0 | |
dfe65e6839a4347c7acfc011f052db6ec4ee1d9d | test Task | tests/unit/test_task.py | tests/unit/test_task.py | import sys
from zorn import tasks
from io import StringIO
def test_task():
task = tasks.Task()
assert task.verbosity == 1
def test_parse_verbosity_standard():
silent = False
verbose = False
verbosity = tasks.Task.parse_verbosity(verbose, silent)
assert verbosity == 1
def test_parse_verbosity_silent():
silent = True
verbose = False
verbosity = tasks.Task.parse_verbosity(verbose, silent)
assert verbosity == 0
silent = True
verbose = True
verbosity = tasks.Task.parse_verbosity(verbose, silent)
assert verbosity == 0
def test_parse_verbosity_verbose():
silent = False
verbose = True
verbosity = tasks.Task.parse_verbosity(verbose, silent)
assert verbosity == 2
def test_comunicate_standard_verbosity():
task = tasks.Task(1)
stdout_ = sys.stdout
stream = StringIO()
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
sys.stdout = stdout_
assert stream.getvalue() == 'standard\n'
def test_comunicate_silent():
task = tasks.Task(0)
stdout_ = sys.stdout
stream = StringIO()
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
sys.stdout = stdout_
assert stream.getvalue() == ''
def test_comunicate_verbose():
task = tasks.Task(2)
stdout_ = sys.stdout
stream = StringIO()
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
sys.stdout = stdout_
assert stream.getvalue() == 'standard\nverbose\n'
| Python | 0.999998 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.