commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
8692557a3389403b7a3450065d99e3750d91b2ed | Create views.py | pagination_bootstrap/views.py | pagination_bootstrap/views.py | Python | 0 | ||
060c6d2eeea2235cda955c873b50e0aa2a4accd0 | use 20 | farmer/models.py | farmer/models.py | #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Job(models.Model):
    """An ansible shell job run against a set of inventories."""

    # Target hosts, e.g. web_servers:host1 .
    inventories = models.TextField(null=False, blank=False)
    # Whether the remote command is run through sudo.
    sudo = models.BooleanField(default=True)
    # The shell command, i.e. the 'du -sh /tmp' part of:
    #   ansible web_servers -m shell -a 'du -sh /tmp'
    cmd = models.TextField(null=False, blank=False)
    # Exit status of the ansible run.
    rc = models.IntegerField(null=True)
    result = models.TextField(null=True)
    start = models.DateTimeField(null=True)
    end = models.DateTimeField(null=True)

    @property
    def cmd_shell(self):
        """Full ansible command line for this job."""
        if self.sudo:
            option = '--sudo -f 20 -m shell -a'
        else:
            option = '-f 20 -m shell -a'
        return 'ansible %s %s "%s"' % (self.inventories, option, self.cmd)

    def run(self):
        """Fork and execute the job, recording timing and per-host results."""
        if os.fork() == 0:
            tmpdir = '/tmp/ansible_%s' % time.time()
            os.mkdir(tmpdir)
            self.start = datetime.now()
            self.save()
            cmd_shell = self.cmd_shell + ' -t ' + tmpdir
            status, output = getstatusoutput(cmd_shell)
            self.end = datetime.now()
            # ansible's tree output writes one JSON file per host.
            result = {}
            for f in os.listdir(tmpdir):
                result[f] = json.loads(open(tmpdir + '/' + f).read())
            self.rc = status
            self.result = json.dumps(result)
            self.save()
            os.system('rm -rf ' + tmpdir)

    def __unicode__(self):
        return self.cmd_shell
| #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Job(models.Model):
# hosts, like web_servers:host1 .
inventories = models.TextField(null = False, blank = False)
# 0, do not use sudo; 1, use sudo .
sudo = models.BooleanField(default = True)
# for example: ansible web_servers -m shell -a 'du -sh /tmp'
# the 'du -sh /tmp' is cmd here
cmd = models.TextField(null = False, blank = False)
# return code of this job
rc = models.IntegerField(null = True)
result = models.TextField(null = True)
start = models.DateTimeField(null = True)
end = models.DateTimeField(null = True)
@property
def cmd_shell(self):
option = self.sudo and '--sudo -m shell -a' or '-m shell -a'
return 'ansible %s %s "%s"' % (self.inventories, option, self.cmd)
def run(self):
if os.fork() == 0:
tmpdir = '/tmp/ansible_%s' % time.time()
os.mkdir(tmpdir)
self.start = datetime.now()
self.save()
cmd_shell = self.cmd_shell + ' -t ' + tmpdir
status, output = getstatusoutput(cmd_shell)
self.end = datetime.now()
result = {}
for f in os.listdir(tmpdir):
result[f] = json.loads(open(tmpdir + '/' + f).read())
self.rc = status
self.result = json.dumps(result)
self.save()
os.system('rm -rf ' + tmpdir)
def __unicode__(self):
return self.cmd_shell
| Python | 0.99981 |
799109759114d141d71bed777b9a1ac2ec26a264 | add Red object detection | python/ObjectDetection/RedExtractObject.py | python/ObjectDetection/RedExtractObject.py | import cv2
import numpy as np
video = cv2.VideoCapture(0)

while True:
    # Grab the next frame from the webcam.
    _, frame = video.read()

    # Work in HSV, where hue makes colour thresholding straightforward.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # HSV bounds for red pixels.
    lower_red = np.array([150, 50, 50])
    upper_red = np.array([255, 255, 150])

    # Keep only the pixels falling inside the red range.
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    # Quit on ESC.
    key = cv2.waitKey(5) & 0xFF
    if key == 27:
        break
cv2.destroyAllWindows() | Python | 0.000002 | |
21490bd6cd03d159a440b2c13a6b4641c789c954 | Add example | examples/example.py | examples/example.py | import sys
from tumblpy import Tumblpy
key = raw_input('App Consumer Key: ')
secret = raw_input('App Consumer Secret: ')

if 'skip-auth' not in sys.argv:
    # Full three-legged OAuth dance.
    t = Tumblpy(key, secret)
    callback_url = raw_input('Callback URL: ')

    auth_props = t.get_authentication_tokens(callback_url=callback_url)
    auth_url = auth_props['auth_url']
    OAUTH_TOKEN_SECRET = auth_props['oauth_token_secret']
    print('Connect with Tumblr via: {}'.format(auth_url))

    oauth_token = raw_input('OAuth Token (from callback url): ')
    oauth_verifier = raw_input('OAuth Verifier (from callback url): ')

    t = Tumblpy(key, secret, oauth_token, OAUTH_TOKEN_SECRET)
    authorized_tokens = t.get_authorized_tokens(oauth_verifier)
    final_oauth_token = authorized_tokens['oauth_token']
    final_oauth_token_secret = authorized_tokens['oauth_token_secret']
    print('OAuth Token: {}'.format(final_oauth_token))
    print('OAuth Token Secret: {}'.format(final_oauth_token_secret))
else:
    # Reuse previously obtained tokens instead of re-authorizing.
    final_oauth_token = raw_input('OAuth Token: ')
    final_oauth_token_secret = raw_input('OAuth Token Secret: ')

t = Tumblpy(key, secret, final_oauth_token, final_oauth_token_secret)

blog_url = t.post('user/info')
blog_url = blog_url['user']['blogs'][0]['url']
print('Your blog url is: {}'.format(blog_url))

posts = t.posts(blog_url)
print('Here are some posts this blog has made:', posts)

# print t.post('post', blog_url=blog_url, params={'type':'text', 'title': 'Test', 'body': 'Lorem ipsum.'})
| Python | 0.000003 | |
ece6fb4561e338e32e8527a068cd386f00886a67 | Add example with reuters dataset. | examples/reuters.py | examples/reuters.py | """shell
!pip install -q -U pip
!pip install -q -U autokeras==1.0.8
!pip install -q git+https://github.com/keras-team/keras-tuner.git@1.0.2rc1
"""
"""
Search for a good model for the
[Reuters](https://keras.io/ja/datasets/#_5) dataset.
"""
import tensorflow as tf
# Bug fix: `tf` is an import alias, not a module, so
# `from tf.keras.datasets import reuters` raised ModuleNotFoundError.
from tensorflow.keras.datasets import reuters
import numpy as np

import autokeras as ak


# Prepare the dataset.
def reuters_raw(max_features=20000):
    """Load the Reuters newswire data and decode it back to raw text.

    Returns ((x_train, y_train), (x_test, y_test)) where the x arrays hold
    decoded sentences as numpy string arrays and the y arrays are column
    vectors of integer topic labels.
    """
    index_offset = 3  # word index offset reserved for special tokens
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.reuters.load_data(
        num_words=max_features,
        index_from=index_offset)
    y_train = y_train.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)

    word_to_id = tf.keras.datasets.reuters.get_word_index()
    word_to_id = {k: (v + index_offset) for k, v in word_to_id.items()}
    word_to_id["<PAD>"] = 0
    word_to_id["<START>"] = 1
    word_to_id["<UNK>"] = 2
    id_to_word = {value: key for key, value in word_to_id.items()}

    x_train = list(map(lambda sentence: ' '.join(
        id_to_word[i] for i in sentence), x_train))
    x_test = list(map(lambda sentence: ' '.join(
        id_to_word[i] for i in sentence), x_test))
    # Bug fix: np.str was deprecated and removed in NumPy 1.24;
    # the builtin str is the documented replacement.
    x_train = np.array(x_train, dtype=str)
    x_test = np.array(x_test, dtype=str)
    return (x_train, y_train), (x_test, y_test)


# Prepare the data.
(x_train, y_train), (x_test, y_test) = reuters_raw()
print(x_train.shape)  # (8982,)
print(y_train.shape)  # (8982, 1)
print(x_train[0][:50])  # <START> <UNK> <UNK> said as a result of its decemb

# Initialize the TextClassifier.
clf = ak.TextClassifier(
    max_trials=5,
    overwrite=True,
)
# Callback to avoid overfitting with EarlyStopping.
cbs = [
    tf.keras.callbacks.EarlyStopping(patience=3),
]

# Search for the best model.
# Bug fix: Model.fit takes `callbacks` (plural); `callback=` was a TypeError.
clf.fit(
    x_train,
    y_train,
    epochs=10,
    callbacks=cbs
)

# Evaluate on the testing data.
print('Accuracy: {accuracy}'.format(accuracy=clf.evaluate(x_test, y_test)))
| Python | 0 | |
315914bbec88e11bf5ed3bcab29218592549eccf | Create Kmeans.py | Kmeans.py | Kmeans.py | import collections
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from pprint import pprint
import csv
import pandas
def word_tokenizer(text):
    """Tokenize *text* and return the stemmed, stopword-free tokens."""
    stemmer = PorterStemmer()
    return [stemmer.stem(token)
            for token in word_tokenize(text)
            if token not in stopwords.words('english')]
def cluster_sentences(sentences, nb_of_clusters=5):
    """Group *sentences* into *nb_of_clusters* clusters via tf-idf + k-means.

    Returns a dict mapping cluster label -> list of sentence indices.
    """
    vectorizer = TfidfVectorizer(tokenizer=word_tokenizer,
                                 stop_words=stopwords.words('english'),
                                 max_df=0.99,
                                 min_df=0.01,
                                 lowercase=True)
    # Build a tf-idf matrix for the sentences.
    tfidf_matrix = vectorizer.fit_transform(sentences)
    model = KMeans(n_clusters=nb_of_clusters)
    model.fit(tfidf_matrix)
    grouped = collections.defaultdict(list)
    for index, label in enumerate(model.labels_):
        grouped[label].append(index)
    return dict(grouped)
flatten = lambda nested: [item for sublist in nested for item in sublist]

# The CSVs hold one engagement per row; flatten them into simple lists.
with open(r'C:\Sales\SP.csv') as f:
    sentences = flatten(list(csv.reader(f)))

with open(r'C:\Sales\Cat.csv') as g:
    Cats = flatten(list(csv.reader(g)))

if __name__ == "__main__":
    nclusters = 100
    clusters = cluster_sentences(sentences, nclusters)
    for cluster in range(nclusters):
        print ("Grouped Engagements ", cluster, ":")
        for i, sentence in enumerate(clusters[cluster]):
            print ("\tEngagement ", Cats[sentence], ": ", sentences[sentence])
| Python | 0 | |
b0377568c9b927db588b006b7312cbe8ed9d48b7 | Add tremelo example | examples/tremelo.py | examples/tremelo.py | # Author: Martin McBride
# Created: 2016-01-08
# Copyright (C) 2016, Martin McBride
# License: MIT
# Website sympl.org/pysound
#
# Square wave example
try:
    import pysound
except ImportError:
    # pysound is not installed: make the parent dir of this file importable.
    import sys, os
    sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))[0]+'/..'))

from pysound.components.soundfile import write_wav
from pysound.components.wavetable import sine_wave, square_wave

#
# Tremolo effect: a slow, shallow sine wave modulates the amplitude
# of an audible square wave.
#
amp = sine_wave(frequency=10, amplitude=0.1, offset=0.8)
wave = square_wave(frequency=400, amplitude=amp)
write_wav(source=wave, filename='tremelo.wav') | Python | 0.000358 | |
ea26478495d5aec6925e32c9a87245bf2e1e4bc8 | Add script demonstrating raising and catching Exceptions. | rps/errors.py | rps/errors.py | gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
    """Return *player_move* unchanged, raising ValueError for unknown moves.

    ValueError is the idiomatic exception for a bad argument value and is
    still caught by the existing ``except Exception`` handler below.
    """
    if player_move not in gestures:
        raise ValueError("Wrong input!")
    return player_move
# Demonstrate raising and catching an exception.
try:
    player_move = verify_move(input("[rock,paper,scissors]: "))
    print("The move was correct.")
except Exception:
    print("The move was incorrect and Exception was raised.")
fb95c75b7b43bcb1fa640e4de3181fd0431c5837 | Add the unittest test_plot.py | ionic_liquids/test/test_plot.py | ionic_liquids/test/test_plot.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
FIG_SIZE = (4, 4)
def test_parity_plot():
    """Check dtype/length agreement of the parity-plot inputs and build the figure.

    Returns the matplotlib figure with an actual-vs-predicted scatter and
    the identity line.
    """
    predicted = np.arange(0, 1)
    actual = np.arange(0, 1)

    assert isinstance(predicted, type(actual)), "The two column in the parity plot should have same datatype"
    assert len(predicted) == len(actual), "The two column in the parity plot should have same length"

    fig = plt.figure(figsize=FIG_SIZE)
    plt.scatter(actual, predicted)
    lo, hi = actual.min(), actual.max()
    plt.plot([lo, hi], [lo, hi], lw=4, color='r')
    plt.xlabel('Actual')
    plt.ylabel('Predicted')
    return fig
def test_train_test_error():
    """
    Test the plot of training vs. test error.

    Checks that the training-error, test-error and model-parameter vectors
    all have the same length, then plots both error curves against the
    model parameter and returns the matplotlib figure.
    """
    e_train = np.arange(0, 1)
    e_test = np.arange(0, 1)
    model_params = np.arange(0, 1)

    assert len(e_train) == len(model_params), "The training error and model parameters should have the same dimension"
    assert len(e_test) == len(model_params), "The test error and model parameters should have the same dimension"

    fig = plt.figure(figsize=FIG_SIZE)
    plt.plot(model_params, e_train, label='Training Set')
    # Bug fix: the 'Test Set' curve previously re-plotted e_train.
    plt.plot(model_params, e_test, label='Test Set')
    plt.xlabel('Model Parameter')
    plt.ylabel('MSE of model')
    plt.legend()
    return fig
def test_scatter_plot():
    """Build and return the conductivity-vs-x scatter figure.

    Checks that the x-axis selector is a string and that the x and y
    vectors agree in length before plotting.
    """
    x_variable = 'm'
    x_vals = np.arange(0, 1)
    y_vals = np.arange(0, 1)

    assert isinstance(x_variable, str), "x_variable should be a string variable"
    assert len(x_vals) == len(y_vals), "The x and y vector should have the same dimension"

    # Map the short selector code to a human-readable axis label.
    labels = {'m': 'Mole Fraction A',
              'p': 'Pressure (kPa)',
              't': 'Temperature (K)'}
    x_variable = labels.get(x_variable, x_variable)

    fig = plt.figure(figsize=FIG_SIZE)
    plt.scatter(x_vals, y_vals)
    plt.xlabel(x_variable)
    plt.ylabel('Electrical Conductivity')
    return fig
| Python | 0.999191 | |
ecb3bd6fd9b6496a751a2145909648ba1be8f908 | add linear interpolation tests | isochrones/tests/test_interp.py | isochrones/tests/test_interp.py | import itertools
import logging
import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator
from isochrones.interp import DFInterpolator
def test_interp():
    """DFInterpolator matches scipy's RegularGridInterpolator on a 3-D grid."""
    xx, yy, zz = [np.arange(10 + np.log10(n)) * n for n in (1, 10, 100)]

    def func(x, y, z):
        return x**2 * np.cos(y / 10) + z

    records = [(x, y, z, func(x, y, z))
               for x, y, z in itertools.product(xx, yy, zz)]
    df = pd.DataFrame(records,
                      columns=['x', 'y', 'z', 'val']).set_index(['x', 'y', 'z'])

    grid = np.reshape(df.val.values, (10, 11, 12))
    interp = RegularGridInterpolator([xx, yy, zz], grid)
    df_interp = DFInterpolator(df)

    grid_pars = [6, 50, 200]
    pars = [3.1, 44, 503]

    # A grid point must be reproduced exactly.
    assert df_interp(grid_pars, 'val') == func(*grid_pars)

    # Off-grid, the linear interpolation should agree with scipy.
    mine = df_interp(pars, 'val')
    theirs = interp(pars)[0]
    if not np.isclose(mine, theirs, rtol=1e-10, atol=1e-11):
        logging.debug('mine: {}, scipy: {}'.format(mine, theirs))
        raise AssertionError
| Python | 0.000001 | |
947c9ef100686fa1ec0acaa10bc49bf6c785665b | Use unified class for json output | ffflash/container.py | ffflash/container.py | from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
    """JSON-backed data store that tracks access metadata on every save."""

    def __init__(self, spec, filename):
        self._spec = spec
        self._location = path.abspath(filename)
        self.data = read_json_file(self._location, fallback={})
        self._info()

    def _info(self, info=None):
        """Refresh the '_info' bookkeeping section of the payload.

        *info* optionally supplies extra keys merged on top.  Bug fix: the
        default was the mutable ``{}``; ``None`` avoids the shared
        default-argument pitfall while behaving identically.
        """
        self.data['_info'] = self.data.get('_info', {})
        self.data['_info']['generator'] = RELEASE
        self.data['_info']['access'] = self.data['_info'].get('access', {})
        # Record the first access only once; 'last' is refreshed every call.
        if not self.data['_info']['access'].get('first', False):
            self.data['_info']['access']['first'] = now
        self.data['_info']['access']['last'] = now
        self.data['_info']['access']['overall'] = epoch_repr(
            abs(now - self.data['_info']['access']['first']),
            ms=True
        )
        self.data['_info']['access']['timeout'] = timeout
        if info:
            self.data['_info'] = merge_dicts(self.data['_info'], info)

    def save(self, info=None):
        """Write the payload to disk, updating '_info' (merged with *info*)."""
        self._info(info)
        if write_json_file(self._location, self.data):
            log.info('{} saved {}'.format(self._spec, self._location))
| Python | 0.000011 | |
6c5dad5d617892a3ea5cdd20cbaef89189307195 | add simple content-based model for coldstart | polara/recommender/coldstart/models.py | polara/recommender/coldstart/models.py | import numpy as np
from polara.recommender.models import RecommenderModel
class ContentBasedColdStart(RecommenderModel):
    """Cold-start recommender that ranks warm users by content similarity."""

    def __init__(self, *args, **kwargs):
        super(ContentBasedColdStart, self).__init__(*args, **kwargs)
        self.method = 'CB'
        self._key = '{}_cold'.format(self.data.fields.itemid)
        self._target = self.data.fields.userid

    def build(self):
        # No training phase: recommendations are computed on the fly.
        pass

    def get_recommendations(self):
        """Return, for each cold item, the indices of the top-k most similar users."""
        similarity = self.data.cold_items_similarity
        interactions = self.get_training_matrix()
        # Binarize the feedback: only *which* items a user touched matters.
        interactions.data = np.ones_like(interactions.data)
        scores = similarity.dot(interactions.T).tocsr()
        return self.get_topk_elements(scores).astype(np.intp)
| Python | 0 | |
2ca6b22e645cbbe63737d4ac3929cb23700a2e06 | Prepare v1.2.342.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.342.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.341'
| Python | 0.000002 |
edbc9f2c31f98e1447c352058aa05e6884a0927b | Create fold_eigenvalues.py | fold_eigenvalues.py | fold_eigenvalues.py | #Definition of inputs and outputs
#==================================
##[Mes scripts GEOL]=group
##entree=vector
##dip_dir=field entree
##dip=field entree
#Algorithm body
#==================================
from qgis.core import *
from apsg import *

layer = processing.getObject(entree)
dipdir = layer.fieldNameIndex(dip_dir)
dip = layer.fieldNameIndex(dip)

# Build the group of bedding planes from the selection, or the whole layer
# when nothing is selected.
if layer.selectedFeatureCount():
    features = layer.selectedFeatures()
else:
    features = layer.getFeatures()
g = Group([Vec3(Fol(elem.attributes()[dipdir], elem.attributes()[dip]))
           for elem in features], name='plis')

# Performance fix: Ortensor(g) (an eigen decomposition) was rebuilt for
# every plot call below; compute it once and reuse it.
ot = Ortensor(g)

# The third eigenvector of the orientation tensor is the fold axis.
resultat = "fold plunge: : " + str(int(round(ot.eigenlins.data[2].dd[1]))) + " -> " + str(int(round(ot.eigenlins.data[2].dd[0])))

s = StereoNet()
a = s.ax
s.line(g.aslin, 'b.', markersize=18)
s.line(ot.eigenlins.data[0], 'g.', markersize=18)
s.plane(ot.eigenfols.data[0], 'g')
s.line(ot.eigenlins.data[1], 'c.', markersize=18)
s.plane(ot.eigenfols.data[1], 'c')
s.line(ot.eigenlins.data[2], 'r.', markersize=18)
s.plane(ot.eigenfols.data[2], 'r')
a.set_title(resultat, y=1.06, size=14, color='red')
s.show()
| Python | 0.000002 | |
f55771da6a617c71f2eb620c11fb54e033c64338 | Migrate upload-orange-metadata process type | resolwe_bio/migrations/0002_metadata_table_type.py | resolwe_bio/migrations/0002_metadata_table_type.py | from django.db import migrations
from resolwe.flow.migration_ops import ResolweProcessChangeType
class Migration(migrations.Migration):
    """Retype the ``upload-orange-metadata`` process to a unique-metadata type."""

    dependencies = [
        ("resolwe_bio", "0001_squashed_0015_sample_indices"),
    ]

    operations = [
        ResolweProcessChangeType(
            process="upload-orange-metadata",
            new_type="data:metadata:unique:",
        ),
    ]
| Python | 0 | |
4170807e4a1c70eef6416fe3f1661c9c1c99a9da | Add pysal test | tests/test_pysal.py | tests/test_pysal.py | import unittest
from pysal.weights import lat2W
class TestPysal(unittest.TestCase):
# Smoke test for pysal's lattice weights builder: lat2W(4, 4) constructs
# contiguity weights for a 4x4 regular grid, i.e. 16 observations.
def test_distance_band(self):
w = lat2W(4,4)
self.assertEqual(16, w.n) | Python | 0.000026 | |
484a2bf0c28aa2bbc910ca20849840bf518d4329 | Add utils.banners test case | tests/test_utils.py | tests/test_utils.py | # Foremast - Pipeline Tooling
#
# Copyright 2016 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
from unittest import mock
from foremast.utils.banners import banner
@mock.patch('foremast.utils.banners.LOG')
def test_utils_banner(mock_log):
    """banner() should log a border line of exactly *width* characters."""
    width = 10
    banner('test', border='+', width=width)
    mock_log.info.assert_called_with('+' * width)
| Python | 0.000001 | |
45efbbdfd62cd0f9f8232bfd7ebd1aae0ac6cd17 | Create humidity.py | abstractions/sensor/humidity/humidity.py | abstractions/sensor/humidity/humidity.py | # This code has to be added to __init__.py in folder .../devices/sensor
class Humidity():
    """WebIOPi-style abstraction for humidity sensors.

    Concrete drivers implement __getHumidity__ (a 0..1 float); the REST
    endpoints below expose the reading as a float and as a percentage.
    """

    def __family__(self):
        return "Humidity"

    def __getHumidity__(self):
        # Must be provided by the concrete sensor driver.
        raise NotImplementedError

    @api("Humidity", 0)
    @request("GET", "sensor/humidity/*")
    @response(contentType=M_JSON)
    def humidityWildcard(self):
        """Return both representations in a single JSON payload."""
        humidity = self.__getHumidity__()
        return {
            "float": "%f" % humidity,
            "percent": "%d" % (humidity * 100),
        }

    @api("Humidity")
    @request("GET", "sensor/humidity/float")
    @response("%f")
    def getHumidity(self):
        return self.__getHumidity__()

    @api("Humidity")
    @request("GET", "sensor/humidity/percent")
    @response("%d")
    def getHumidityPercent(self):
        return self.__getHumidity__() * 100
| Python | 0.001003 | |
c9bd5ba167284d79ae0cbe7aaaf9ec8536bef918 | add hiprec.py | benchexec/tools/hiprec.py | benchexec/tools/hiprec.py | #!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
import sys
import os
import re
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
sys.dont_write_bytecode = True # prevent creation of .pyc files
if __name__ == "__main__":
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
from benchexec.model import SOFTTIMELIMIT
REQUIRED_PATHS = [
"hiprec",
]
class Tool(benchexec.tools.template.BaseTool):
    """
    Tool wrapper for HIPrec.
    """

    def executable(self):
        """Locate the hiprec binary on the PATH."""
        return util.find_executable('hiprec')

    def working_directory(self, executable):
        return os.curdir

    def name(self):
        return 'hiprec'

    def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
        return [executable] + options + tasks

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        """
        Derive a benchexec result status from the tool output.

        @param returncode: code returned by the tool
        @param returnsignal: signal which terminated the tool
        @param output: the output lines of the tool
        @return: status string after executing a run
        """
        # Bug fix: `status` was read before ever being assigned, raising
        # UnboundLocalError on the first "Verification result:" line (and
        # when no such line appeared at all).
        status = None
        for line in output:
            if line.startswith('Verification result: '):
                # NOTE(review): the prefix is 21 characters, so [22:] skips
                # one extra character before the verdict — confirm against
                # real hiprec output.
                line = line[22:].strip()
                if line.startswith('TRUE'):
                    newStatus = result.RESULT_TRUE_PROP
                elif line.startswith('FALSE'):
                    newStatus = result.RESULT_FALSE_REACH
                else:
                    newStatus = result.RESULT_UNKNOWN

                if not status:
                    status = newStatus
                elif newStatus != result.RESULT_UNKNOWN:
                    status = "{0} ({1})".format(status, newStatus)

        if not status:
            status = result.RESULT_UNKNOWN
        return status
| Python | 0.002017 | |
d726fd9b05b846097ee877ad0897f8416dbceaf7 | Add missing __init__ | gallery/__init__.py | gallery/__init__.py | from .gallery import *
| Python | 0.998696 | |
057317f9ffb49eae5d11799b6d4191a3fef421e0 | Allow packages to use fake s3: URI's to specify that the package should be loaded from S3 with credentials | boto/pyami/startup.py | boto/pyami/startup.py | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os, sys, traceback
import boto
from boto.utils import find_class
from boto import config
from boto.pyami.scriptbase import ScriptBase
class Startup(ScriptBase):
def run_installer_commands(self):
commands = config.get_value('Pyami', 'installer_commands')
if commands:
for command in commands.split(','):
self.run('apt-get -y %s' % command)
def fetch_s3_file(s3_file):
try:
if s3_filename.startswith('s3:'):
bucket_name, key_name = s3_file[len('s3:'):].split('/')
c = boto.connect_s3()
bucket = c.get_bucket(bucket_name)
key = bucket.get_key(key_name)
print 'Fetching %s.%s' % (bucket.name, key.name)
path = os.path.join(config.get_value('General', 'working_dir'), key.name)
key.get_contents_to_filename(script_path)
except:
path = None
print 'Problem Retrieving file: %s' % s3_file
return path
def load_packages(self):
package_str = config.get_value('Pyami', 'packages')
if package_str:
packages = package_str.split(',')
for package in packages:
if package.startswith('s3:'):
package = fetch_s3_package(package)
if package:
# if the "package" is really a .py file, it doesn't have to
# be installed, just being in the working dir is enough
if not package.endswith('.py'):
self.run('easy_install %s' % package, exit_on_error=False)
def run_scripts(self):
scripts = config.get_value('Pyami', 'scripts')
if scripts:
for script in scripts.split(','):
try:
self.log('Running Script: %s' % script)
module_name, class_name = script.split(':')
cls = find_class(module_name, class_name)
s = cls(self.log_fp)
s.main()
except Exception, e:
self.log('Problem Running Script: %s' % script)
traceback.print_exc(None, self.log_fp)
def main(self):
self.run_installer_commands()
self.load_packages()
self.run_scripts()
self.get_script()
self.notify('Startup Completed for %s' % config.get_instance('instance-id'))
if __name__ == "__main__":
sys.path.append(config.get_value('General', 'working_dir'))
su = Startup()
su.main()
| # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os, sys, traceback
import boto
from boto.utils import find_class
from boto import config
from boto.pyami.scriptbase import ScriptBase
class Startup(ScriptBase):
def run_installer_commands(self):
commands = config.get_value('Pyami', 'installer_commands')
if commands:
for command in commands.split(','):
self.run('apt-get -y %s' % command)
def load_packages(self):
package_str = config.get_value('Pyami', 'packages')
if package_str:
packages = package_str.split(',')
for package in packages:
self.run('easy_install %s' % package, exit_on_error=False)
def get_script(self):
script_name = config.get_value('Pyami', 'script_name')
if script_name:
c = boto.connect_s3()
script_name = script_name + '.py'
script_bucket = config.get_value('Pyami', 'script_bucket')
if not script_bucket:
script_bucket = config.get_value('Pyami', 'bucket_name')
bucket = c.get_bucket(script_bucket)
script = bucket.get_key(script_name)
print 'Fetching %s.%s' % (bucket.name, script.name)
script_path = os.path.join(config.get_value('General', 'working_dir'), script_name)
script.get_contents_to_filename(script_path)
module_name = config.get_value('Pyami', 'script_name')
sys.path.append(config.get_value('General', 'working_dir'))
debug = config.getint('Boto', 'debug')
# debug level greater than 1 means don't even startup the script
if debug > 1:
return
if module_name:
cls = find_class(module_name, config.get_value('Pyami', 'class_name'))
s = cls(config)
s.run()
def run_scripts(self):
scripts = config.get_value('Pyami', 'scripts')
if scripts:
for script in scripts.split(','):
try:
self.log('Running Script: %s' % script)
module_name, class_name = script.split(':')
cls = find_class(module_name, class_name)
s = cls(self.log_fp)
s.main()
except Exception, e:
self.log('Problem Running Script: %s' % script)
traceback.print_exc(None, self.log_fp)
def main(self):
self.run_installer_commands()
self.load_packages()
self.run_scripts()
self.get_script()
self.notify('Startup Completed for %s' % config.get_instance('instance-id'))
if __name__ == "__main__":
su = Startup()
su.main()
| Python | 0.000001 |
76ea7119e075cf6eb86d64768e90cfda124cedf9 | Add benchmarking script | serd_bench.py | serd_bench.py | #!/usr/bin/env python
import optparse
import os
import subprocess
import sys
class WorkingDirectory:
    "Context manager that chdirs on entry and restores the previous cwd on exit"

    def __init__(self, working_dir):
        # Capture the cwd at construction time; `gen` reads original_dir.
        self.original_dir = os.getcwd()
        self.working_dir = working_dir

    def __enter__(self):
        os.chdir(self.working_dir)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        os.chdir(self.original_dir)
def filename(n):
    "Name of the generated Turtle file holding n statements"
    return 'gen%d.ttl' % n
def gen(sp2b_dir, n_min, n_max, step):
    """Generate files with n_min ... n_max statements if they are not present.

    Runs sp2b_gen inside *sp2b_dir*, writing each output into the build/
    directory next to the original working directory.
    """
    # Renamed the context variable: `dir` shadowed the builtin of that name.
    with WorkingDirectory(sp2b_dir) as wd:
        for n in range(n_min, n_max + step, step):
            out_path = os.path.join(wd.original_dir, 'build', filename(n))
            if not os.path.exists(out_path):
                subprocess.call(['sp2b_gen', '-t', str(n), out_path])
def write_header(results, progs):
    "Write the header line for TSV output"
    # First column is n; one column per benchmarked program (basename only).
    columns = ['n']
    for prog in progs:
        columns.append(os.path.basename(prog.split()[0]))
    results.write('\t'.join(columns) + '\n')
def parse_time(report):
    """Parse a ``/usr/bin/time -v`` report.

    Returns ``(user_seconds, max_rss_bytes)``; either element is ``None``
    if the corresponding line was not found."""
    time = None
    memory = None
    for line in report.split('\n'):
        value = line[line.find(':') + 1:]
        if line.startswith('\tUser time'):
            time = float(value)
        elif line.startswith('\tMaximum resident set'):
            # /usr/bin/time reports RSS in kbytes; convert to bytes.
            memory = float(value) * 1024
    return (time, memory)
def run(progs, n_min, n_max, step):
    """Benchmark each program with n_min ... n_max statements.

    Writes three TSV files under build/: serdi-time.txt, serdi-throughput.txt
    and serdi-memory.txt, one row per statement count n, one column per
    program.  NOTE: uses dict.iteritems(), so this is Python 2 code.
    """
    with WorkingDirectory('build'):
        results = {'time': open('serdi-time.txt', 'w'),
                   'throughput': open('serdi-throughput.txt', 'w'),
                   'memory': open('serdi-memory.txt', 'w')}

        # Write TSV header for all output files
        for name, f in results.iteritems():
            write_header(f, progs)

        for n in range(n_min, n_max + step, step):
            # Add first column (n) to rows
            rows = {}
            for name, _ in results.iteritems():
                rows[name] = [str(n)]

            # Run each program and fill rows with measurements
            for prog in progs:
                # Program stdout goes to genN.ttl.out; /usr/bin/time's
                # report arrives on stderr and is parsed below.
                cmd = '/usr/bin/time -v ' + prog + ' ' + filename(n)
                with open(filename(n) + '.out', 'w') as out:
                    sys.stderr.write(cmd + '\n')
                    proc = subprocess.Popen(
                        cmd.split(), stdout=out, stderr=subprocess.PIPE)
                    time, memory = parse_time(proc.communicate()[1])
                    rows['time'] += ['%.07f' % time]
                    # Statements per second of user time.
                    rows['throughput'] += ['%d' % (n / time)]
                    rows['memory'] += [str(memory)]

            # Write rows to output files
            for name, f in results.iteritems():
                f.write('\t'.join(rows[name]) + '\n')

        for name, _ in results.iteritems():
            sys.stderr.write('wrote build/serdi-%s.txt\n' % name)
# Command-line entry point: always benchmarks serdi, plus any commands
# given via --run (the input filename is appended to each command).
if __name__ == "__main__":
    class OptParser(optparse.OptionParser):
        # Keep the epilog's formatting (optparse normally rewraps it).
        def format_epilog(self, formatter):
            return self.expand_prog_name(self.epilog)

    opt = OptParser(
        usage='%prog [OPTION]... SP2B_DIR',
        description='Benchmark RDF reading and writing commands\n',
        epilog='''
Example:
  %prog --max 100000 \\
      --run 'rapper -i turtle -o turtle' \\
      --run 'riot --output=ttl' \\
      --run 'rdfpipe -i turtle -o turtle' /path/to/sp2b/src/
''')

    opt.add_option('--max', type='int', default=1000000,
                   help='maximum triple count')
    opt.add_option('--run', type='string', action='append', default=[],
                   help='additional command to run (input file is appended)')

    (options, args) = opt.parse_args()
    if len(args) != 1:
        opt.print_usage()
        sys.exit(1)

    progs = ['serdi -b -f -i turtle -o turtle'] + options.run

    # NOTE(review): '/' assumes Python 2 integer division; under Python 3
    # this yields a float and range() below would fail -- confirm target.
    min_n = options.max / 10
    max_n = options.max
    step = min_n

    gen(str(args[0]), min_n, max_n, step)
    run(progs, min_n, max_n, step)
| Python | 0.000001 | |
c206969facfc0e46d7ec4d3f60ce2e6a07956dbd | Use filfinder to get the average radial width of features in the moment 0 | 14B-088/HI/analysis/run_filfinder.py | 14B-088/HI/analysis/run_filfinder.py |
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
# Load the M33 HI moment-0 map and attach its WCS and beam metadata.
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)

# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)

# distance=0.84e6 pc is the adopted distance to M33.
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)

# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
| Python | 0 | |
da2de3d9d4b36bf2068dbe5b80d785748f532292 | Add __init__.py for the schedule package | pygotham/schedule/__init__.py | pygotham/schedule/__init__.py | """Schedule package."""
| Python | 0 | |
da5fed886d519b271a120820668d21518872f52c | Remove Duplicates from Sorted Array problem | remove_duplicates_from_sorted_array.py | remove_duplicates_from_sorted_array.py | '''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
    # @param a list of integers
    # @return an integer
    def removeDuplicates(self, A):
        """Remove duplicates from the sorted list A in place.

        Returns the number of unique values; after the call the first
        that-many entries of A hold the de-duplicated sequence.
        """
        if len(A) < 2:
            return len(A)
        # Single pass: ``write`` marks the end of the de-duplicated
        # prefix, ``read`` scans ahead for the next distinct value.
        write = 0
        for read in range(1, len(A)):
            if A[read] != A[write]:
                write += 1
                A[write] = A[read]
        return write + 1
# Manual smoke test (Python 2 print statements): a list with duplicates
# and the all-equal two-element case.
if __name__ == '__main__':
    s = Solution()
    A = [1, 1, 2, 2, 3]
    print s.removeDuplicates(A)
    print A
    A = [1, 1]
    print s.removeDuplicates(A)
    print A
| Python | 0 | |
e6d247319e7959a6418f8e5b9db949acc93f7d9c | Add the support of EC2 Snapshot | kamboo/snapshot.py | kamboo/snapshot.py | # Copyright (c) 2014, Henry Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from botocore import xform_name
from kamboo.core import KambooConnection
from kamboo.exceptions import KambooException, TooManyRecordsException
from kamboo.utils import compare_list_of_dict, clean_null_items
from kamboo.utils import wait_to_complete
log = logging.getLogger(__name__)
class SnapshotCollection(KambooConnection):
    """
    Represents a collection of EC2 Snapshots in one region/account.

    All methods talk to EC2 through ``self.conn`` (set up by
    KambooConnection).
    """
    def __init__(self, service_name="ec2", region_name=None,
                 account_id=None, credentials=None):
        super(SnapshotCollection, self).__init__(service_name,
                                                 region_name,
                                                 account_id,
                                                 credentials)

    def copy_resource(self, source_region, source_id,
                      description=None):
        """
        Copy snapshot ``source_id`` from ``source_region`` into this
        collection's region and return the new ``Snapshot`` (the copy may
        still be in progress).  Raises KambooException on failure.
        """
        params = {"source_region": source_region,
                  "source_snapshot_id": source_id,
                  "description": description}
        r_data = self.conn.copy_snapshot(**clean_null_items(params))
        if "SnapshotId" not in r_data:
            raise KambooException(
                "Fail to copy the Snapshot '%s:%s'" % (source_region,
                                                       source_id))
        return Snapshot(r_data["SnapshotId"], collection=self)

    def wait_to_copy_resource(self, source_region, source_id,
                              description=None):
        """
        Copy a snapshot and block until it reaches the 'available' state.
        """
        snapshot = self.copy_resource(source_region=source_region,
                                      source_id=source_id,
                                      description=description)
        return wait_to_complete(resource=snapshot, expected_status="available")

    def get_resource_attribute(self, snapshot_id):
        """
        Fetch the attribute of the specified EC2 Snapshot.

        Returns a namedtuple whose fields are the snake_cased keys of the
        snapshot description, plus 'permission' when it can be fetched.
        """
        r_data = self.conn.describe_snapshots(snapshot_ids=[snapshot_id])
        if "Snapshots" not in r_data:
            raise KambooException("No such Snapshot attribute found")
        if len(r_data["Snapshots"]) > 1:
            raise TooManyRecordsException("More than one Snapshot found")
        attr_dict = r_data["Snapshots"][0]
        try:
            attr_dict.update(
                {"Permission": self.get_resource_permission(snapshot_id)})
        except Exception:
            # Permission lookup is best-effort (e.g. snapshots owned by
            # another account); the field simply stays absent.
            pass
        name = ''.join([self.__class__.__name__, "Attribute"])
        keys = [xform_name(key) for key in attr_dict.keys()]
        return namedtuple(name, keys)(*attr_dict.values())

    def get_resource_permission(self, snapshot_id):
        """
        Fetch the createVolumePermission list of the specified EC2 Snapshot.
        """
        r_data = self.conn.describe_snapshot_attribute(
            snapshot_id=snapshot_id,
            attribute="createVolumePermission")
        if "createVolumePermissions" not in r_data:
            raise KambooException("No such Snapshot permission found")
        return r_data["createVolumePermissions"]

    def set_resource_permission(self, id, old, new):
        """
        Modify the permission of the specified EC2 Snapshot by applying
        the diff between the ``old`` and ``new`` permission lists.
        """
        permission_diff = compare_list_of_dict(old, new)
        params = clean_null_items(permission_diff)
        if params:
            self.conn.modify_snapshot_attribute(
                snapshot_id=id,
                create_volume_permission=params)

    def get_resource_tags(self, snapshot_id):
        """
        Fetch the tags of the specified EC2 Snapshot.
        """
        r_data = self.conn.describe_tags(resources=[snapshot_id])
        if "Tags" not in r_data:
            raise KambooException("No such Snapshot tags found")
        return r_data["Tags"]

    def set_resource_tags(self, snapshot_id, tags=None):
        """
        Create/overwrite the given tags on the specified EC2 Snapshot.
        """
        r_data = self.conn.create_tags(resources=[snapshot_id], tags=tags)
        if "return" in r_data:
            if r_data["return"] == "true":
                return
        raise KambooException("Fail to add tags to the specified Snapshot")

    def delete_resource(self, snapshot_id):
        """
        Delete the specified EC2 Snapshot.
        """
        # Bug fix: previously called ``self.collection.conn`` (this class
        # has no ``collection`` attribute) with the undefined name ``id``,
        # so every call raised AttributeError/NameError.
        r_data = self.conn.delete_snapshot(snapshot_id=snapshot_id)
        if "return" in r_data:
            if r_data["return"] == "true":
                return
        raise KambooException("Fail to delete the specified Snapshot")
class Snapshot(object):
    """
    Represents an EC2 Snapshot.

    ``collection`` is the SnapshotCollection used for all API calls;
    attribute fields fetched from the API are copied onto the instance by
    refresh_resource_attribute().
    """
    def __init__(self, id, attribute=None, collection=None):
        self.id = id
        self.collection = collection
        # Populate state/tags/description/permission from the API (or the
        # pre-fetched ``attribute`` namedtuple, if given).
        self.refresh_resource_attribute(id, attribute)

    def __repr__(self):
        return 'Snapshot:%s' % self.id

    @property
    def desc(self):
        # Description cached at the last attribute refresh (or set).
        return self._desc

    @desc.setter
    def desc(self, value):
        # NOTE(review): this caches the raw API response dict rather than
        # ``value`` -- looks unintentional; confirm before relying on
        # ``desc`` right after assignment.
        self._desc = self.collection.conn.modify_snapshot_attribute(
            snapshot_id=self.id, description=value)

    @property
    def status(self):
        # Re-query the API so the reported state is current.
        self.refresh_resource_attribute()
        return self.state

    @property
    def tags(self):
        return self._tags

    @tags.setter
    def tags(self, value):
        self.collection.set_resource_tags(self.id, tags=value)
        self._tags = value

    @property
    def permission(self):
        return self._permission

    @permission.setter
    def permission(self, value):
        # The collection applies only the diff between old and new lists.
        self.collection.set_resource_permission(
            self.id, self._permission, value)
        self._permission = value

    def refresh_resource_attribute(self, id=None, attribute=None):
        """Reload (or apply) the snapshot attribute namedtuple, copying
        every field onto the instance."""
        if id is None:
            id = self.id
        if not attribute:
            attribute = self.collection.get_resource_attribute(id)
        self.__dict__.update(attribute._asdict())
        # The API's snapshot_id is authoritative for our id.
        self.id = self.snapshot_id
        self._tags = getattr(attribute, "tags", [])
        self._desc = getattr(attribute, "description", "")
        self._permission = getattr(attribute, "permission", [])

    def add_permission(self, account_id):
        """Grant createVolumePermission to another AWS account."""
        new_permission = []
        new_permission.extend(self.permission)
        new_permission.append({"UserId": account_id})
        self.permission = new_permission

    def delete(self, id=None):
        """Delete this snapshot (or the one given by ``id``) via the
        owning collection."""
        if id is None:
            id = self.id
        self.collection.delete_resource(id)
| Python | 0.000001 | |
14302f83d755d2319a00db123dab14b300c8c93f | Add python patch script | patch.py | patch.py | import json
import subprocess
# This script will:
# - read current version
# - increment patch version
# - update version in a few places
# - insert new line in ripme.json with message
message = raw_input('message: ')
with open('ripme.json') as dataFile:
ripmeJson = json.load(dataFile)
currentVersion = ripmeJson["latestVersion"]
print 'Current version ' + currentVersion
versionFields = currentVersion.split('.')
patchCur = int(versionFields[2])
patchNext = patchCur + 1
majorMinor = versionFields[:2]
majorMinor.append(str(patchNext))
nextVersion = '.'.join(majorMinor)
print 'Updating to ' + nextVersion
substrExpr = 's/' + currentVersion + '/' + nextVersion + '/'
subprocess.call(['sed', '-i', '-e', substrExpr, 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'])
subprocess.call(['git', 'grep', 'DEFAULT_VERSION.*' + nextVersion,
'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'])
substrExpr = 's/\\\"latestVersion\\\": \\\"' + currentVersion + '\\\"/\\\"latestVersion\\\": \\\"' +\
nextVersion + '\\\"/'
subprocess.call(['sed', '-i', '-e', substrExpr, 'ripme.json'])
subprocess.call(['git', 'grep', 'latestVersion', 'ripme.json'])
substrExpr = 's/<version>' + currentVersion + '/<version>' + nextVersion + '/'
subprocess.call(['sed', '-i', '-e', substrExpr, 'pom.xml'])
subprocess.call(['git', 'grep', '<version>' + nextVersion + '</version>', 'pom.xml'])
commitMessage = nextVersion + ': ' + message
changeLogLine = ' \"' + commitMessage + '\",\n'
dataFile = open("ripme.json", "r")
ripmeJsonLines = dataFile.readlines()
ripmeJsonLines.insert(3, changeLogLine)
outputContent = ''.join(ripmeJsonLines)
dataFile.close()
dataFile = open("ripme.json", "w")
dataFile.write(outputContent)
dataFile.close()
subprocess.call(['git', 'add', '-u'])
subprocess.call(['git', 'commit', '-m', commitMessage])
subprocess.call(['git', 'tag', nextVersion])
| Python | 0.000001 | |
048e6960d9e6408ef5dbfad2e32d2d1768ead1da | set P(A) | pb151.py | pb151.py | import math
import time
import random
t1 = time.time()
# A1:16
# A2:8
# A3:4
# A4:2
# A5:1
'''
def getRandom(n):
return random.randint(1,n)
def getbatch(env,l):
i = getRandom(l)-1
t = env[i]
env.pop(i)
if t == 1:
return env
if t == 2:
return env+[1]
if t == 4:
return env+[1,2]
if t == 8:
return env+[1,2,4]
def testweek():
env = [1,2,4,8]
el = 4
count = 0
for i in range(14):
env = getbatch(env,el)
el = len(env)
if el == 1:
count += 1
return count
N = 600000000
total = 0
for i in range(N):
total += testweek()
avg = total/N
k = math.pow(10,6)
print(round(avg*k)/k)
'''
def atone(s):
    """Recursive expectation for the Project Euler 151 envelope process.

    ``s[i]`` counts the sheets of size index ``i`` currently in the
    envelope, index 0 being the smallest (A5).  Returns the expected
    number of times a single non-A5 sheet is found in the envelope.
    """
    # Terminal state: only the final A5 sheet remains.
    if s == [1, 0, 0, 0]:
        return 0
    expected = 0
    weight = 0
    for idx, count in enumerate(s):
        if count == 0:
            continue
        weight += count
        nxt = list(s)
        nxt[idx] -= 1
        # Cutting the drawn sheet leaves one sheet of every smaller size.
        for smaller in range(idx):
            nxt[smaller] += 1
        sub = atone(nxt)
        # Count the event "exactly one sheet remains and it is not A5".
        if sum(nxt) == 1 and nxt[0] != 1:
            sub += 1
        expected += count * sub
    return expected / weight
# Expected count for the initial envelope, rounded to six decimals.
avg = atone([1,1,1,1])
k = math.pow(10,6)
print(round(avg*k)/k)

print("time:",time.time()-t1)
| Python | 0.999995 | |
f0684a5bb5860c2b9caffefb47dc55781092819e | Add eTools engine | searx/engines/etools.py | searx/engines/etools.py | """
eTools (Web)
@website https://www.etools.ch
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, content
"""
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import quote
from searx.utils import eval_xpath
categories = ['general']
paging = False
language_support = False
safesearch = True
base_url = 'https://www.etools.ch'
search_path = '/searchAdvancedSubmit.do'\
'?query={search_term}'\
'&pageResults=20'\
'&safeSearch={safesearch}'
def request(query, params):
    """Build the eTools search URL for ``query`` into ``params['url']``.

    ``params['safesearch']`` (truthy/falsy) is mapped onto the engine's
    ``safeSearch`` boolean query argument.
    """
    if params['safesearch']:
        safesearch = 'true'
    else:
        safesearch = 'false'

    params['url'] = base_url + search_path.format(search_term=quote(query), safesearch=safesearch)

    return params
def response(resp):
    """Extract result dicts (url, title, content) from the eTools HTML."""
    results = []

    dom = html.fromstring(resp.text)

    # Each result row is a <td class="record"> inside the result table.
    for result in eval_xpath(dom, '//table[@class="result"]//td[@class="record"]'):
        url = eval_xpath(result, './a/@href')[0]
        title = extract_text(eval_xpath(result, './a//text()'))
        content = extract_text(eval_xpath(result, './/div[@class="text"]//text()'))

        results.append({'url': url,
                        'title': title,
                        'content': content})

    return results
| Python | 0.000001 | |
4523621d2dd8913cb9c4156bf20e800652318a9d | add whileloop | whileloop.py | whileloop.py | a = 1
while a < 10:
print (a)
a = a+1
| Python | 0.000009 | |
5213db56ab93551360a79d68fc8ecbe98b379b46 | Add validplots.py | validplots.py | validplots.py | """
This script creates validation plots based on statistics collected by validstats.py module.
Usage:
python validplots.py statfile [outdir]
"""
import sys
import os
import pywikibot
import matplotlib.pyplot as plt
FILEDESC = """
== Краткое описание ==
{{Изображение
| Описание = Динамика числа неотпатрулированных страниц в виде графика.
| Источник = собственная работа
| Время создания = 2017
| Автор = [[У:Facenapalm]]
}}
== Лицензирование ==
{{self|CC-Zero}}
"""
LOCAL = False # set it to True to deny file uploading
def main():
    """Read the TSV stats file given as argv[1], then render and (unless
    LOCAL) upload one plot pair per page namespace.  argv[2], if present,
    is the directory for temporary plot files (default: cwd)."""
    argc = len(sys.argv)
    if argc == 1:
        return
    fname = sys.argv[1]
    if argc == 2:
        tempcat = "."
    else:
        tempcat = sys.argv[2]

    def _filter_date(date):
        """Return x-axis labels based on dates list."""
        months = ["январь", "февраль", "март", "апрель", "май", "июнь",
                  "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"]
        date_parts = date.split("-")
        # Only the first day of each month gets a tick label; January
        # shows the year, other months their (Russian) name.
        if date_parts[2] != "01":
            return ""
        if date_parts[1] == "01":
            return date_parts[0]
        else:
            return months[int(date_parts[1]) - 1]

    # Load the TSV (skipping the header row) and transpose so data[col]
    # is one series; data[0] holds the dates.
    data = [line.strip().split("\t") for line in list(open(fname))[1:]]
    data = [list(line) for line in zip(*data)] # transpose data
    data[0] = [_filter_date(date) for date in data[0]]
    axis = list(range(len(data[0])))

    def _plot_pair(title, ufilename, udata, ucolor, ofilename, odata, ocolor):
        """
        Make 2 plots with unreviewed and old pages respectively.
        If ufilename == ofilename, then make it on single canvas.
        """
        utitle = "Динамика числа неотпатрулированных {}".format(title)
        otitle = "Динамика числа устаревших {}".format(title)
        single = ufilename == ofilename

        def _init_plot(title):
            "Iternal function for plt initialization."
            plt.figure(figsize=(16, 9), dpi=100)
            plt.xticks(axis, data[0])
            plt.xlabel(title)

        def _final_plot(filename):
            "Iternal function for plot saving and uploading."
            filepath = os.path.join(tempcat, filename)
            if single:
                plt.legend()
            plt.margins(0, 0.02)
            plt.subplots_adjust(left=0.05, right=1, top=1, bottom=0.1)
            plt.savefig(filepath, bbox_inches="tight")
            if not LOCAL:
                # Upload to the wiki and remove the local temp file.
                site = pywikibot.Site()
                page = pywikibot.FilePage(site, filename)
                page.upload(filepath, comment="Обновление графика.", text=FILEDESC, ignore_warnings=True)
                os.remove(filepath)

        _init_plot(utitle)
        plt.plot(axis, udata, linewidth=2, color=ucolor, label="Неотпатрулированные")
        if not single:
            # Separate canvases: finish the first plot, start the second.
            _final_plot(ufilename)
            _init_plot(otitle)
        plt.plot(axis, odata, linewidth=2, color=ocolor, label="Устаревшие")
        _final_plot(ofilename)

    # Column layout of the stats file: 1/2 articles, 3/4 files,
    # 5/6 templates, 7/8 categories, 9/10 redirects
    # (unreviewed / outdated respectively).
    _plot_pair("статей",
               "validation main unrev.png", data[1], "#027495",
               "validation main old.png", data[2], "#01A9C1")
    _plot_pair("файлов",
               "validation files unrev.png", data[3], "#D82488",
               "validation files old.png", data[4], "#EC7BCD")
    _plot_pair("шаблонов",
               "validation templates unrev.png", data[5], "#6A2B00",
               "validation templates old.png", data[6], "#AA5A01")
    _plot_pair("категорий",
               "validation categories.png", data[7], "#FB7E00",
               "validation categories.png", data[8], "#FECD42")
    _plot_pair("перенаправлений",
               "validation redirects unrev.png", data[9], "#427322",
               "validation redirects old.png", data[10], "#83A958")

if __name__ == "__main__":
    main()
| Python | 0.000162 | |
bd7a84353b298ad14634e5c9a7b442146e9bfeeb | Create __init__.py | kesh/__init__.py | kesh/__init__.py | # Empty __init__.py
| Python | 0.000429 | |
66d7ebe5210669284a335f83e2b8af7392285baa | add holistic video-to-pose | pose_format/utils/holistic.py | pose_format/utils/holistic.py | import mediapipe as mp
import numpy as np
from tqdm import tqdm
from .openpose import hand_colors
from ..numpy.pose_body import NumPyPoseBody
from ..pose import Pose
from ..pose_header import PoseHeaderComponent, PoseHeaderDimensions, PoseHeader
mp_holistic = mp.solutions.holistic
BODY_POINTS = mp_holistic.PoseLandmark._member_names_
BODY_LIMBS = [(int(a), int(b)) for a, b in mp_holistic.POSE_CONNECTIONS]
HAND_POINTS = mp_holistic.HandLandmark._member_names_
HAND_LIMBS = [(int(a), int(b)) for a, b in mp_holistic.HAND_CONNECTIONS]
FACE_POINTS = [str(i) for i in range(468)]
FACE_LIMBS = [(int(a), int(b)) for a, b in mp_holistic.FACE_CONNECTIONS]
def component_points(component, width: int, height: int, num: int):
    """Return (num, 3) landmark coordinates and per-point confidences.

    x/y are scaled from normalized coordinates to pixels; z is kept
    as-is.  A missing component yields zeros with zero confidence;
    present components get confidence 1 for every point.
    """
    if component is None:
        return np.zeros((num, 3)), np.zeros(num)
    coords = [[pt.x * width, pt.y * height, pt.z] for pt in component.landmark]
    return np.array(coords), np.ones(num)
def body_points(component, width: int, height: int, num: int):
    """Like component_points, but the confidence of each point comes from
    the landmark's ``visibility`` field instead of being all ones."""
    if component is None:
        return np.zeros((num, 3)), np.zeros(num)
    landmarks = component.landmark
    coords = np.array([[pt.x * width, pt.y * height, pt.z] for pt in landmarks])
    vis = np.array([pt.visibility for pt in landmarks])
    return coords, vis
def process_holistic(frames: list, fps: float, w: int, h: int, kinect=None, progress=False):
    """Run MediaPipe Holistic on every frame and stack the landmarks into a
    NumPyPoseBody (one "person" dimension of size 1).

    Point layout per frame: 33 body + 468 face + 21 left hand + 21 right
    hand.  If ``kinect`` is given (a depth array indexed [frame, y, x, 0]),
    a depth channel is appended to each point.
    """
    holistic = mp_holistic.Holistic(static_image_mode=False)

    datas = []
    confs = []

    for i, frame in enumerate(tqdm(frames, disable=not progress)):
        results = holistic.process(frame)

        body_data, body_confidence = body_points(results.pose_landmarks, w, h, 33)
        face_data, face_confidence = component_points(results.face_landmarks, w, h, 468)
        lh_data, lh_confidence = component_points(results.left_hand_landmarks, w, h, 21)
        rh_data, rh_confidence = component_points(results.right_hand_landmarks, w, h, 21)

        data = np.concatenate([body_data, face_data, lh_data, rh_data])
        conf = np.concatenate([body_confidence, face_confidence, lh_confidence, rh_confidence])

        if kinect is not None:
            # Sample the depth map at each (rounded) pixel location;
            # out-of-frame points get depth 0.
            kinect_depth = []
            for x, y, z in np.array(data, dtype="int32"):
                if 0 < x < w and 0 < y < h:
                    kinect_depth.append(kinect[i, y, x, 0])
                else:
                    kinect_depth.append(0)

            kinect_vec = np.expand_dims(np.array(kinect_depth), axis=-1)
            data = np.concatenate([data, kinect_vec], axis=-1)

        datas.append(data)
        confs.append(conf)

    # Insert the people axis: (frames, 1, points, channels).
    pose_body_data = np.expand_dims(np.stack(datas), axis=1)
    pose_body_conf = np.expand_dims(np.stack(confs), axis=1)
    return NumPyPoseBody(data=pose_body_data, confidence=pose_body_conf, fps=fps)
def load_holistic(frames: list, fps: float = 24, width=1000, height=1000, depth=0, kinect=None):
    """Build a full Pose (header + body) from raw video frames via
    MediaPipe Holistic; pass ``kinect`` to add a depth channel (K)."""
    # Point format: X, Y, Z, (K = kinect depth,) C = confidence.
    pf = "XYZC" if kinect is None else "XYZKC"

    Holistic_Hand_Component = lambda name: PoseHeaderComponent(name=name, points=HAND_POINTS,
                                                               limbs=HAND_LIMBS, colors=hand_colors, point_format=pf)

    # Order must match the concatenation in process_holistic.
    Holistic_Components = [
        PoseHeaderComponent(name="POSE_LANDMARKS", points=BODY_POINTS, limbs=BODY_LIMBS,
                            colors=[(255, 0, 0)], point_format=pf),
        PoseHeaderComponent(name="FACE_LANDMARKS", points=FACE_POINTS, limbs=FACE_LIMBS,
                            colors=[(128, 0, 0)], point_format=pf),
        Holistic_Hand_Component("LEFT_HAND_LANDMARKS"),
        Holistic_Hand_Component("RIGHT_HAND_LANDMARKS"),
    ]

    dimensions = PoseHeaderDimensions(width=width, height=height, depth=depth)

    header: PoseHeader = PoseHeader(version=0.1, dimensions=dimensions, components=Holistic_Components)
    body: NumPyPoseBody = process_holistic(frames, fps, width, height, kinect)

    return Pose(header, body)
9b169bf42bfb2c674460fc317cfb96f929ba0953 | Add tests suite for text modifier tags. | tests/tests_tags/tests_textmodifiers.py | tests/tests_tags/tests_textmodifiers.py | """
SkCode text modifier tags test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags.textmodifiers import TextModifierBaseTagOptions
from skcode.tags import (LowerCaseTextTagOptions,
UpperCaseTextTagOptions,
CapitalizeTextTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class TextModifierTagsTestCase(unittest.TestCase):
    """ Tests suite for text modifier tags module.

    Covers tag registration, option constants, and the three render
    targets (HTML, text, skcode) for each modifier.
    NOTE(review): several method names spell "capitalize" as
    "capitilize"; renaming would be cosmetic but affects test ids.
    """

    def test_tag_and_aliases_in_default_recognized_tags_dict(self):
        """ Test the presence of the tag and aliases in the dictionary of default recognized tags. """
        self.assertIn('lowercase', DEFAULT_RECOGNIZED_TAGS)
        self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['lowercase'], LowerCaseTextTagOptions)
        self.assertIn('uppercase', DEFAULT_RECOGNIZED_TAGS)
        self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['uppercase'], UpperCaseTextTagOptions)
        self.assertIn('capitalize', DEFAULT_RECOGNIZED_TAGS)
        self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['capitalize'], CapitalizeTextTagOptions)

    def test_tag_constant_values(self):
        """ Test tag constants. """
        opts = TextModifierBaseTagOptions('lowercase')
        self.assertFalse(opts.newline_closes)
        self.assertFalse(opts.same_tag_closes)
        self.assertFalse(opts.standalone)
        self.assertTrue(opts.parse_embedded)
        self.assertFalse(opts.swallow_trailing_newline)
        self.assertTrue(opts.inline)
        self.assertFalse(opts.close_inlines)
        self.assertFalse(opts.make_paragraphs_here)

    def test_render_html_lowercase(self):
        """ Test the ``render_html`` method. """
        opts = LowerCaseTextTagOptions()
        tree_node = TreeNode(None, 'lowercase', opts)
        self.assertEqual('<p class="text-lowercase">test</p>\n', opts.render_html(tree_node, 'test'))

    def test_render_html_uppercase(self):
        """ Test the ``render_html`` method. """
        opts = UpperCaseTextTagOptions()
        tree_node = TreeNode(None, 'uppercase', opts)
        self.assertEqual('<p class="text-uppercase">test</p>\n', opts.render_html(tree_node, 'test'))

    def test_render_html_capitilize(self):
        """ Test the ``render_html`` method. """
        opts = CapitalizeTextTagOptions()
        tree_node = TreeNode(None, 'capitalize', opts)
        self.assertEqual('<p class="text-capitalize">test</p>\n', opts.render_html(tree_node, 'test'))

    def test_render_text_lowercase(self):
        """ Test the ``render_text`` method. """
        opts = LowerCaseTextTagOptions()
        tree_node = TreeNode(None, 'lowercase', opts)
        self.assertEqual('test', opts.render_text(tree_node, 'teST'))

    def test_render_text_uppercase(self):
        """ Test the ``render_text`` method. """
        opts = UpperCaseTextTagOptions()
        tree_node = TreeNode(None, 'uppercase', opts)
        self.assertEqual('TEST', opts.render_text(tree_node, 'teST'))

    def test_render_text_capitilize(self):
        """ Test the ``render_text`` method. """
        opts = CapitalizeTextTagOptions()
        tree_node = TreeNode(None, 'capitalize', opts)
        self.assertEqual('Test', opts.render_text(tree_node, 'test'))

    def test_render_skcode_lowercase(self):
        """ Test the ``render_skcode`` method. """
        opts = LowerCaseTextTagOptions()
        tree_node = TreeNode(None, 'lowercase', opts)
        self.assertEqual('[lowercase]test[/lowercase]', opts.render_skcode(tree_node, 'test'))

    def test_render_skcode_uppercase(self):
        """ Test the ``render_skcode`` method. """
        opts = UpperCaseTextTagOptions()
        tree_node = TreeNode(None, 'uppercase', opts)
        self.assertEqual('[uppercase]test[/uppercase]', opts.render_skcode(tree_node, 'test'))

    def test_render_skcode_capitilize(self):
        """ Test the ``render_skcode`` method. """
        opts = CapitalizeTextTagOptions()
        tree_node = TreeNode(None, 'capitalize', opts)
        self.assertEqual('[capitalize]test[/capitalize]', opts.render_skcode(tree_node, 'test'))
| Python | 0 | |
11f47fcad839b198d134f34b4489537360703a07 | Add helpers.py | ckanext/orgdashboards/tests/helpers.py | ckanext/orgdashboards/tests/helpers.py | from ckan.tests import factories
def create_mock_data(**kwargs):
    """Create a linked set of CKAN test fixtures.

    Builds an organization, a dataset owned by it, a resource in that
    dataset and a resource view, and returns a flat dict exposing the
    objects plus commonly-used name/id shortcuts and an action context.
    ``kwargs`` is forwarded to ``factories._get_action_user_name`` to pick
    the acting user.
    """
    mock_data = {}

    mock_data['organization'] = factories.Organization()
    mock_data['organization_name'] = mock_data['organization']['name']
    mock_data['organization_id'] = mock_data['organization']['id']

    mock_data['dataset'] = factories.Dataset(owner_org=mock_data['organization_id'])
    mock_data['dataset_name'] = mock_data['dataset']['name']
    mock_data['package_id'] = mock_data['dataset']['id']

    mock_data['resource'] = factories.Resource(package_id=mock_data['package_id'])
    mock_data['resource_name'] = mock_data['resource']['name']
    mock_data['resource_id'] = mock_data['resource']['id']

    mock_data['resource_view'] = factories.ResourceView(
        resource_id=mock_data['resource_id'])
    mock_data['resource_view_title'] = mock_data['resource_view']['title']

    mock_data['context'] = {
        'user': factories._get_action_user_name(kwargs)
    }

    return mock_data
46e1afd7faae8bd8c62f6b4f5c01322804e68163 | add script to visualize simulation coefficient (us, g, us') | Modules/Biophotonics/python/iMC/script_visualize_simulation_coefficients.py | Modules/Biophotonics/python/iMC/script_visualize_simulation_coefficients.py | '''
Created on Sep 22, 2015
@author: wirkert
'''
import math
import numpy as np
import matplotlib.pyplot as plt
from mc.usuag import UsG
if __name__ == '__main__':

    # set up plots: us, g, us' vs wavelength, plus g vs size parameter
    f, axarr = plt.subplots(1, 4)

    usplt = axarr[0]
    usplt.grid()
    usplt.set_xlabel("wavelengths [nm]")
    usplt.set_ylabel("us [cm-1]")
    usplt.set_title("scattering coefficient")

    gplt = axarr[1]
    gplt.grid()
    gplt.set_xlabel("wavelengths [nm]")
    gplt.set_ylabel("g")
    gplt.set_title("anisotropy factor")

    usrplt = axarr[2]
    usrplt.grid()
    usrplt.set_xlabel("wavelengths [nm]")
    usrplt.set_ylabel("us' [cm-1]")
    usrplt.set_title("reduced scattering coefficient")

    aniplt = axarr[3]
    aniplt.grid()
    aniplt.set_xlabel("x = ka = size parameter")
    aniplt.set_ylabel("g")
    aniplt.set_xscale('log')
    aniplt.set_title("anisotropy")

    # set up simulation
    usg = UsG()
    usg.dsp = 0.04
    # usg.n_medium = 1.33
    # usg.n_particle = 1.40

    wavelengths = np.arange(400, 700, 10) * 10 ** -9
    # Particle diameters to sweep; alternative ranges kept for reference.
    plt_range = np.array([0.4 / 2. * 10 ** -6])
    # np.linspace(2., 3., 10) * 10 ** -6
    # np.array([579. / 2. * 10 ** -9])
    # np.linspace(0.1, 0.74, 10) * 10 ** -6

    for i, d in enumerate(plt_range):
        # set and calculate values
        usg.r = d / 2.
        us = [usg(w)[0] for w in wavelengths]
        g = [usg(w)[1] for w in wavelengths]
        # NOTE(review): this overrides the computed anisotropy with a
        # constant 0.92 for every wavelength -- confirm intent.
        g = np.array(g) / np.array(g) * 0.92

        # plot stuff
        # from blue to red: the color of the plotted curves
        plt_color = (1. / float(len(plt_range)) * i,
                     0.,
                     1. - (1. / float(len(plt_range)) * i))

        # plot scattering coefficient (m-1 -> cm-1 via /100)
        usplt.plot(wavelengths * 10 ** 9, np.array(us) / 100., color=plt_color)
        # plot anisotropy factor
        gplt.plot(wavelengths * 10 ** 9, g, color=plt_color)
        # plot reduced scattering coefficient us' = us * (1 - g)
        usrplt.plot(wavelengths * 10 ** 9, np.array(us) * (1.0 - np.array(g)) / 100.,
                    color=plt_color)

        aniplt.plot(2. * math.pi * usg.r / wavelengths * usg.n_medium, g)

    plt.show()
| Python | 0 | |
b9c9a1f5cfea61050803ecc442232f2f8b4d7011 | Create yaml2json.py | yaml2json.py | yaml2json.py | #!/usr/bin/python
import sys
import yaml
import json

# Read a YAML document from stdin and print it as 2-space-indented JSON.
# (Python 2 print statement.)
if __name__ == '__main__':
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects; prefer yaml.safe_load for untrusted input.
    content = yaml.load(sys.stdin)
    print json.dumps(content, indent=2)
| Python | 0.000002 | |
7714b3c640a3d6d7fae9dba3496adfddd9354e0e | Add CFFI binding generator | build_wide.py | build_wide.py | import cffi
ffibuilder = cffi.FFI()
ffibuilder.set_source(
'_wide',
r"""
#include "wide.c"
""",
extra_compile_args=['-Werror', '-fno-unwind-tables', '-fomit-frame-pointer'],
)
ffibuilder.cdef(
r"""
typedef uint32_t wp_index;
typedef double wp_number;
wp_index wide_product(
wp_index height,
const wp_number* a_data,
const wp_index* a_indices,
const wp_index* a_indptr,
wp_index a_width,
wp_index a_nnz,
const wp_number* b_data,
const wp_index* b_indices,
const wp_index* b_indptr,
wp_index b_width,
wp_index b_nnz,
wp_number* out_data,
wp_index* out_indices,
wp_index* out_indptr
);
wp_index wide_product_max_nnz(
const wp_index* a_indptr,
const wp_index* b_indptr,
wp_index height
);
""",
)
if __name__ == '__main__':
ffibuilder.compile(verbose=True)
| Python | 0 | |
826a68c7f1f67c3189939c06fcb623b2ede040d9 | migrate from yorm to odm | migrate.py | migrate.py |
import pymongo
import logging
import pprint
import time
from modularodm import StoredObject
from modularodm import fields
from modularodm import storage
from modularodm.query.querydialect import DefaultQueryDialect as Q
from framework.search.model import Keyword as Keyword_YORM
from framework.auth.model import User as User_YORM
from website.project.model import Tag as Tag_YORM
from website.project.model import NodeWikiPage as NodeWikiPage_YORM
from website.project.model import NodeLog as NodeLog_YORM
from website.project.model import NodeFile as NodeFile_YORM
from website.project.model import Node as Node_YORM
client = pymongo.MongoClient()
database = client['migrate']
# Schemas must be migrated in order to preserve relationships. This
# could be implemented using some kind of dependency tracking, but is
# done by hand for now.
migrate_order = [
'Keyword',
'User',
'Tag',
'NodeLog',
'NodeFile',
'Node',
'NodeWikiPage',
]
# Schema definitions
class Keyword(StoredObject):
    """ODM schema mirroring the YORM Keyword model."""
    _id = fields.StringField(primary=True)
    type = fields.DictionaryField()

Keyword.set_storage(storage.MongoStorage(database, 'keyword'))
class User(StoredObject):
    """ODM schema mirroring the YORM User model."""
    _id = fields.StringField(primary=True)
    username = fields.StringField()
    password = fields.StringField()
    fullname = fields.StringField()
    is_registered = fields.BooleanField()
    is_claimed = fields.BooleanField()
    verification_key = fields.StringField()
    emails = fields.StringField(list=True)
    email_verifications = fields.DictionaryField()
    aka = fields.StringField(list=True)
    date_registered = fields.DateTimeField()#auto_now_add=True)
    keywords = fields.ForeignField('keyword', list=True, backref='keyworded')

User.set_storage(storage.MongoStorage(database, 'user'))
class NodeLog(StoredObject):
    """ODM schema mirroring the YORM NodeLog model."""
    _id = fields.ObjectIdField(primary=True)
    date = fields.DateTimeField()#auto_now=True)
    action = fields.StringField()
    params = fields.DictionaryField()
    user = fields.ForeignField('user', backref='created')

NodeLog.set_storage(storage.MongoStorage(database, 'nodelog'))
class NodeFile(StoredObject):
_id = fields.ObjectIdField(primary=True)
path = fields.StringField()
filename = fields.StringField()
md5 = fields.StringField()
sha = fields.StringField()
size = fields.IntegerField()
content_type = fields.StringField()
is_public = fields.BooleanField()
git_commit = fields.StringField()
is_deleted = fields.BooleanField()
date_created = fields.DateTimeField()#auto_now_add=True)
date_modified = fields.DateTimeField()#auto_now=True)
date_uploaded = fields.DateTimeField()
uploader = fields.ForeignField('user', backref='uploads')
NodeFile.set_storage(storage.MongoStorage(database, 'nodefile'))
class NodeWikiPage(StoredObject):
_id = fields.StringField(primary=True)
page_name = fields.StringField()
version = fields.IntegerField()
date = fields.DateTimeField()#auto_now_add=True)
is_current = fields.BooleanField()
content = fields.StringField()
user = fields.ForeignField('user')
node = fields.ForeignField('node')
NodeWikiPage.set_storage(storage.MongoStorage(database, 'nodewikipage'))
class Tag(StoredObject):
_id = fields.StringField(primary=True)
count_public = fields.IntegerField(default=0)
count_total = fields.IntegerField(default=0)
Tag.set_storage(storage.MongoStorage(database, 'tag'))
class Node(StoredObject):
_id = fields.StringField(primary=True)
is_deleted = fields.BooleanField(default=False)
deleted_date = fields.DateTimeField()
is_registration = fields.BooleanField(default=False)
registered_date = fields.DateTimeField()
is_fork = fields.BooleanField(default=False)
forked_date = fields.DateTimeField()
title = fields.StringField()
description = fields.StringField()
category = fields.StringField()
_terms = fields.DictionaryField(list=True)
files_current = fields.DictionaryField()
files_versions = fields.DictionaryField()
wiki_pages_current = fields.DictionaryField()
wiki_pages_versions = fields.DictionaryField()
creator = fields.ForeignField('user', backref='created')
contributors = fields.ForeignField('user', list=True, backref='contributed')
contributor_list = fields.DictionaryField(list=True)
users_watching_node = fields.ForeignField('user', list=True, backref='watched')
logs = fields.ForeignField('nodelog', list=True, backref='logged')
tags = fields.ForeignField('tag', list=True, backref='tagged')
Node.set_storage(storage.MongoStorage(database, 'node'))
# Migration
def migrate(YORM, ODM):
yorms = YORM.find()
for yorm in yorms:
odm = ODM.load(yorm[ODM._primary_name])
if odm is None:
odm = ODM()
for key, val in yorm.items():
if key == '_doc' or key.startswith('_b_'):
continue
if key not in odm._fields:
continue
setattr(odm, key, val)
# Skip records with missing PK
if isinstance(odm._primary_key, odm._primary_type):
odm.save()
migrate_time = {}
t0_all = time.time()
for schema in migrate_order:
schema_yorm = globals()['%s_YORM' % (schema)]
schema_odm = globals()[schema]
t0_schema = time.time()
migrate(schema_yorm, schema_odm)
migrate_time[schema] = time.time() - t0_schema
migrate_time['ALL'] = time.time() - t0_all
brian = User.find_one(Q('fullname', 'contains', 'Nosek'))
brian_nodes_created = [Node.load(nid) for nid in brian.created['node']['creator']]
brian_nodes_contributed = [Node.load(nid) for nid in brian.contributed['node']['contributors']]
logging.debug(
'Brian has created {} projects.'.format(
len(brian.created['node']['creator'])
)
)
logging.debug(
'Brian has contributed to {} projects.'.format(
len(brian.contributed['node']['contributors'])
)
)
logging.debug(pprint.pprint(migrate_time)) | Python | 0.000005 | |
bc22cd37a62a4e8e9dbfa677a9b3f70b546f1850 | Align dict values | jedihttp/handlers.py | jedihttp/handlers.py | # Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bottle
from bottle import response, request, error
import json
import jedi
import logging
app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )
@app.post( '/healthy' )
def healthy():
return { 'healthy': True }
@app.post( '/ready' )
def ready():
return { 'ready': True }
@app.post( '/completions' )
def completions():
try:
logger.debug( 'received /completions request' )
script = _GetJediScript( request.json )
return {
'completions': [ {
'name': completion.name,
'description': completion.description,
'docstring': completion.docstring(),
'module_path': completion.module_path,
'line': completion.line,
'column': completion.column
} for completion in script.completions() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /completions: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.post( '/gotodefinition' )
def gotodefinition():
try:
logger.debug( 'received /gotodefinition request' )
script = _GetJediScript( request.json )
return {
'definitions': [ {
'module_path': definition.module_path,
'line': definition.line,
'column': definition.column,
'in_builtin_module': definition.in_builtin_module(),
'is_keyword': definition.is_keyword,
'description': definition.description,
'docstring': definition.docstring()
} for definition in script.goto_definitions() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /gotodefinition: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.post( '/gotoassignment' )
def gotoassignments():
try:
logger.debug( 'received /gotoassignment request' )
script = _GetJediScript( request.json )
return {
'definitions': [ {
'module_path': definition.module_path,
'line': definition.line,
'column': definition.column,
'in_builtin_module': definition.in_builtin_module(),
'is_keyword': definition.is_keyword,
'description': definition.description,
'docstring': definition.docstring()
} for definition in script.goto_assignments() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /gotoassignment: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.error()
def error( err ):
return err.body
def _GetJediScript( request_data ):
return jedi.Script( request_data[ 'source' ],
request_data[ 'line' ],
request_data[ 'col' ],
request_data[ 'path' ] )
| # Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bottle
from bottle import response, request, error
import json
import jedi
import logging
app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )
@app.post( '/healthy' )
def healthy():
return { 'healthy': True }
@app.post( '/ready' )
def ready():
return { 'ready': True }
@app.post( '/completions' )
def completions():
try:
logger.debug( 'received /completions request' )
script = _GetJediScript( request.json )
return {
'completions': [ {
'name': completion.name,
'description': completion.description,
'docstring': completion.docstring(),
'module_path': completion.module_path,
'line': completion.line,
'column': completion.column
} for completion in script.completions() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /completions: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.post( '/gotodefinition' )
def gotodefinition():
try:
logger.debug( 'received /gotodefinition request' )
script = _GetJediScript( request.json )
return {
'definitions': [ {
'module_path': definition.module_path,
'line': definition.line,
'column': definition.column,
'in_builtin_module': definition.in_builtin_module(),
'is_keyword': definition.is_keyword,
'description': definition.description,
'docstring': definition.docstring()
} for definition in script.goto_definitions() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /gotodefinition: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.post( '/gotoassignment' )
def gotoassignments():
try:
logger.debug( 'received /gotoassignment request' )
script = _GetJediScript( request.json )
return {
'definitions': [ {
'module_path': definition.module_path,
'line': definition.line,
'column': definition.column,
'in_builtin_module': definition.in_builtin_module(),
'is_keyword': definition.is_keyword,
'description': definition.description,
'docstring': definition.docstring()
} for definition in script.goto_assignments() ]
}
except Exception as e:
message = str( e )
logger.debug( 'Exception in /gotoassignment: {0}'.format( message ) )
return bottle.HTTPError( 500, message, e )
@app.error()
def error( err ):
return err.body
def _GetJediScript( request_data ):
return jedi.Script( request_data[ 'source' ],
request_data[ 'line' ],
request_data[ 'col' ],
request_data[ 'path' ] )
| Python | 0.000001 |
6610483e55f5371d5dcfe06e984f791c3f051e4a | fix InMoov launching button | src/main/resources/resource/Intro/InMoov01_start.py | src/main/resources/resource/Intro/InMoov01_start.py | #########################################
# InMoov01_start.py
# categories: inmoov
# more info @: http://myrobotlab.org/service/InMoov
#########################################
# uncomment for virtual hardware
# Platform.setVirtual(True)
i01 = Runtime.start('i01', 'InMoov2') | Python | 0 | |
9ba00cc698a5ce38d8cfb8eb6e921df0e24525cc | Create netstew.py | netstew.py | netstew.py | #!/opt/anaconda/bin/python2.7
# Print the links to stndard out.
from bs4 import BeautifulSoup
soup = BeautifulSoup(open("index.html"))
for link in soup.find_all('a'):
print(link.get('href'))
| Python | 0.000005 | |
2e3af241d989bf2b62bba5e344240246e8ff516b | add leave module | modules/leave.py | modules/leave.py | class LeaveModule:
def __init__(self, circa):
self.circa = circa
def onload(self):
self.circa.add_listener("cmd.leave", self.leave)
self.circa.add_listener("cmd.goaway", self.leave)
self.circa.add_listener("cmd.quit", self.quit)
def onunload(self):
self.circa.remove_listener("cmd.leave", self.leave)
self.circa.remove_listener("cmd.goaway", self.leave)
self.circa.remove_listener("cmd.quit", self.quit)
def leave(self, fr, to, text):
if self.circa.is_admin(fr) and fr != to:
self.circa.part(to)
def quit(self, fr, to, text):
if self.circa.is_admin(fr):
self.circa.close()
module = LeaveModule
| Python | 0.000001 | |
3411020a0445afcb626e7079ae2f4d17a02d27a0 | Add simple YTid2AmaraID mapper. | map_ytid2amaraid.py | map_ytid2amaraid.py | #!/usr/bin/env python3
import argparse, sys
from pprint import pprint
from amara_api import *
from utils import answer_me
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to Amara IDs. If given video is not on Amara, it is created."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-l','--lang',dest='lang',required = True, help='Which language do we copy?')
parser.add_argument('-c','--credentials',dest='apifile',default='myapi.txt', help='Text file containing your API key and username on the first line.')
return parser.parse_args()
opts = read_cmd()
infile = opts.input_file
apifile = opts.apifile
lang = opts.lang
# We suppose that the original language is English
if lang == "en":
is_original = True # is lang the original language of the video?
else:
is_original = False
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
ytids.append(line.split())
# File 'apifile' should contain only one line with your Amara API key and Amara username.
# Amara API can be found in Settins->Account-> API Access (bottom-right corner)
file = open(apifile, "r")
API_KEY, USERNAME = file.read().split()[0:]
print('Using Amara username: '+USERNAME)
#print('Using Amara API key: '+API_KEY)
amara_headers = {
'Content-Type': 'application/json',
'X-api-username': USERNAME,
'X-api-key': API_KEY,
'format': 'json'
}
if len(ytids) < 20: # Do not print for large inputs
print("This is what I got from the input file:")
print(ytids)
answer = answer_me("Should I proceed?")
if not answer:
sys.exit(1)
# Main loop
for i in range(len(ytids)):
ytid_from = ytids[i][0]
sys.stdout.flush()
sys.stderr.flush()
video_url = 'https://www.youtube.com/watch?v='+ytid_from
# Now check whether the video is already on Amara
# If not, create it.
amara_response = check_video( video_url, amara_headers)
if amara_response['meta']['total_count'] == 0:
amara_response = add_video(video_url, lang, amara_headers)
amara_id = amara_response['id']
amara_title = amara_response['title']
print(ytid_from, AMARA_BASE_URL+'cs/subtitles/editor/'+amara_id+'/'+lang)
else:
amara_id = amara_response['objects'][0]['id']
amara_title = amara_response['objects'][0]['title']
print(ytid_from, AMARA_BASE_URL+'cs/subtitles/editor/'+amara_id+'/'+lang)
| Python | 0 | |
32dffa922c1aedadf5cc87685d5ff5226fa306e9 | add status.py and adjust the code | SMlib/widgets/status.py | SMlib/widgets/status.py | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see SMlib/__init__.py for details)
"""Status bar widgets"""
from PyQt4.QtGui import QWidget, QHBoxLayout, QLabel
from PyQt4.QtCore import QTimer, SIGNAL
# Local import
from SMlib.configs.baseconfig import _
from SMlib.configs.guiconfig import get_font
class StatusBarWidget(QWidget):
def __init__(self, parent, statusbar):
QWidget.__init__(self, parent)
self.label_font = font = get_font('editor')
font.setPointSize(self.font().pointSize())
font.setBold(True)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
statusbar.addPermanentWidget(self)
#==============================================================================
# Main window-related status bar widgets
#==============================================================================
class BaseTimerStatus(StatusBarWidget):
TITLE = None
TIP = None
def __init__(self, parent, statusbar):
StatusBarWidget.__init__(self, parent, statusbar)
self.setToolTip(self.TIP)
layout = self.layout()
layout.addWidget(QLabel(self.TITLE))
self.label = QLabel()
self.label.setFont(self.label_font)
layout.addWidget(self.label)
layout.addSpacing(20)
if self.is_supported():
self.timer = QTimer()
self.connect(self.timer, SIGNAL('timeout()'), self.update_label)
self.timer.start(2000)
else:
self.timer = None
self.hide()
def set_interval(self, interval):
"""Set timer interval (ms)"""
if self.timer is not None:
self.timer.setInterval(interval)
def import_test(self):
"""Raise ImportError if feature is not supported"""
raise NotImplementedError
def is_supported(self):
"""Return True if feature is supported"""
try:
self.import_test()
return True
except ImportError:
return False
def get_value(self):
"""Return value (e.g. CPU or memory usage)"""
raise NotImplementedError
def update_label(self):
"""Update status label widget, if widget is visible"""
if self.isVisible():
self.label.setText('%d %%' % self.get_value())
class MemoryStatus(BaseTimerStatus):
TITLE = _("Memory:")
TIP = _("Memory usage status: "
"requires the `psutil` (>=v0.3) library on non-Windows platforms")
def import_test(self):
"""Raise ImportError if feature is not supported"""
from SMlib.utils.system import memory_usage # analysis:ignore
def get_value(self):
"""Return memory usage"""
from SMlib.utils.system import memory_usage
return memory_usage()
class CPUStatus(BaseTimerStatus):
TITLE = _("CPU:")
TIP = _("CPU usage status: requires the `psutil` (>=v0.3) library")
def import_test(self):
"""Raise ImportError if feature is not supported"""
from SMlib.utils import programs
if not programs.is_module_installed('psutil', '>=0.2.0'):
# The `interval` argument in `psutil.cpu_percent` function
# was introduced in v0.2.0
raise ImportError
def get_value(self):
"""Return CPU usage"""
import psutil
return psutil.cpu_percent(interval=0)
#==============================================================================
# Editor-related status bar widgets
#==============================================================================
class ReadWriteStatus(StatusBarWidget):
def __init__(self, parent, statusbar):
StatusBarWidget.__init__(self, parent, statusbar)
layout = self.layout()
layout.addWidget(QLabel(_("Permissions:")))
self.readwrite = QLabel()
self.readwrite.setFont(self.label_font)
layout.addWidget(self.readwrite)
layout.addSpacing(20)
def readonly_changed(self, readonly):
readwrite = "R" if readonly else "RW"
self.readwrite.setText(readwrite.ljust(3))
class EOLStatus(StatusBarWidget):
def __init__(self, parent, statusbar):
StatusBarWidget.__init__(self, parent, statusbar)
layout = self.layout()
layout.addWidget(QLabel(_("End-of-lines:")))
self.eol = QLabel()
self.eol.setFont(self.label_font)
layout.addWidget(self.eol)
layout.addSpacing(20)
def eol_changed(self, os_name):
os_name = unicode(os_name)
self.eol.setText({"nt": "CRLF", "posix": "LF"}.get(os_name, "CR"))
class EncodingStatus(StatusBarWidget):
def __init__(self, parent, statusbar):
StatusBarWidget.__init__(self, parent, statusbar)
layout = self.layout()
layout.addWidget(QLabel(_("Encoding:")))
self.encoding = QLabel()
self.encoding.setFont(self.label_font)
layout.addWidget(self.encoding)
layout.addSpacing(20)
def encoding_changed(self, encoding):
self.encoding.setText(str(encoding).upper().ljust(15))
class CursorPositionStatus(StatusBarWidget):
def __init__(self, parent, statusbar):
StatusBarWidget.__init__(self, parent, statusbar)
layout = self.layout()
layout.addWidget(QLabel(_("Line:")))
self.line = QLabel()
self.line.setFont(self.label_font)
layout.addWidget(self.line)
layout.addWidget(QLabel(_("Column:")))
self.column = QLabel()
self.column.setFont(self.label_font)
layout.addWidget(self.column)
self.setLayout(layout)
def cursor_position_changed(self, line, index):
self.line.setText("%-6d" % (line+1))
self.column.setText("%-4d" % (index+1))
def test():
from PyQt4.QtGui import QMainWindow
from SMlib.utils.qthelpers import qapplication
app = qapplication()
win = QMainWindow()
win.setWindowTitle("Status widgets test")
win.resize(900, 300)
statusbar = win.statusBar()
swidgets = []
for klass in (ReadWriteStatus, EOLStatus, EncodingStatus,
CursorPositionStatus, MemoryStatus, CPUStatus):
swidget = klass(win, statusbar)
swidgets.append(swidget)
win.show()
app.exec_()
if __name__ == "__main__":
test()
| Python | 0 | |
9fef390248387e02498d18ab7bba5b23e3632c7b | Add missing file | api/constants.py | api/constants.py | QUERY_PARAM_QUERY = 'q'
QUERY_PARAM_SORT = 's'
QUERY_PARAM_SIZE = 'size'
QUERY_PARAM_PAGE = 'page'
QUERY_PARAM_FIELDS = 'fields'
QUERY_PARAM_OFFSET = 'offset'
QUERY_PARAM_INCLUDE = 'include'
QUERY_PARAM_EXCLUDE = 'exclude'
QUERY_PARAM_WAIT_UNTIL_COMPLETE = 'wuc'
| Python | 0.000006 | |
8f9c979fc2936d53321a377c67cbf2e3b4667f95 | Create status_light.py | status_light.py | status_light.py | import time
class StatusLight(object):
"""available patterns for the status light"""
patterns = {
'blink_fast' : (.1, [False, True]),
'blink' : (.5, [False, True]),
}
"""placeholder for pattern to tenmporarily interrupt
status light with different pattern"""
interrupt_pattern = [0, []]
"""continue flashing, controlled by the stop"""
cont = True
def interrupt(self, action, repeat = 1):
"""Interupt the current status of the light with a names action
parameters: action the name of the action
repeat: the number of times to repeatthe interruption"""
self.interrupt_pattern[0] = self.patterns[action][0]
for i in range(0, repeat):
self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))
def do(self, action):
"""Perform a status light action
paramaters: action: the name of tehe action"""
if(len(self.interrupt_pattern[1])):
# if the interrupt_pattern is not empty, prioritize it
time.sleep(self.interrupt_pattern[0])
self.set_state(self.interrupt_pattern[1].pop(0))
return self.do(action)
for state in self.patterns[action][1]:
# peform the regular action when not interrupted
time.sleep(self.patterns[action][0])
self.set_state(state)
if self.cont:
# continue of not stopped
self.do(action)
def off(self, state):
"""Turn off status light"""
self.cont = False
self.set_state(state)
def set_state(self, state):
"""Turn the light on or off"""
print 'set state to %s' % state
if __name__ == '__main__':
light = StatusLight()
light.interrupt('blink_fast', 3)
light.do('blink')
| Python | 0.000001 | |
dcced707c40c6a970d19dfca496dc86e38e8ea3c | Increments version to 0.2.2 | deltas/__init__.py | deltas/__init__.py | from .apply import apply
from .operations import Operation, Insert, Delete, Equal
from .algorithms import segment_matcher, SegmentMatcher
from .algorithms import sequence_matcher, SequenceMatcher
from .tokenizers import Tokenizer, RegexTokenizer, text_split, wikitext_split
from .segmenters import Segmenter, Segment, MatchableSegment
__version__ = "0.2.2"
| from .apply import apply
from .operations import Operation, Insert, Delete, Equal
from .algorithms import segment_matcher, SegmentMatcher
from .algorithms import sequence_matcher, SequenceMatcher
from .tokenizers import Tokenizer, RegexTokenizer, text_split, wikitext_split
from .segmenters import Segmenter, Segment, MatchableSegment
__version__ = "0.2.1"
| Python | 0.999289 |
a4ad0ffbda8beb4c2ea4ef0d181ec9ef0de3d1e1 | add the md5 by python | SystemInfo/1_hashlib.py | SystemInfo/1_hashlib.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import hashlib
import sys
def md5sum(f):
m = hashlib.md5()
with open(f) as fd:
while True:
data = fd.read(4096)
if data:
m.update(data)
else:
break
return m.hexdigest()
if __name__ == '__main__':
print md5sum(sys.argv[1])
| Python | 0.000218 | |
0abb8f6d266408f20c751726460ae2d87f307583 | solve 1 problem | solutions/factorial-trailing-zeroes.py | solutions/factorial-trailing-zeroes.py | #!/usr/bin/env python
# encoding: utf-8
"""
factorial-trailing-zeroes.py
Created by Shuailong on 2016-02-21.
https://leetcode.com/problems/factorial-trailing-zeroes/.
"""
class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
"""
count = 0
max_iter = 15 # 5**14 > max_int
for fact in range(1, max_iter):
count += n / 5 ** fact
return count
def main():
solution = Solution()
n = 25
for n in range(1, 100):
print solution.trailingZeroes2(n), solution.trailingZeroes(n)
if __name__ == '__main__':
main()
| Python | 0.000027 | |
c8fa91104d712bf2743b07b5edd5f38a040d6507 | Add unit tests for invoke_post_run | st2common/tests/unit/test_runners_utils.py | st2common/tests/unit/test_runners_utils.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from st2common.runners import utils
from st2common.services import executions as exe_svc
from st2common.util import action_db as action_db_utils
from st2tests import base
from st2tests import fixturesloader
from st2tests import config as tests_config
tests_config.parse_args()
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'liveactions': ['liveaction1.yaml'],
'actions': ['local.yaml'],
'executions': ['execution1.yaml'],
'runners': ['run-local.yaml']
}
class RunnersUtilityTests(base.CleanDbTestCase):
def __init__(self, *args, **kwargs):
super(RunnersUtilityTests, self).__init__(*args, **kwargs)
self.models = None
def setUp(self):
super(RunnersUtilityTests, self).setUp()
loader = fixturesloader.FixturesLoader()
self.models = loader.save_fixtures_to_db(
fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES
)
self.liveaction_db = self.models['liveactions']['liveaction1.yaml']
exe_svc.create_execution_object(self.liveaction_db)
self.action_db = action_db_utils.get_action_by_ref(self.liveaction_db.action)
@mock.patch.object(action_db_utils, 'get_action_by_ref', mock.MagicMock(return_value=None))
def test_invoke_post_run_action_provided(self):
utils.invoke_post_run(self.liveaction_db, action_db=self.action_db)
action_db_utils.get_action_by_ref.assert_not_called()
def test_invoke_post_run_action_exists(self):
utils.invoke_post_run(self.liveaction_db)
@mock.patch.object(action_db_utils, 'get_action_by_ref', mock.MagicMock(return_value=None))
@mock.patch.object(action_db_utils, 'get_runnertype_by_name', mock.MagicMock(return_value=None))
def test_invoke_post_run_action_does_not_exist(self):
utils.invoke_post_run(self.liveaction_db)
action_db_utils.get_action_by_ref.assert_called_once()
action_db_utils.get_runnertype_by_name.assert_not_called()
| Python | 0 | |
8977f320979998c9f18cfa7629c1811c7082dddf | Add setup.py (sigh) | setup.py | setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="webxpath", # Replace with your own username
version="0.0.2",
author="Shiplu Mokaddim",
author_email="shiplu@mokadd.im",
description="Run XPath query and expressions against websites",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shiplu/webxpath",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License 2.0",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| Python | 0 | |
9af2c53af417295842f8ae329a8bb8abc99f693d | add setup.py file | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(
name = 's7n-blog',
version = "1a1",
packages = ['s7n', 's7n.blog'],
)
| Python | 0.000001 | |
e1be390ab7a90d1efdb75a0b2e04c6414645a23c | Create setup.py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
setup(
name='pdsspect',
version='0.1.0',
description="PDS Image Viewer",
long_description=readme + '\n\n' + history,
author="PlanetaryPy Developers",
author_email='contact@planetarypy.com',
url='https://github.com/planetarypy/pdsspect',
packages=[
'pdsspect',
],
package_dir={'pdsspect':
'pdsspect'},
include_package_data=True,
install_requires=[
'ginga==2.6.0',
'planetaryimage>=0.5.0',
'matplotlib>=1.5.1',
'QtPy>=1.2.1'
],
license="BSD",
zip_safe=False,
keywords='pdsspect',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
entry_points={
'console_scripts': [
'pdsspect = pdsspect.pdsspect:cli'
],
}
)
| Python | 0.000001 | |
18c0682306ee383d0eaad467d8fd7c9f74bb6e4f | add setup.py | setup.py | setup.py | #!/usr/bin/env python
# encoding: utf-8
from setuptools import setup # , find_packages
setup(
name='pyoptwrapper',
version='1.0',
description='wrapper to pyopt',
author='Andrew Ning',
author_email='aning@byu.edu',
py_modules=['pyoptwrapper'],
license='Apache License, Version 2.0',
zip_safe=False
) | Python | 0.000001 | |
a03fa3d725f296d3fa3fda323171924671ec65c0 | add setup.py for setuptools support | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='mtools',
version='1.0.0',
packages=find_packages(),
scripts=['scripts/mlaunch','scripts/mlog2json','scripts/mlogdistinct',
'scripts/mlogfilter','scripts/mlogmerge','scripts/mlogversion',
'scripts/mlogvis','scripts/mplotqueries'],
include_package_data=True,
author='Thomas Rueckstiess',
author_email='thomas@rueckstiess.net',
url='https://github.com/rueckstiess/mtools',
description='Useful scripts to parse and visualize MongoDB log files.',
) | Python | 0 | |
b106d4fdaf1667061879dd170ddeec1bde2042aa | Add setup.py. | setup.py | setup.py | from distutils.core import setup
setup(name='twittytwister',
version='0.1',
description='Twitter client for Twisted Python',
author='Dustin Sallings',
author_email='dustin@spy.net',
url='http://github.com/dustin/twitty-twister/',
license='MIT',
platforms='any',
packages=['twittytwister'],
)
| Python | 0 | |
3ab1c4c28f816d0c6495ce5d7b14b854ec77f754 | Setting version number to 0.2.0 | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
class UltraMagicString(object):
'''
Taken from
http://stackoverflow.com/questions/1162338/whats-the-right-way-to-use-unicode-metadata-in-setup-py
'''
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __unicode__(self):
return self.value.decode('UTF-8')
def __add__(self, other):
return UltraMagicString(self.value + str(other))
def split(self, *args, **kw):
return self.value.split(*args, **kw)
long_description = UltraMagicString(file('README').read())
setup(
name = 'django-autofixture',
version = '0.2.0',
url = 'https://launchpad.net/django-autofixture',
license = 'BSD',
description = 'Provides tools to auto generate test data.',
long_description = long_description,
author = UltraMagicString('Gregor Müllegger'),
author_email = 'gregor@muellegger.de',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe = False,
install_requires = ['setuptools'],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
class UltraMagicString(object):
    '''
    Taken from
    http://stackoverflow.com/questions/1162338/whats-the-right-way-to-use-unicode-metadata-in-setup-py
    '''
    def __init__(self, value):
        # Store the raw (UTF-8 encoded) byte string untouched.
        self.value = value
    def __str__(self):
        # Byte-string form: return the stored value as-is.
        return self.value
    def __unicode__(self):
        # Unicode form: decode the stored UTF-8 bytes on demand.
        return self.value.decode('UTF-8')
    def __add__(self, other):
        # Concatenation wraps the result so the dual str/unicode
        # behaviour survives chained "+" operations.
        return UltraMagicString(self.value + str(other))
    def split(self, *args, **kw):
        # Delegate splitting to the underlying byte string.
        return self.value.split(*args, **kw)
long_description = UltraMagicString(file('README').read())
setup(
name = 'django-autofixture',
version = '0.2.0pre1',
url = 'https://launchpad.net/django-autofixture',
license = 'BSD',
description = 'Provides tools to auto generate test data.',
long_description = long_description,
author = UltraMagicString('Gregor Müllegger'),
author_email = 'gregor@muellegger.de',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe = False,
install_requires = ['setuptools'],
)
| Python | 0.999493 |
dcedefebd46a80f18372e045e3e4869bb4c88d89 | Remove all tests from setup.py except those of gloo.gl | setup.py | setup.py | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" Vispy setup script.
Steps to do a new release:
Preparations:
* Test on Windows, Linux, Mac
* Make release notes
* Update API documentation and other docs that need updating.
Test installation:
* clear the build and dist dir (if they exist)
* python setup.py register -r http://testpypi.python.org/pypi
* python setup.py sdist upload -r http://testpypi.python.org/pypi
* pip install -i http://testpypi.python.org/pypi
Define the version:
* update __version__ in __init__.py
* Tag the tip changeset as version x.x
Generate and upload package (preferably on Windows)
* python setup.py register
* python setup.py sdist upload
* python setup.py bdist_wininst upload
Announcing:
* It can be worth waiting a day for eager users to report critical bugs
* Announce in scipy-user, vispy mailing list, G+
"""
import os
from os import path as op
try:
# use setuptools namespace, allows for "develop"
import setuptools # noqa, analysis:ignore
except ImportError:
pass # it's not essential for installation
from distutils.core import setup
name = 'vispy'
description = 'Interactive visualization in Python'
# Get version and docstring
__version__ = None
__doc__ = ''
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(os.path.dirname(__file__), 'vispy', '__init__.py')
for line in open(initFile).readlines():
if (line.startswith('__version__')):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line
setup(
name=name,
version=__version__,
author='Vispy contributors',
author_email='vispy@googlegroups.com',
license='(new) BSD',
url='http://vispy.org',
download_url='https://pypi.python.org/pypi/vispy',
keywords="visualization OpenGl ES medical imaging 3D plotting "
"numpy bigdata",
description=description,
long_description=__doc__,
platforms='any',
provides=['vispy'],
install_requires=[
'numpy',
'PyOpenGl'],
packages=[
'vispy',
'vispy.app', #'vispy.app.tests',
'vispy.app.backends',
'vispy.gloo', #'vispy.gloo.tests',
'vispy.gloo.gl', 'vispy.gloo.gl.tests',
'vispy.scene', #'vispy.scene.tests',
'vispy.scene.systems',
'vispy.scene.entities',
'vispy.scene.cameras',
'vispy.shaders',
'vispy.util', #'vispy.util.tests',
'vispy.util.dataio',
'vispy.visuals',
],
package_dir={
'vispy': 'vispy'},
package_data={
'vispy': [op.join('data', '*'),
op.join('app', 'tests', 'qt-designer.ui')]},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
| # -*- coding: utf-8 -*-
# Copyright (c) 2013, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" Vispy setup script.
Steps to do a new release:
Preparations:
* Test on Windows, Linux, Mac
* Make release notes
* Update API documentation and other docs that need updating.
Test installation:
* clear the build and dist dir (if they exist)
* python setup.py register -r http://testpypi.python.org/pypi
* python setup.py sdist upload -r http://testpypi.python.org/pypi
* pip install -i http://testpypi.python.org/pypi
Define the version:
* update __version__ in __init__.py
* Tag the tip changeset as version x.x
Generate and upload package (preferably on Windows)
* python setup.py register
* python setup.py sdist upload
* python setup.py bdist_wininst upload
Announcing:
* It can be worth waiting a day for eager users to report critical bugs
* Announce in scipy-user, vispy mailing list, G+
"""
import os
from os import path as op
try:
# use setuptools namespace, allows for "develop"
import setuptools # noqa, analysis:ignore
except ImportError:
pass # it's not essential for installation
from distutils.core import setup
name = 'vispy'
description = 'Interactive visualization in Python'
# Get version and docstring
__version__ = None
__doc__ = ''
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(os.path.dirname(__file__), 'vispy', '__init__.py')
for line in open(initFile).readlines():
if (line.startswith('__version__')):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line
setup(
name=name,
version=__version__,
author='Vispy contributors',
author_email='vispy@googlegroups.com',
license='(new) BSD',
url='http://vispy.org',
download_url='https://pypi.python.org/pypi/vispy',
keywords="visualization OpenGl ES medical imaging 3D plotting "
"numpy bigdata",
description=description,
long_description=__doc__,
platforms='any',
provides=['vispy'],
install_requires=[
'numpy',
'PyOpenGl'],
packages=[
'vispy',
'vispy.app', 'vispy.app.tests',
'vispy.app.backends',
'vispy.gloo', 'vispy.gloo.tests',
'vispy.gloo.gl', 'vispy.gloo.gl.tests',
'vispy.scene', 'vispy.scene.tests',
'vispy.scene.systems',
'vispy.scene.entities',
'vispy.scene.cameras',
'vispy.shaders',
'vispy.util', 'vispy.util.tests',
'vispy.util.dataio',
'vispy.visuals',
],
package_dir={
'vispy': 'vispy'},
package_data={
'vispy': [op.join('data', '*'),
op.join('app', 'tests', 'qt-designer.ui')]},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
| Python | 0 |
e0efdff7380101437c75ce6a50dd93302a3315e2 | Increase version dependency. | setup.py | setup.py | from setuptools import setup, find_packages
version='0.9'
setup(
name='pyres',
version=version,
description='Python resque clone',
author='Matt George',
author_email='mgeorge@gmail.com',
maintainer='Matt George',
license='MIT',
url='http://github.com/binarydud/pyres',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
download_url='http://cloud.github.com/downloads/binarydud/pyres/pyres-%s.tar.gz' % version,
include_package_data=True,
package_data={'resweb': ['templates/*.mustache','media/*']},
scripts=[
'scripts/pyres_worker',
'scripts/pyres_web',
'scripts/pyres_scheduler',
'scripts/pyres_manager'],
install_requires=[
'simplejson>=2.0.9',
'itty>=0.6.2',
'redis>=1.34.1',
'pystache>=0.1.0',
'setproctitle==1.0'
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
)
| from setuptools import setup, find_packages
version='0.9'
setup(
name='pyres',
version=version,
description='Python resque clone',
author='Matt George',
author_email='mgeorge@gmail.com',
maintainer='Matt George',
license='MIT',
url='http://github.com/binarydud/pyres',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
download_url='http://cloud.github.com/downloads/binarydud/pyres/pyres-%s.tar.gz' % version,
include_package_data=True,
package_data={'resweb': ['templates/*.mustache','media/*']},
scripts=[
'scripts/pyres_worker',
'scripts/pyres_web',
'scripts/pyres_scheduler',
'scripts/pyres_manager'],
install_requires=[
'simplejson>=2.0.9',
'itty>=0.6.2',
'redis==1.34.1',
'pystache>=0.1.0',
'setproctitle==1.0'
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
) | Python | 0 |
8781799d2511dbafa7b11f2f8fb45356031a619b | Bump the sqlalchemy-citext version requirement | setup.py | setup.py | #!/usr/bin/env python
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from setuptools import setup, find_packages
about = {}
with open("warehouse/__about__.py") as fp:
exec(fp.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=open("README.rst").read(),
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
packages=find_packages(),
package_data={
"warehouse": ["*.yml"],
"warehouse.legacy": ["templates/*.html"],
"warehouse.migrations": ["*.mako", "versions/*.py"],
},
install_requires=[
"alembic",
"Jinja2",
"psycopg2cffi-compat>=1.1",
"PyYAML",
"six",
"SQLAlchemy",
"sqlalchemy-citext>=1.2.0",
"Werkzeug",
],
entry_points={
"console_scripts": [
"warehouse = warehouse.__main__:main",
],
},
zip_safe=False,
)
| #!/usr/bin/env python
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from setuptools import setup, find_packages
about = {}
with open("warehouse/__about__.py") as fp:
exec(fp.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=open("README.rst").read(),
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
packages=find_packages(),
package_data={
"warehouse": ["*.yml"],
"warehouse.legacy": ["templates/*.html"],
"warehouse.migrations": ["*.mako", "versions/*.py"],
},
install_requires=[
"alembic",
"Jinja2",
"psycopg2cffi-compat>=1.1",
"PyYAML",
"six",
"SQLAlchemy",
"sqlalchemy-citext>=1.1.0",
"Werkzeug",
],
entry_points={
"console_scripts": [
"warehouse = warehouse.__main__:main",
],
},
zip_safe=False,
)
| Python | 0.000001 |
18d899f36a140e677637118039e245127b0d138a | remove the long description | setup.py | setup.py | from setuptools import setup, find_packages
from tvrenamr import get_version
setup(
name = 'tvrenamr',
version = get_version(),
description = 'Rename tv show files using online databases',
author = 'George Hickman',
author_email = 'george@ghickman.co.uk',
url = 'http://github.com/ghickman/tvrenamr',
license = 'MIT',
packages = find_packages(exclude=['tests']),
entry_points = {'console_scripts': ['tvr = tvrenamr.tvrenamr:run',],},
classifiers = [
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Multimedia',
'Topic :: Utilities',
'Natural Language :: English'],
install_requires = ('lxml', 'pyyaml',)
)
| from os.path import dirname, join
from setuptools import setup, find_packages
from tvrenamr import get_version
def fread(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed promptly
    instead of leaking until garbage collection (the original left it
    open).
    """
    # join() discards the directory part when *fname* is already absolute.
    with open(join(dirname(__file__), fname)) as f:
        return f.read()
setup(
name = 'tvrenamr',
version = get_version(),
description = 'Rename tv show files using online databases',
long_description = fread('README.markdown'),
author = 'George Hickman',
author_email = 'george@ghickman.co.uk',
url = 'http://github.com/ghickman/tvrenamr',
license = 'MIT',
packages = find_packages(exclude=['tests']),
entry_points = {'console_scripts': ['tvr = tvrenamr.tvrenamr:run',],},
classifiers = [
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Multimedia',
'Topic :: Utilities',
'Natural Language :: English'],
install_requires = ('lxml', 'pyyaml',)
)
| Python | 1 |
934e73247156b28d919957d738d8a5b03e403160 | Add setup.py. | setup.py | setup.py | """
setup.py for simple_img_gallery.
"""
from distutils.core import setup
# Package metadata; installs generate_gallery.py as a runnable script.
setup(name="simple_img_gallery",
      version="0.0.1",
      description="Simple image gallery generation.",
      author="Pete Florence",
      author_email="",
      url="https://github.com/peteflorence/simple_img_gallery",
      scripts=['generate_gallery.py'])
| Python | 0 | |
ff5c68ccd566ba388f919bb663c5055685be3070 | Add initial setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
setup(
name='mdx_picture',
version='1.0',
author='Artem Grebenkin',
author_email='speechkey@gmail.com',
description='Python-Markdown extension supports the <picture> tag.',
url='http://www.artemgrebenkin.com/',
py_modules=['mdx_picture'],
install_requires=['Markdown>=2.0'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Markup :: HTML'
]
)
| Python | 0.000001 | |
6972c0a6fc0431c7e41b110ea8c41dd9a4ed076c | Add distutils setup script | setup.py | setup.py | #!/usr/bin/env python3
from distutils.core import setup
setup(
name='python-fsb5',
version='1.0',
author='Simon Pinfold',
author_email='simon@uint8.me',
description='Library and to extract audio from FSB5 (FMOD Sample Bank) files',
download_url='https://github.com/synap5e/python-fsb5/tarball/master',
license='MIT',
url='https://github.com/synap5e/python-fsb5',
)
| Python | 0 | |
0bf30432084a5b6e71ea2ac36af165f7c4cee133 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
setup(name='acapi',
version='0.1',
description='Acquia Cloud API client.',
author='Dave Hall',
author_email='me@davehall.com.au',
url='http://github.com/skwashd/python-acquia-cloud',
install_requires=['httplib2==0.9', 'simplejson==3.5.3', 'six==1.7.3'],
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Internet',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
packages=[
'acapi',
'acapi.compat',
'acapi.resources',
],
)
| Python | 0.000001 | |
28970e7d54186e1bf360cb91389b9ba6b3df4679 | Add script to validate mvn repositories | dev-tools/validate-maven-repository.py | dev-tools/validate-maven-repository.py | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Helper python script to check if a sonatype staging repo contains
# all the required files compared to a local repository
#
# The script does the following steps
#
# 1. Scans the local maven repo for all files in /org/elasticsearch
# 2. Opens a HTTP connection to the staging repo
# 3. Executes a HEAD request for each file found in step one
# 4. Compares the content-length response header with the real file size
# 5. Return an error if those two numbers differ
#
# A pre requirement to run this, is to find out via the oss.sonatype.org web UI, how that repo is named
# - After logging in you go to 'Staging repositories' and search for the one you just created
# - Click into the `Content` tab
# - Open any artifact (not a directory)
# - Copy the link of `Repository Path` on the right and reuse that part of the URL
#
# Alternatively you can just use the name of the repository and reuse the rest (ie. the repository
# named for the example below would have been named orgelasticsearch-1012)
#
#
# Example call
# python dev-tools/validate-maven-repository.py /path/to/repo/org/elasticsearch/ \
# https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch
import sys
import os
import httplib
import urlparse
import re
# Draw a simple progress bar, a couple of hundred HEAD requests might take a while
# Note, when drawing this, it uses the carriage return character, so you should not
# write anything in between
def drawProgressBar(percent, barLen = 40):
    """Redraw a single-line progress bar on stdout.

    Rewinds to the start of the line with a carriage return, so the
    caller must not print anything between successive calls.
    """
    sys.stdout.write("\r")
    filled = int(barLen * percent)
    bar = "".join("=" if i < filled else " " for i in range(barLen))
    sys.stdout.write("[ %s ] %.2f%%" % (bar, percent * 100))
    sys.stdout.flush()
if __name__ == "__main__":
if len(sys.argv) != 3:
print 'Usage: %s <localRep> <stagingRepo> [user:pass]' % (sys.argv[0])
print ''
print 'Example: %s /tmp/my-maven-repo/org/elasticsearch https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch' % (sys.argv[0])
else:
sys.argv[1] = re.sub('/$', '', sys.argv[1])
sys.argv[2] = re.sub('/$', '', sys.argv[2])
localMavenRepo = sys.argv[1]
endpoint = sys.argv[2]
filesToCheck = []
foundSignedFiles = False
for root, dirs, files in os.walk(localMavenRepo):
for file in files:
# no metadata files (they get renamed from maven-metadata-local.xml to maven-metadata.xml while deploying)
# no .properties and .repositories files (they dont get uploaded)
if not file.startswith('maven-metadata') and not file.endswith('.properties') and not file.endswith('.repositories'):
filesToCheck.append(os.path.join(root, file))
if file.endswith('.asc'):
foundSignedFiles = True
print "Need to check %i files" % len(filesToCheck)
if not foundSignedFiles:
print '### Warning: No signed .asc files found'
# set up http
parsed_uri = urlparse.urlparse(endpoint)
domain = parsed_uri.netloc
if parsed_uri.scheme == 'https':
conn = httplib.HTTPSConnection(domain)
else:
conn = httplib.HTTPConnection(domain)
#conn.set_debuglevel(5)
drawProgressBar(0)
errors = []
for idx, file in enumerate(filesToCheck):
request_uri = parsed_uri.path + file[len(localMavenRepo):]
conn.request("HEAD", request_uri)
res = conn.getresponse()
res.read() # useless call for head, but prevents httplib.ResponseNotReady raise
absolute_url = parsed_uri.scheme + '://' + parsed_uri.netloc + request_uri
if res.status == 200:
content_length = res.getheader('content-length')
local_file_size = os.path.getsize(file)
if int(content_length) != int(local_file_size):
errors.append('LENGTH MISMATCH: %s differs in size. local %s <=> %s remote' % (absolute_url, content_length, local_file_size))
elif res.status == 404:
errors.append('MISSING: %s' % absolute_url)
elif res.status == 301 or res.status == 302:
errors.append('REDIRECT: %s to %s' % (absolute_url, res.getheader('location')))
else:
errors.append('ERROR: %s http response: %s %s' %(absolute_url, res.status, res.reason))
# update progressbar at the end
drawProgressBar((idx+1)/float(len(filesToCheck)))
print
if len(errors) != 0:
print 'The following errors occured (%s out of %s files)' % (len(errors), len(filesToCheck))
print
for error in errors:
print error
sys.exit(-1)
| Python | 0 | |
3a235f8525ae89ae91c333f7cd10ed307c33011c | Exclude local data from package. | setup.py | setup.py |
from __future__ import with_statement
import os
from setuptools import setup, find_packages
exclude = ["forms_builder/example_project/dev.db",
"forms_builder/example_project/local_settings.py"]
exclude = dict([(e, None) for e in exclude])
for e in exclude:
if e.endswith(".py"):
try:
os.remove("%sc" % e)
except:
pass
try:
with open(e, "r") as f:
exclude[e] = (f.read(), os.stat(e))
os.remove(e)
except Exception, e:
import pdb; pdb.set_trace()
pass
import pdb; pdb.set_trace()
try:
setup(
name = "django-forms-builder",
version = __import__("forms_builder").__version__,
author = "Stephen McDonald",
author_email = "stephen.mc@gmail.com",
description = ("A Django reusable app providing the ability for admin "
"users to create their own forms."),
long_description = open("README.rst").read(),
url = "http://github.com/stephenmcd/django-forms-builder",
zip_safe = False,
include_package_data = True,
packages = find_packages(),
install_requires = [
"sphinx-me >= 0.1.2",
"django-email-extras >= 0.1.7",
"django",
],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: Site Management",
]
)
finally:
for e in exclude:
if exclude[e] is not None:
data, stat = exclude[e]
try:
with open(e, "w") as f:
f.write(data)
os.chown(e, stat.st_uid, stat.st_gid)
os.chmod(e, stat.st_mode)
except:
pass
|
from setuptools import setup, find_packages
setup(
name = "django-forms-builder",
version = __import__("forms_builder").__version__,
author = "Stephen McDonald",
author_email = "stephen.mc@gmail.com",
description = ("A Django reusable app providing the ability for admin "
"users to create their own forms."),
long_description = open("README.rst").read(),
url = "http://github.com/stephenmcd/django-forms-builder",
zip_safe = False,
include_package_data = True,
packages = find_packages(),
install_requires = [
"sphinx-me >= 0.1.2",
"django-email-extras >= 0.1.7",
"django",
],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: Site Management",
]
)
| Python | 0 |
207116ee7ba8d8da521f497997da90066831a551 | Add codemod to replace __unicode__ with __str__ | django3_codemods/replace_unicode_with_str.py | django3_codemods/replace_unicode_with_str.py | import sys
from bowler import Query
# Rewrite module-level __unicode__ function definitions to __str__,
# presenting an interactive diff for the path given on the command line.
(
    Query(sys.argv[1])
    .select_function("__unicode__")
    .rename('__str__')
    .idiff()
),
# Rewrite call sites of the __unicode__ method to call __str__ instead.
# NOTE(review): the trailing comma above joins the two queries into one
# tuple expression; harmless at runtime, but presumably unintentional --
# confirm before relying on evaluation order.
(
    Query(sys.argv[1])
    .select_method("__unicode__")
    .is_call()
    .rename('__str__')
    .idiff()
)
| Python | 0.000001 | |
d85a68e36443bfcdeed2d8f1f3960d1596ef762a | Create catchtheball.py | catchtheball.py | catchtheball.py | import simplegui
import random
FRAME_WIDTH=STAGE_WIDTH=GROUND_WIDTH=821
FRAME_HEIGHT=498
STAGE_HEIGHT=FRAME_HEIGHT-30
PADDLE_HEIGHT=STAGE_HEIGHT
PADDLE_WIDTH=8
PADDLE_POS=[STAGE_WIDTH/2,PADDLE_HEIGHT]
image=simplegui.load_image("http://mrnussbaum.com/calendarclowns1/images/game_background.png")
list_of_balls=[]
colors=['Aqua','Blue','Fuchsia','Gray',
'Green','Lime','Maroon','Navy','Olive',
'Orange','Purple','Red','Silver','Teal',
'White','Yellow']
class Ball:
    """A falling ball: colour, radius and an [x, y] position on stage."""
    def __init__(self, color, radius, x_location):
        # Every ball enters the stage at the very top (y == 0).
        self.color = color
        self.radius = radius
        self.location = [x_location, 0]
def timer():
    """Timer callback: drop a fresh, randomly coloured ball from the top."""
    colour = random.choice(colors)
    # Keep the spawn point at least 20px clear of both stage edges.
    spawn_x = random.randint(20, STAGE_WIDTH - 20)
    list_of_balls.append(Ball(colour, 10, spawn_x))
def draw(canvas):
    """Draw handler: repaint the background, then advance and draw each ball."""
    centre = [FRAME_WIDTH / 2, FRAME_HEIGHT / 2]
    size = [FRAME_WIDTH, FRAME_HEIGHT]
    canvas.draw_image(image, centre, size, centre, size)
    for ball in list_of_balls:
        # Constant fall speed: 5 pixels further down each frame.
        ball.location[1] += 5
        canvas.draw_circle(ball.location, ball.radius, 10, ball.color, ball.color)
frame=simplegui.create_frame("ball",FRAME_WIDTH,FRAME_HEIGHT)
timer=simplegui.create_timer(2000,timer)
frame.set_draw_handler(draw)
frame.start()
timer.start()
| Python | 0.000007 | |
54bb69cd3646246975f723923254549bc5f11ca0 | Add default paver commands | citools/paver.py | citools/paver.py | @task
@consume_args
@needs('unit', 'integrate')
def test():
""" Run whole testsuite """
def djangonize_test_environment(test_project_module):
sys.path.insert(0, abspath(join(dirname(__file__))))
sys.path.insert(0, abspath(join(dirname(__file__), "tests")))
sys.path.insert(0, abspath(join(dirname(__file__), "tests", test_project_module)))
os.environ['DJANGO_SETTINGS_MODULE'] = "%s.settings" % test_project_module
def run_tests(test_project_module, nose_args):
djangonize_test_environment(test_project_module)
import nose
os.chdir(abspath(join(dirname(__file__), "tests", test_project_module)))
argv = ["--with-django"] + nose_args
nose.run_exit(
argv = ["nosetests"] + argv,
defaultTest = test_project_module
)
@task
@consume_args
def unit(args):
""" Run unittests """
run_tests(test_project_module="unit_project", nose_args=[]+args)
@task
@consume_args
def integrate(args):
""" Run integration tests """
run_tests(test_project_module="example_project", nose_args=["--with-selenium", "--with-djangoliveserver"]+args)
@task
def install_dependencies():
sh('pip install --upgrade -r requirements.txt')
@task
def bootstrap():
options.virtualenv = {'packages_to_install' : ['pip']}
call_task('paver.virtual.bootstrap')
sh("python bootstrap.py")
path('bootstrap.py').remove()
print '*'*80
if sys.platform in ('win32', 'winnt'):
print "* Before running other commands, You now *must* run %s" % os.path.join("bin", "activate.bat")
else:
print "* Before running other commands, You now *must* run source %s" % os.path.join("bin", "activate")
print '*'*80
@task
@needs('install_dependencies')
def prepare():
""" Prepare complete environment """
| Python | 0.000001 | |
0575a141153fb07a5f03c0681cdf727450348fc0 | Create space.py | space.py | space.py | def ParentOf(n, arr):
if arr[n] == n:
return n
else:
return ParentOf(arr[n],arr)
n, p = list(map(int, input().split()))
# Union-find parent array: everyone starts in their own group.
arr = list(range(n))
for q in range(p):
    # Quick Union the line: merge the two friends' groups by pointing
    # the root of one group at the root of the other.
    first, sec = list(map(int, input().split()))
    # BUG FIX: ParentOf takes (n, arr); the arr argument was missing,
    # which raised TypeError on the first union.
    arr[ParentOf(first, arr)] = ParentOf(sec, arr)
# Get number of people in each group, tallied at each member's root.
# BUG FIX: groups was an empty list assigned by index (IndexError);
# pre-size it and count by root so chained parents are included too.
groups = [0] * n
for q in range(0, n):
    groups[ParentOf(q, arr)] += 1
# groups is accurate if 0's removed
trueG = [size for size in groups if size != 0]
# Count ways to pick one person from each of two different groups.
ways = 0
for index, a in enumerate(trueG):
    i = index + 1
    while i < len(trueG):
        ways += a * trueG[i]
        i += 1
print(str(ways))
| Python | 0.001193 | |
3c074ab5c630590ca32f8951eecb3087afd8ae01 | add solution for Binary Tree Level Order Traversal II | src/binaryTreeLevelOrderTraversalII.py | src/binaryTreeLevelOrderTraversalII.py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return a list of lists of integers
    def levelOrderBottom(self, root):
        """Collect node values level by level, deepest level first."""
        self.res = []
        self._dfs(root, 0)
        # BUG FIX: reversed() returns an iterator, but the documented
        # contract is a list of lists -- materialise it.
        return list(reversed(self.res))

    def _dfs(self, root, level):
        """Pre-order walk appending each node's value to its level's bucket."""
        if not root:
            return
        if len(self.res) == level:
            # First node encountered at this depth: open a new bucket.
            self.res.append([root.val])
        else:
            self.res[level].append(root.val)
        self._dfs(root.left, level+1)
        self._dfs(root.right, level+1)
a5f90890f3cdd516c3955c868234ea1ffc7bd093 | Verify that archiving audit + fql files work | auditlog_test.py | auditlog_test.py | import logging
import os
import os.path
import pytest
import shlex
import stat
import subprocess
import tempfile
import time
from ccmlib.node import handle_external_tool_process
from dtest import Tester
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since('4.0')
class TestAuditlog(Tester):
    """Tests for audit-log / full-query-log (FQL) archiving in Cassandra 4.0+.

    Each test installs a tiny shell script (see _create_script) as the
    archive command; the script moves rolled chronicle (.cq4) files into a
    separate directory, so asserting on that directory's contents proves the
    archiver actually ran.
    """
    def test_archiving(self):
        """Audit-log segments rolled during a stress run are archived away."""
        cluster = self.cluster
        log_dir = tempfile.mkdtemp('logs')
        moved_log_dir, move_script = self._create_script()
        # TEST_SECONDLY rolls the log every second, so the archive command
        # fires while the stress run below is still in progress.
        cluster.set_configuration_options(values={'audit_logging_options': {'enabled': True,
                                                                            'audit_logs_dir': log_dir,
                                                                            'roll_cycle': 'TEST_SECONDLY',
                                                                            'archive_command':'%s %%path'%(move_script)}})
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
        node.nodetool("disableauditlog")
        # At least one rolled segment must have been moved by the script ...
        assert len(os.listdir(moved_log_dir)) > 0
        # ... and no unarchived .cq4 segments may remain in the log dir.
        for f in os.listdir(log_dir):
            assert not f.endswith(".cq4")
    def test_fql_nodetool_options(self):
        """Options passed to nodetool enablefullquerylog override the yaml."""
        cluster = self.cluster
        log_dir = tempfile.mkdtemp('logs')
        moved_log_dir, move_script = self._create_script()
        # Deliberately configure a bogus archive_command in the yaml: the one
        # passed on the nodetool command line below must win over it.
        cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
                                                                                 'archive_command': 'conf should not be used'}})
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        node.nodetool("enablefullquerylog --archive-command \"%s %%path\" --roll-cycle=TEST_SECONDLY"%move_script)
        node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
        # make sure at least one file has been rolled and archived:
        assert node.grep_log("Executing archive command", filename="debug.log")
        assert len(os.listdir(moved_log_dir)) > 0
        node.nodetool("disablefullquerylog")
        for f in os.listdir(log_dir):
            assert not f.endswith(".cq4")
        # make sure the non-rolled file gets archived when we disable fql
        node.watch_log_for("Archiving existing file", filename="debug.log")
    def test_archiving_fql(self):
        """FQL segments configured via yaml are archived while stress runs."""
        cluster = self.cluster
        log_dir = tempfile.mkdtemp('logs')
        moved_log_dir, move_script = self._create_script()
        cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
                                                                                 'roll_cycle': 'TEST_SECONDLY',
                                                                                 'archive_command':'%s %%path'%(move_script)}})
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        node.nodetool("enablefullquerylog")
        node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
        # make sure at least one file has been rolled and archived:
        assert node.grep_log("Executing archive command", filename="debug.log")
        assert len(os.listdir(moved_log_dir)) > 0
        node.nodetool("disablefullquerylog")
        for f in os.listdir(log_dir):
            assert not f.endswith(".cq4")
        # make sure the non-rolled file gets archived when we disable fql
        node.watch_log_for("Archiving existing file", filename="debug.log")
    def test_archive_on_startup(self):
        """Pre-existing .cq4 files are archived when FQL is first enabled."""
        cluster = self.cluster
        log_dir = tempfile.mkdtemp('logs')
        moved_log_dir, move_script = self._create_script()
        # Plant fake leftover segments before the node ever starts.
        files = []
        for i in range(10):
            (_, fakelogfile) = tempfile.mkstemp(dir=log_dir, suffix='.cq4')
            files.append(fakelogfile)
        for f in files:
            assert os.path.isfile(f)
        assert len(files) == 10
        cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
                                                                                 'roll_cycle': 'TEST_SECONDLY',
                                                                                 'archive_command':'%s %%path'%(move_script)}})
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        node.nodetool("enablefullquerylog")
        # Every planted file must have been moved into the archive dir.
        for f in files:
            assert not os.path.isfile(f)
            filename = os.path.basename(f)
            assert os.path.isfile(os.path.join(moved_log_dir, filename))
    def test_archive_on_shutdown(self):
        """Leftover .cq4 files are archived when FQL is disabled."""
        cluster = self.cluster
        log_dir = tempfile.mkdtemp('logs')
        moved_log_dir, move_script = self._create_script()
        cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
                                                                                 'roll_cycle': 'TEST_SECONDLY',
                                                                                 'archive_command':'%s %%path'%(move_script)}})
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        node.nodetool("enablefullquerylog")
        # adding a bunch of files after fql is enabled - these will get archived when we disable
        files = []
        for i in range(10):
            (_, fakelogfile) = tempfile.mkstemp(dir=log_dir, suffix='.cq4')
            files.append(fakelogfile)
        for f in files:
            assert os.path.isfile(f)
        assert len(files) == 10
        node.nodetool("disablefullquerylog")
        for f in files:
            assert not os.path.isfile(f)
            filename = os.path.basename(f)
            assert os.path.isfile(os.path.join(moved_log_dir, filename))
    def _create_script(self):
        """Write an executable shell script that moves its first argument into
        a fresh temp directory; return (that directory, path to the script)."""
        moved_log_dir = tempfile.mkdtemp('movedlogs')
        with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
            f.write("""
#!/bin/sh
mv $1 %s
"""%moved_log_dir)
            move_script = f.name
        st = os.stat(move_script)
        # Mark the script executable so Cassandra can invoke it.
        os.chmod(move_script, st.st_mode | stat.S_IEXEC)
        return (moved_log_dir, move_script)
| Python | 0 | |
79cf7834c4a92f84c3595af302c7b0bfa09331f2 | word2vec basic | Experiments/Tensorflow/Neural_Networks/logic_gate_linear_regressor.py | Experiments/Tensorflow/Neural_Networks/logic_gate_linear_regressor.py | '''
Logical Operation by 2-layer Neural Networks (using TF Layers) on TensorFlow
Author: Rowel Atienza
Project: https://github.com/roatienza/Deep-Learning-Experiments
'''
# On command line: python3 logic_gate_linear_regressor.py
# Prerequisite: tensorflow 1.0 (see tensorflow.org)
from __future__ import print_function
import tensorflow as tf
import numpy as np
import tensorflow.contrib.learn as learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn
tf.logging.set_verbosity(tf.logging.INFO)
learning_rate = 0.01
# try other values for nhidden
nhidden = 16
def fnn_model_fn(features,labels,mode):
    """Model function for a 2-layer feed-forward net (tf.contrib.learn API).

    features: input batch; labels: targets; mode: TRAIN / EVAL / INFER key.

    NOTE(review): `loss` is only bound when mode != EVAL and `train_op` only
    when mode == TRAIN, yet both are referenced unconditionally in the final
    return - calling this with mode == EVAL (or INFER) would raise NameError.
    Confirm the intended evaluation path before reusing.
    """
    print(features)
    print(labels)
    # output_labels = tf.reshape(labels,[-1,1])
    # Hidden layer: `nhidden` ReLU units (nhidden is a module-level knob).
    dense = tf.layers.dense(features,units=nhidden,activation=tf.nn.relu,use_bias=True)
    print(dense)
    # Single output logit.
    logits = tf.layers.dense(dense,units=1,use_bias=True)
    print(logits)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=1)
    if mode != learn.ModeKeys.EVAL:
        # loss = tf.losses.sigmoid_cross_entropy(output_labels,logits)
        # loss = tf.losses.mean_squared_error(labels=output_labels,predictions=logits)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)
    if mode==learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=learning_rate,
            optimizer="SGD")
    predictions = {
        "classes": tf.round(logits),
        "probabilities": tf.nn.softmax(
            logits, name="softmax_tensor")
    }
    return model_fn.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)
def main(arg):
    """Train and evaluate the logic-gate network on a 4-sample truth table."""
    x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
    # try other logics; xor = [0., 1., 1., 0.], or = [0., 1., 1., 1.], and = [0., 0., 0., 1.], etc
    y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)  # XOR targets
    classifier = learn.Estimator(model_fn=fnn_model_fn, model_dir="/tmp/fnn")
    # Log the softmax probabilities tensor every 10 training steps.
    to_log = {"probabilities": "softmax_tensor"}
    log_hook = tf.train.LoggingTensorHook(to_log, every_n_iter=10)
    classifier.fit(x=x_data, y=y_data, batch_size=1, steps=50, monitors=[log_hook])
    metrics = {
        "accuracy":
            learn.MetricSpec(
                metric_fn=tf.metrics.accuracy, prediction_key="classes"),
    }
    eval_results = classifier.evaluate(
        x=x_data, y=y_data, metrics=metrics)
    print(eval_results)
if __name__ == "__main__":
tf.app.run()
| Python | 0.999103 | |
f22b6368bdfe91cff06ede51c1caad04f769b437 | add management command to load location type into supply point | custom/colalife/management/commands/load_location_type_into_supply_point.py | custom/colalife/management/commands/load_location_type_into_supply_point.py | from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.locations.models import Location
from django.core.management import BaseCommand
class Command(BaseCommand):
    help = 'Store location type with supply point.'

    def handle(self, *args, **options):
        """Copy each location's type onto its supply point case and save it."""
        for loc_type in ("wholesaler", "retailer"):
            for loc in Location.filter_by_type("colalifezambia", loc_type):
                case = SupplyPointCase.get_by_location(loc)
                case.location_type = loc_type
                case.save()
| Python | 0 | |
3e02fe79f4fad6f5252af750a13d74d7a4f82cc5 | read in the file, probably badly | src/emc/usr_intf/touchy/filechooser.py | src/emc/usr_intf/touchy/filechooser.py | # Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import dircache
import os
class filechooser:
    """Paged touch-screen chooser for G-code files under ~/emc2/nc_files.

    Renders one file name per label/eventbox pair; tapping a name loads the
    program into EMC via MDI mode, and up()/down() page through the listing.
    """

    def __init__(self, gtk, emc, labels, eventboxes, program):
        self.labels = labels
        self.eventboxes = eventboxes
        self.numlabels = len(labels)
        self.program = program
        self.gtk = gtk
        self.emc = emc
        self.emccommand = emc.command()
        self.fileoffset = 0  # index of the first file shown on this page
        self.dir = os.path.join(os.getenv('HOME'), 'emc2', 'nc_files')
        self.files = dircache.listdir(self.dir)
        self.selected = -1  # index into self.files; -1 = nothing selected
        self.populate()

    def populate(self):
        """Refresh the labels for the current page and highlight the selection."""
        files = self.files[self.fileoffset:]
        for i in range(self.numlabels):
            l = self.labels[i]
            e = self.eventboxes[i]
            if i < len(files):
                l.set_text(files[i])
            else:
                l.set_text('')
            # White background marks the selected file, grey the rest.
            if self.selected == self.fileoffset + i:
                e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse('#fff'))
            else:
                e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse('#ccc'))

    def select(self, eventbox, event):
        """Handle a tap on a file row: remember it and open it in EMC."""
        n = int(eventbox.get_name()[20:])
        self.selected = self.fileoffset + n
        self.emccommand.mode(self.emc.MODE_MDI)
        fn = os.path.join(self.dir, self.labels[n].get_text())
        # Use a context manager instead of file()/close() so the handle is
        # released even if readlines() raises (open() is also Python 3 safe,
        # unlike the removed py2-only file() builtin).
        with open(fn, 'r') as f:
            self.lines = f.readlines()
        self.emccommand.program_open(fn)
        self.populate()

    def up(self, b):
        """Page backwards, clamping at the start of the listing."""
        self.fileoffset -= self.numlabels
        if self.fileoffset < 0:
            self.fileoffset = 0
        self.populate()

    def down(self, b):
        """Page forwards."""
        self.fileoffset += self.numlabels
        self.populate()
| # Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import dircache
import os
class filechooser:
    """Paged touch-screen chooser for G-code files under ~/emc2/nc_files."""

    def __init__(self, gtk, emc, labels, eventboxes, program):
        self.labels = labels
        self.eventboxes = eventboxes
        self.numlabels = len(labels)
        self.program = program
        self.gtk = gtk
        self.emc = emc
        self.emccommand = emc.command()
        self.fileoffset = 0
        self.dir = os.path.join(os.getenv('HOME'), 'emc2', 'nc_files')
        self.files = dircache.listdir(self.dir)
        self.selected = -1
        self.populate()

    def populate(self):
        """Write the current page of names into the labels; the selected
        row gets a white background, everything else grey."""
        visible = self.files[self.fileoffset:]
        for idx in range(self.numlabels):
            label = self.labels[idx]
            box = self.eventboxes[idx]
            label.set_text(visible[idx] if idx < len(visible) else '')
            colour = '#fff' if self.selected == self.fileoffset + idx else '#ccc'
            box.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse(colour))

    def select(self, eventbox, event):
        """Open the tapped file in EMC via MDI mode."""
        row = int(eventbox.get_name()[20:])
        self.selected = self.fileoffset + row
        self.emccommand.mode(self.emc.MODE_MDI)
        self.emccommand.program_open(os.path.join(self.dir, self.labels[row].get_text()))
        self.populate()

    def up(self, b):
        """Page backwards, clamping at the first page."""
        self.fileoffset = max(0, self.fileoffset - self.numlabels)
        self.populate()

    def down(self, b):
        """Page forwards."""
        self.fileoffset += self.numlabels
        self.populate()
| Python | 0.000001 |
2c98e54c7f2138b4472336520ab18af8f49b9b48 | test networks on test data corresponding to each dataset | test_network.py | test_network.py | import keras
from keras.optimizers import SGD, adadelta, rmsprop, adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.metrics import matthews_correlation, precision, recall
import cPickle
import numpy as np
import getpass
username = getpass.getuser()
from foo_two import foo
def get_data(n_dataset):
    """Load fold *n_dataset* of the pickled MODS image data.

    Returns ((train_images, train_labels), (test_images, test_labels)) where
    the image arrays are reshaped to (N, 1, 224, 224) and cast to float32
    (halving memory versus the default 64-bit precision).
    """
    path = 'MODS_all_data_bw_224_224_{0}.pkl'.format(n_dataset)
    with open(path, 'rb') as handle:
        blob = cPickle.load(handle)
    train_part = blob[0]
    valid_part = blob[1]

    def _prep(images):
        # To array, add the single-channel axis, and shrink to float32.
        arr = np.array(images)
        arr = arr.reshape(arr.shape[0], 1, 224, 224)
        return arr.astype('float32')

    train_x = _prep(train_part[0])
    train_y = np.array(train_part[1])
    test_x = _prep(valid_part[0])
    test_y = np.array(valid_part[1])
    return (train_x, train_y), (test_x, test_y)
class LossAccHistory(keras.callbacks.Callback):
    """Keras callback that records the loss and accuracy of every batch."""

    def on_train_begin(self, logs={}):
        # Fresh histories at the start of each training run.
        self.losses, self.accu = [], []

    def on_batch_end(self, batch, logs={}):
        # Keras reports per-batch metrics in `logs`; absent keys become None.
        self.losses.append(logs.get('loss'))
        self.accu.append(logs.get('acc'))
# Evaluation configuration.
nb_classes = 2
nb_epoch = 100
data_augmentation = True
n_dataset = 7
plot_loss = True
# Hyperparameters for tuning
dropout = 0.5  # [0.0, 0.25, 0.5, 0.7]
batch_size = 16  # [32, 70, 100, 150]
optimizer = 'rmsprop'  # ['sgd', 'adadelta']
test_metrics = []
model = foo()
# Evaluate the same network on the held-out split of every dataset fold.
for i in range(n_dataset):  # range works on Python 2 and 3 (was xrange)
    history = LossAccHistory()
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = get_data(i)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    X_test /= 255  # scale pixel values into [0, 1]
    print(X_test.shape[0], 'test samples')
    # Shows all layers and names
    for v, layer in enumerate(model.layers):
        print(v, layer.name)
    print('Training of the network, using real-time data augmentation.')
    model.compile(loss='binary_crossentropy',
                  optimizer=rmsprop(lr=0.001),  # adadelta
                  metrics=['accuracy', 'matthews_correlation', 'precision', 'recall'])
    score = model.evaluate(X_test, Y_test, verbose=1)
    print(model.metrics_names, score)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    m = (model.metrics_names, score, 'dataset {0}'.format(i))
    test_metrics.append(m)
    model.reset_states()
# Save test metrics to a txt file.  A context manager guarantees the handle
# is closed, and the name no longer shadows the built-in `file`; the loop
# variable is renamed so it cannot collide with the fold index `i` above.
with open('cut_MODS_test_metrics.txt', 'w') as metrics_file:
    for entry in test_metrics:
        metrics_file.write('%s\n' % entry)
print(test_metrics)
| Python | 0.000037 | |
ed94317df99493c24c58a1e1aa553a8f822e793f | Test cases | erpnext/accounts/report/sales_payment_summary/test_sales_payment_summary.py | erpnext/accounts/report/sales_payment_summary/test_sales_payment_summary.py | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import erpnext
from erpnext.accounts.report.sales_payment_summary.sales_payment_summary import get_mode_of_payments, get_mode_of_payment_details
from frappe.utils import nowdate
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
test_dependencies = ["Sales Invoice"]
class TestSalesPaymentSummary(unittest.TestCase):
    """Tests for the Sales Payment Summary report helpers.

    Both tests duplicate a couple of existing Sales Invoices and pay them,
    alternating between Cash and Credit Card, so the report has two modes of
    payment to aggregate.  That shared setup lives in
    _make_invoices_and_payments (it was previously copy-pasted verbatim in
    both test methods).
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _make_invoices_and_payments(self):
        """Duplicate two existing Sales Invoices and submit a Payment Entry
        for each, alternating Cash / Credit Card by invoice name parity."""
        for invoice in frappe.get_all("Sales Invoice", fields=["name", "docstatus"])[:2]:
            doc = frappe.get_doc("Sales Invoice", invoice.name)
            new_doc = frappe.copy_doc(doc)
            new_doc.insert()
            new_doc.submit()
            # A second submit may raise once the doc is already submitted;
            # that is fine for this fixture.
            try:
                new_doc.submit()
            except Exception:
                pass
            # Alternate on the numeric suffix of the generated name so both
            # payment modes are represented.
            if int(new_doc.name[-3:]) % 2 == 0:
                bank_account = "_Test Cash - _TC"
                mode_of_payment = "Cash"
            else:
                bank_account = "_Test Bank - _TC"
                mode_of_payment = "Credit Card"
            pe = get_payment_entry("Sales Invoice", new_doc.name, bank_account=bank_account)
            pe.reference_no = "_Test"
            pe.reference_date = nowdate()
            pe.mode_of_payment = mode_of_payment
            pe.insert()
            pe.submit()

    @staticmethod
    def _credit_card_total(mopd):
        """Return the aggregated Credit Card amount from the report details
        (last matching row, mirroring the original lookup loop)."""
        total = None
        for mopd_value in mopd.values()[0]:
            if mopd_value[0] == "Credit Card":
                total = mopd_value[1]
        return total

    def test_get_mode_of_payments(self):
        """Cancelled payment entries must drop out of get_mode_of_payments()."""
        filters = get_filters()
        self._make_invoices_and_payments()
        mop = get_mode_of_payments(filters)
        self.assertTrue('Credit Card' in mop.values()[0])
        self.assertTrue('Cash' in mop.values()[0])
        # Cancel all Cash payment entries and check that this mode of payment
        # is no longer fetched.
        payment_entries = frappe.get_all("Payment Entry", filters={"mode_of_payment": "Cash", "docstatus": 1}, fields=["name", "docstatus"])
        for payment_entry in payment_entries:
            frappe.get_doc("Payment Entry", payment_entry.name).cancel()
        mop = get_mode_of_payments(filters)
        self.assertTrue('Credit Card' in mop.values()[0])
        self.assertTrue('Cash' not in mop.values()[0])

    def test_get_mode_of_payments_details(self):
        """Cancelling a Credit Card entry must reduce its aggregated amount."""
        filters = get_filters()
        self._make_invoices_and_payments()
        mopd = get_mode_of_payment_details(filters)
        cc_init_amount = self._credit_card_total(mopd)
        # Cancel one Credit Card Payment Entry and check that its amount no
        # longer contributes to the mode of payment details.
        payment_entries = frappe.get_all("Payment Entry", filters={"mode_of_payment": "Credit Card", "docstatus": 1}, fields=["name", "docstatus"])
        for payment_entry in payment_entries[:1]:
            frappe.get_doc("Payment Entry", payment_entry.name).cancel()
        mopd = get_mode_of_payment_details(filters)
        cc_final_amount = self._credit_card_total(mopd)
        self.assertTrue(cc_init_amount > cc_final_amount)
def get_filters():
return {
"from_date": "1900-01-01",
"to_date": nowdate(),
"company": "_Test Company"
} | Python | 0.000001 | |
37793ec10e2b27e64efaa3047ae89a6d10a6634d | Update urlrewrite_redirect.py | flexget/plugins/urlrewrite_redirect.py | flexget/plugins/urlrewrite_redirect.py | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('urlrewrite_redirect')
class UrlRewriteRedirect(object):
    """Rewrites urls which actually redirect somewhere else."""
    def __init__(self):
        # URLs already examined this task run; each is rewritten at most once.
        self.processed = set()
    def on_task_start(self):
        # Reset the per-run cache at the start of every task.
        self.processed = set()
    def url_rewritable(self, task, entry):
        """Return True if the entry's URL uses a scheme with a registered
        requests adapter and has not already been processed this run."""
        if not any(entry['url'].startswith(adapter) for adapter in task.requests.adapters):
            return False
        return entry['url'] not in self.processed
    def url_rewrite(self, task, entry):
        """Replace entry['url'] with the target of a single HTTP redirect.

        Sends a HEAD request (with the entry's download_auth when present)
        and, on a 3xx response carrying a Location header, substitutes that
        location.  Any failure is swallowed: rewriting is best-effort.
        """
        try:
            # Don't accidentally go online in unit tests
            if task.manager.unit_test:
                return
            auth = None
            if 'download_auth' in entry:
                auth = entry['download_auth']
                log.debug('Custom auth enabled for %s url_redirect: %s' % (entry['title'], entry['download_auth']))
            r = task.requests.head(entry['url'], auth=auth)
            if 300 <= r.status_code < 400 and 'location' in r.headers:
                entry['url'] = r.headers['location']
        except Exception:
            # Best-effort: leave the URL untouched on any failure.
            pass
        finally:
            # Make sure we don't try to rewrite this url again
            self.processed.add(entry['url'])
@event('plugin.register')
def register_plugin():
    """Register this class with the urlrewriter group at plugin-load time."""
    plugin.register(UrlRewriteRedirect, 'urlrewrite_redirect', groups=['urlrewriter'], api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('urlrewrite_redirect')
class UrlRewriteRedirect(object):
    """Rewrites urls which actually redirect somewhere else."""
    def __init__(self):
        # URLs already examined during this task run.
        self.processed = set()
    def on_task_start(self):
        # Reset the per-run cache at the start of each task.
        self.processed = set()
    def url_rewritable(self, task, entry):
        # Only handle URLs whose scheme has a registered requests adapter,
        # and only once per task run.
        if not any(entry['url'].startswith(adapter) for adapter in task.requests.adapters):
            return False
        return entry['url'] not in self.processed
    def url_rewrite(self, task, entry):
        # Best-effort: follow a single HTTP redirect via a HEAD request and
        # substitute the Location header; any failure leaves the URL as-is.
        try:
            # Don't accidentally go online in unit tests
            if task.manager.unit_test:
                return
            r = task.requests.head(entry['url'])
            if 300 <= r.status_code < 400 and 'location' in r.headers:
                entry['url'] = r.headers['location']
        except Exception:
            pass
        finally:
            # Make sure we don't try to rewrite this url again
            self.processed.add(entry['url'])
@event('plugin.register')
def register_plugin():
    """Register this class with the urlrewriter group at plugin-load time."""
    plugin.register(UrlRewriteRedirect, 'urlrewrite_redirect', groups=['urlrewriter'], api_ver=2)
| Python | 0 |
e4ca040124e26b06a11e7fb51c3622a213285d24 | Create thresholding.py | thresholding.py | thresholding.py | import numpy as np
from PIL import Image
def discretize(a):
    """Binarize pixel array *a*: values above 50 map to 255, the rest to 0."""
    mask = a > 50
    return np.uint8(mask * 255)
# Pick one matching dirty/clean image pair from the Kaggle-style
# train / train_cleaned directory layout.
image_id = 101
dirty_image_path = "../input/train/%d.png" % image_id
clean_image_path = "../input/train_cleaned/%d.png" % image_id
dirty = Image.open(dirty_image_path)
clean = Image.open(clean_image_path)
# Re-save local copies so the HTML report below can reference them.
dirty.save("dirty.png")
clean.save("clean.png")
clean_array = np.asarray(clean)  # NOTE(review): currently unused
dirty_array = np.asarray(dirty)
# Threshold the dirty image (see discretize above) and save the result.
discretized_array = discretize(dirty_array)
Image.fromarray(discretized_array).save("discretized.png")
# Simple report page comparing the three images side by side.
html = """<html>
<body>
<h1>Thresholding</h1>
<p>This is a very simple attempt to clean up an image by thresholding the pixel value at 50. (Under 50 goes to 0, above 50 goes to 255.)</p>
<h2>Dirty image</h2>
<img src="dirty.png">
<h2>Cleaned up by thresholding</h2>
<img src="discretized.png">
<h2>Original clean image</h2>
<img src="clean.png">
</body>
</html>
"""
with open("output.html", "w") as output_file:
    output_file.write(html)
| Python | 0 | |
d78444cdb6018e2fe49905638ce7645e8de5738b | add util/csv_melt.py | util/csv_melt.py | util/csv_melt.py | #!/usr/bin/env python
# https://github.com/shenwei356/bio_scripts
import argparse
import csv
import re
import sys

import pandas as pd

parser = argparse.ArgumentParser(
    description="Melt CSV file, you can append new column",
    epilog="https://github.com/shenwei356/bio_scripts")
parser.add_argument(
    'key',
    type=str,
    help=
    'Column name of key in csvfile. Multiple values should be separated by comma')
parser.add_argument('csvfile', type=str, help='CSV file with head row!')
parser.add_argument('--var_name',
                    type=str,
                    default='var_name',
                    help='name to use for the "variable" column')
parser.add_argument('--value_name',
                    type=str,
                    default='value_name',
                    help='name to use for the "value" column')
parser.add_argument('-a',
                    '--append',
                    type=str,
                    help='another column. format: column=value')
parser.add_argument('-o', '--outfile', type=str, help='output file')
parser.add_argument('--fs', type=str, default=",", help='field separator [,]')
parser.add_argument('--fs-out',
                    type=str,
                    help='field separator of ouput [same as --fs]')
parser.add_argument('--qc', type=str, default='"', help='Quote char["]')
parser.add_argument('-t',
                    action='store_true',
                    help='field separator is "\\t". Quote char is "\\t"')
args = parser.parse_args()

# -t is shorthand for tab-separated input/output.
if args.t:
    args.fs, args.qc = '\t', '\t'
if not args.fs_out:
    args.fs_out = args.fs

# --append takes a single "column=value" pair.
pattern = '^([^=]+)=([^=]+)$'
if args.append:
    if not re.search(pattern, args.append):
        sys.stderr.write("bad format for option -a: {}".format(args.append))
        sys.exit(1)
    # BUG FIX: the original line ended in unbalanced parentheses
    # ("...[0]))") which made the whole script a SyntaxError.
    colname, colvalue = re.findall(pattern, args.append)[0]

# The key may be a comma-separated list of column names.
keys = list()
if ',' in args.key:
    keys = [k for k in args.key.split(',')]
else:
    keys = [args.key]

# ------------------------------------------------------------
df = pd.read_csv(args.csvfile,
                 sep=args.fs,
                 quotechar=args.qc)  # , index_col=keys)
df = pd.melt(df,
             id_vars=keys,
             var_name=args.var_name,
             value_name=args.value_name)
if args.append:
    df[colname] = pd.Series([colvalue] * len(df))
if args.outfile:
    df.to_csv(args.outfile, sep=args.fs, quotechar=args.qc, index=0)
else:
    df.to_csv(sys.stdout, sep=args.fs, quotechar=args.qc, index=0)
| Python | 0.000008 | |
326010629c6d5bb1274d1db1231f5b84c394b4e4 | Add some api tests for ZHA (#20909) | tests/components/zha/test_api.py | tests/components/zha/test_api.py | """Test ZHA API."""
from unittest.mock import Mock
import pytest
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.components.switch import DOMAIN
from homeassistant.components.zha.api import (
async_load_api, WS_ENTITIES_BY_IEEE, WS_ENTITY_CLUSTERS, ATTR_IEEE, TYPE,
ID, NAME, WS_ENTITY_CLUSTER_ATTRIBUTES, WS_ENTITY_CLUSTER_COMMANDS
)
from homeassistant.components.zha.core.const import (
ATTR_CLUSTER_ID, ATTR_CLUSTER_TYPE, IN
)
from .common import async_init_zigpy_device
@pytest.fixture
async def zha_client(hass, config_entry, zha_gateway, hass_ws_client):
    """Provide a websocket client with the ZHA API loaded and one OnOff device."""
    from zigpy.zcl.clusters.general import OnOff
    # load the ZHA API
    async_load_api(hass, Mock(), zha_gateway)
    # create zigpy device
    await async_init_zigpy_device(
        hass, [OnOff.cluster_id], [], None, zha_gateway)
    # load up switch domain
    await hass.config_entries.async_forward_entry_setup(
        config_entry, DOMAIN)
    await hass.async_block_till_done()
    return await hass_ws_client(hass)
async def test_entities_by_ieee(hass, config_entry, zha_gateway, zha_client):
    """Test getting entity refs by ieee address."""
    await zha_client.send_json({
        ID: 5,
        TYPE: WS_ENTITIES_BY_IEEE,
    })
    msg = await zha_client.receive_json()
    # The fixture device exposes two entities under its fixed ieee address.
    assert '00:0d:6f:00:0a:90:69:e7' in msg['result']
    assert len(msg['result']['00:0d:6f:00:0a:90:69:e7']) == 2
async def test_entity_clusters(hass, config_entry, zha_gateway, zha_client):
    """Test getting entity cluster info."""
    await zha_client.send_json({
        ID: 5,
        TYPE: WS_ENTITY_CLUSTERS,
        ATTR_ENTITY_ID: 'switch.fakemanufacturer_fakemodel_0a9069e7_1_6',
        ATTR_IEEE: '00:0d:6f:00:0a:90:69:e7'
    })
    msg = await zha_client.receive_json()
    # The fixture switch carries exactly one cluster: the OnOff input cluster.
    assert len(msg['result']) == 1
    cluster_info = msg['result'][0]
    assert cluster_info[TYPE] == IN
    assert cluster_info[ID] == 6
    assert cluster_info[NAME] == 'OnOff'
async def test_entity_cluster_attributes(
        hass, config_entry, zha_gateway, zha_client):
    """Test getting entity cluster attributes."""
    await zha_client.send_json({
        ID: 5,
        TYPE: WS_ENTITY_CLUSTER_ATTRIBUTES,
        ATTR_ENTITY_ID: 'switch.fakemanufacturer_fakemodel_0a9069e7_1_6',
        ATTR_IEEE: '00:0d:6f:00:0a:90:69:e7',
        ATTR_CLUSTER_ID: 6,
        ATTR_CLUSTER_TYPE: IN
    })
    msg = await zha_client.receive_json()
    attributes = msg['result']
    # The fixture's OnOff cluster reports four attributes, each with id/name.
    assert len(attributes) == 4
    for attribute in attributes:
        assert attribute[ID] is not None
        assert attribute[NAME] is not None
async def test_entity_cluster_commands(
        hass, config_entry, zha_gateway, zha_client):
    """Test getting entity cluster commands."""
    await zha_client.send_json({
        ID: 5,
        TYPE: WS_ENTITY_CLUSTER_COMMANDS,
        ATTR_ENTITY_ID: 'switch.fakemanufacturer_fakemodel_0a9069e7_1_6',
        ATTR_IEEE: '00:0d:6f:00:0a:90:69:e7',
        ATTR_CLUSTER_ID: 6,
        ATTR_CLUSTER_TYPE: IN
    })
    msg = await zha_client.receive_json()
    commands = msg['result']
    # The fixture's OnOff cluster reports six commands with id/name/type.
    assert len(commands) == 6
    for command in commands:
        assert command[ID] is not None
        assert command[NAME] is not None
        assert command[TYPE] is not None
| Python | 0 | |
3593fdc86dcb2559677b3b1f996c5c0e00659bc9 | Add parser for 7elc (b) | d_parser/d_spider_7elc.py | d_parser/d_spider_7elc.py | from d_parser.d_spider_common import DSpiderCommon
from d_parser.helpers.re_set import Ree
from helpers.url_generator import UrlGenerator
VERSION = 28
# Warn: Don't remove task argument even if not use it (it's break grab and spider crashed)
# Warn: noinspection PyUnusedLocal
class DSpider(DSpiderCommon):
    """Grab-based spider for the 7elc catalog: categories -> pagination ->
    item pages, appending one dict per product to self.result.

    NOTE(review): error handlers below call both self.process_error and
    self._process_error - one of the two names is presumably a typo; confirm
    which method DSpiderCommon actually defines and unify.
    """
    def __init__(self, thread_number, try_limit=0):
        super().__init__(thread_number, try_limit)
    # fetch categories
    def task_initial(self, grab, task):
        """Collect category links and queue a 'parse_page' task for each."""
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return
            # catalog
            catalog = grab.doc.select('//div[@class="topcatalog"]//a[re:match(@href, "/.+/.+/")]')
            for link in catalog:
                link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
                yield self.do_task('parse_page', link, 90)
        except Exception as e:
            self.process_error(grab, task, e)
        finally:
            self.process_finally(task)
    # parse page pagination
    def task_parse_page(self, grab, task):
        """Read the pagination block and queue per-page item-listing tasks.

        NOTE(review): range(2, max_page) excludes max_page itself, so the
        last pagination page is never queued; and no 'parse_page_items' task
        is emitted for page 1 here - confirm whether both are intentional.
        """
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return
            catalog = grab.doc.select('//div[@class="listcatalog"]')
            # gerenate new tasks
            links = catalog.select('.//div[@class="navigation"]/div[@class="nav"]//a')
            max_page = 1
            for link in links:
                page_number = link.text('')
                if page_number and Ree.number.match(page_number):
                    max_page = max(max_page, int(page_number))
            if max_page > 1:
                for page in range(2, max_page):
                    next_page = UrlGenerator.get_page_params(task.url, '', {'PAGEN_1': page})
                    yield self.do_task('parse_page_items', next_page, 90)
        except Exception as e:
            self._process_error(grab, task, e)
        finally:
            self.process_finally(task)
    # parse page
    def task_parse_page_items(self, grab, task):
        """Queue a 'parse_item' task for every product link on a listing page."""
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return
            catalog = grab.doc.select('//div[@class="listcatalog"]')
            # parse items links
            items_list = catalog.select('.//table[@class="lclistitem"]//td[@class="name"]//a')
            for link in items_list:
                link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
                yield self.do_task('parse_item', link, 100, last=True)
        except Exception as e:
            self._process_error(grab, task, e)
        finally:
            self.process_finally(task)
    # parse single item
    def task_parse_item(self, grab, task):
        """Scrape one product page into a dict and append it to self.result.

        Items with an unrecognized availability string or a non-numeric
        price are skipped with a warning.
        """
        try:
            # common block with info
            product_info = grab.doc.select('//div[@class="itemcatalog"]')
            # parse fields
            # A = name
            product_name = product_info.select('.//h1').text()
            # B = count (quantity)
            # C = status (delivery)
            # Availability strings are site-specific Russian phrases:
            # "in stock" vs. "expected / on order".
            product_count_string = product_info.select('.//table[@id="hint-table"]//tr[1]//td[2]').text('')
            if product_count_string == 'Имеется в наличии':
                product_count = '-1'
                product_status = '0'
            elif product_count_string in ['Ожидается поступление', 'Под заказ']:
                product_count = '-1'
                product_status = '-1'
            else:
                self.log.warning(task, f'Unknown count status {product_count_string} skip...')
                return
            tin_tab = product_info.select('.//table[@class="tintab"]')
            # D = unit (measure)
            product_unit = tin_tab.select('.//tr[2]/td[2]').text('ед.')
            # E = price ("on request" becomes the sentinel -1)
            product_price = product_info.select('.//span[@class="price"]').text('').replace(' руб.', '')
            if product_price == 'по запросу':
                product_price = '-1'
            if not product_price or not Ree.float.match(product_price):
                self.log.warning(task, f'Unknown price status {product_price}, skip...')
                return
            # F = vendor code (sku)
            product_vendor_code = tin_tab.select('.//tr[1]/td[2]').text('')
            # G = vendor (manufacture)
            product_vendor = tin_tab.select('.//tr[last()]/td[2]').text('')
            # H = photo url (made absolute against self.domain when present)
            product_photo_url_raw = product_info.select('.//a[@itemprop="image"]').attr('href', '')
            if product_photo_url_raw:
                product_photo_url = UrlGenerator.get_page_params(self.domain, product_photo_url_raw, {})
            else:
                product_photo_url = ''
            # I = description (properties)
            product_description = {}
            # try parse full props
            for row in tin_tab.select('.//tr'):
                key = row.select('./td[1]').text()
                value = row.select('./td[2]').text()
                if key:
                    product_description[key] = value
            # common free-text description, skipping embedded tables/images
            item_description_rows = grab.doc.select('//div[@itemprop="description"]')
            item_description = ''
            for row in item_description_rows:
                if row.node().tag not in ['table', 'img']:
                    item_description += row.text('')
            if item_description:
                product_description['Техническое описание'] = item_description
            # save
            self.result.append({
                'name': product_name,
                'quantity': product_count,
                'delivery': product_status,
                'measure': product_unit,
                'price': product_price,
                'sku': product_vendor_code,
                'manufacture': product_vendor,
                'photo': product_photo_url,
                'properties': product_description
            })
        except Exception as e:
            self.process_error(grab, task, e)
        finally:
            self.process_finally(task, last=True)
| Python | 0.000786 | |
377c61203d2e684b9b8e113eb120213e85c4487f | Fix call to super() (#19279) | homeassistant/components/light/lutron.py | homeassistant/components/light/lutron.py | """
Support for Lutron lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lutron/
"""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.lutron import (
LutronDevice, LUTRON_DEVICES, LUTRON_CONTROLLER)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Lutron lights."""
    entities = [
        LutronLight(area_name, device, hass.data[LUTRON_CONTROLLER])
        for (area_name, device) in hass.data[LUTRON_DEVICES]['light']
    ]
    add_entities(entities, True)
def to_lutron_level(level):
    """Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
    scaled = (level * 100) / 255
    return float(scaled)
def to_hass_level(level):
    """Convert the given Lutron (0.0-100.0) light level to HASS (0-255)."""
    scaled = (level * 255) / 100
    return int(scaled)
class LutronLight(LutronDevice, Light):
    """Representation of a Lutron Light, including dimmable."""
    def __init__(self, area_name, lutron_device, controller):
        """Initialize the light."""
        # Last non-zero brightness; used by turn_on() to restore the level.
        self._prev_brightness = None
        super().__init__(area_name, lutron_device, controller)
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS
    @property
    def brightness(self):
        """Return the brightness of the light."""
        new_brightness = to_hass_level(self._lutron_device.last_level())
        # Remember the last non-zero level so turn_on() can restore it.
        if new_brightness != 0:
            self._prev_brightness = new_brightness
        return new_brightness
    def turn_on(self, **kwargs):
        """Turn the light on."""
        if ATTR_BRIGHTNESS in kwargs and self._lutron_device.is_dimmable:
            brightness = kwargs[ATTR_BRIGHTNESS]
        elif self._prev_brightness == 0:
            # No usable remembered level: fall back to half brightness.
            brightness = 255 / 2
        else:
            brightness = self._prev_brightness
        self._prev_brightness = brightness
        self._lutron_device.level = to_lutron_level(brightness)
    def turn_off(self, **kwargs):
        """Turn the light off."""
        self._lutron_device.level = 0
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attr = {'lutron_integration_id': self._lutron_device.id}
        return attr
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._lutron_device.last_level() > 0
    def update(self):
        """Call when forcing a refresh of the device."""
        # Seed the remembered brightness from the device on first refresh.
        if self._prev_brightness is None:
            self._prev_brightness = to_hass_level(self._lutron_device.level)
| """
Support for Lutron lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lutron/
"""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.lutron import (
LutronDevice, LUTRON_DEVICES, LUTRON_CONTROLLER)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron lights."""
devs = []
for (area_name, device) in hass.data[LUTRON_DEVICES]['light']:
dev = LutronLight(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
add_entities(devs, True)
def to_lutron_level(level):
"""Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
return float((level * 100) / 255)
def to_hass_level(level):
"""Convert the given Lutron (0.0-100.0) light level to HASS (0-255)."""
return int((level * 255) / 100)
class LutronLight(LutronDevice, Light):
"""Representation of a Lutron Light, including dimmable."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the light."""
self._prev_brightness = None
super().__init__(self, area_name, lutron_device, controller)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of the light."""
new_brightness = to_hass_level(self._lutron_device.last_level())
if new_brightness != 0:
self._prev_brightness = new_brightness
return new_brightness
def turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs and self._lutron_device.is_dimmable:
brightness = kwargs[ATTR_BRIGHTNESS]
elif self._prev_brightness == 0:
brightness = 255 / 2
else:
brightness = self._prev_brightness
self._prev_brightness = brightness
self._lutron_device.level = to_lutron_level(brightness)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._lutron_device.level = 0
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = {}
attr['lutron_integration_id'] = self._lutron_device.id
return attr
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_level() > 0
def update(self):
"""Call when forcing a refresh of the device."""
if self._prev_brightness is None:
self._prev_brightness = to_hass_level(self._lutron_device.level)
| Python | 0 |
0b1fc2eb8dad6e5b41e80c5b0d97b9f8a20f9afa | Add utils.py | krcurrency/utils.py | krcurrency/utils.py | """:mod:`krcurrency.utils` --- Helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from bs4 import BeautifulSoup as BS
import requests
__all__ = 'request',
def request(url, encoding='utf-8', parselib='lxml'):
"""url로 요청한 후 돌려받은 값을 BeautifulSoup 객체로 변환해서 반환합니다.
"""
r = requests.get(url)
if r.status_code != 200:
return None
soup = None
try:
soup = BeautifulSoup(r.text, parselib)
except Exception as e:
pass
return soup
| Python | 0.000004 | |
76a2c80b015228dd4c6aa932ca9b2faece23a714 | Create multiplesof3and5.py | multiplesof3and5.py | multiplesof3and5.py | #If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
#The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
answer = 0
for i in range (1,1000);
if i%3 = 0 or i%5 = 0;
answer = answer + i
else;
continue
print answer
| Python | 0.998635 | |
2d1624f088431e5f71214988499f732695a82b16 | Bump version 0.15.0rc3 --> 0.15.0rc4 | lbrynet/__init__.py | lbrynet/__init__.py | import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| Python | 0 |
a79e76470f747e6dc060c7aa3e0f02ee825eeb56 | Add back enikshay missing foreign key constraints | custom/enikshay/management/commands/add_back_enikshay_foreign_keys.py | custom/enikshay/management/commands/add_back_enikshay_foreign_keys.py | from datetime import datetime
from django.db import connections
from django.core.management.base import BaseCommand
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def foreign_key_exists(db_alias, table_name, foreign_key_name):
cursor = connections[db_alias].cursor()
cursor.execute(
"SELECT 1 FROM pg_constraint WHERE conname = %s AND conrelid = %s::regclass",
[foreign_key_name, table_name]
)
return cursor.fetchone() is not None
def add_locations_sqllocation_parent_fk(db_alias):
cursor = connections[db_alias].cursor()
cursor.execute(
"ALTER TABLE locations_sqllocation "
"ADD CONSTRAINT locations_sqlloc_parent_id_2ffc03fb_fk_locations_sqllocation_id "
"FOREIGN KEY (parent_id) REFERENCES locations_sqllocation(id) DEFERRABLE INITIALLY DEFERRED"
)
def add_form_processor_xformattachmentsql_form_id_fk(db_alias):
cursor = connections[db_alias].cursor()
cursor.execute(
"ALTER TABLE form_processor_xformattachmentsql "
"ADD CONSTRAINT for_form_id_d184240c_fk_form_processor_xforminstancesql_form_id "
"FOREIGN KEY (form_id) REFERENCES form_processor_xforminstancesql(form_id) DEFERRABLE INITIALLY DEFERRED"
)
def add_form_processor_commcarecaseindexsql_case_id_fk(db_alias):
cursor = connections[db_alias].cursor()
cursor.execute(
"ALTER TABLE form_processor_commcarecaseindexsql "
"ADD CONSTRAINT form_case_id_be4cb9e1_fk_form_processor_commcarecasesql_case_id "
"FOREIGN KEY (case_id) REFERENCES form_processor_commcarecasesql(case_id) DEFERRABLE INITIALLY DEFERRED"
)
def add_form_processor_casetransaction_case_id_fk(db_alias):
cursor = connections[db_alias].cursor()
cursor.execute(
"ALTER TABLE form_processor_casetransaction "
"ADD CONSTRAINT form_case_id_0328b100_fk_form_processor_commcarecasesql_case_id "
"FOREIGN KEY (case_id) REFERENCES form_processor_commcarecasesql(case_id) DEFERRABLE INITIALLY DEFERRED"
)
class Command(BaseCommand):
help = ""
log_file = None
def log(self, text):
self.log_file.write(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S: "))
self.log_file.write(text)
self.log_file.write('\n')
def add_arguments(self, parser):
parser.add_argument(
'--check-only',
action='store_true',
dest='check_only',
default=False,
help="Only check if the foreign keys exist but don't add anything",
)
def try_to_add_fk(self, function, db_alias):
try:
function(db_alias)
self.log(" foreign key added")
except Exception as e:
self.log(" error adding foreign key: %s" % e)
def handle_locations_sqllocation(self):
self.log(" handling locations_sqllocation")
if foreign_key_exists('default', 'locations_sqllocation', 'locations_sqlloc_parent_id_2ffc03fb_fk_locations_sqllocation_id'):
self.log(" foreign key exists")
else:
self.log(" foreign key DOES NOT exist")
if not self.check_only:
self.try_to_add_fk(add_locations_sqllocation_parent_fk, 'default')
def handle_form_processor_xformattachmentsql(self, db_alias):
self.log(" handling form_processor_xformattachmentsql")
if foreign_key_exists(db_alias, 'form_processor_xformattachmentsql', 'for_form_id_d184240c_fk_form_processor_xforminstancesql_form_id'):
self.log(" foreign key exists")
else:
self.log(" foreign key DOES NOT exist")
if not self.check_only:
self.try_to_add_fk(add_form_processor_xformattachmentsql_form_id_fk, db_alias)
def handle_form_processor_commcarecaseindexsql(self, db_alias):
self.log(" handling form_processor_commcarecaseindexsql")
if foreign_key_exists(db_alias, 'form_processor_commcarecaseindexsql', 'form_case_id_be4cb9e1_fk_form_processor_commcarecasesql_case_id'):
self.log(" foreign key exists")
else:
self.log(" foreign key DOES NOT exist")
if not self.check_only:
self.try_to_add_fk(add_form_processor_commcarecaseindexsql_case_id_fk, db_alias)
def handle_form_processor_casetransaction(self, db_alias):
self.log(" handling form_processor_casetransaction")
if foreign_key_exists(db_alias, 'form_processor_casetransaction', 'form_case_id_0328b100_fk_form_processor_commcarecasesql_case_id'):
self.log(" foreign key exists")
else:
self.log(" foreign key DOES NOT exist")
if not self.check_only:
self.try_to_add_fk(add_form_processor_casetransaction_case_id_fk, db_alias)
def handle(self, check_only, **options):
self.check_only = check_only
with open('add_back_enikshay_foreign_keys.log', 'a') as f:
self.log_file = f
self.log("")
self.log("running script to add back missing foreign keys")
self.log("check_only is: %s" % check_only)
self.log(" handling db: default")
self.handle_locations_sqllocation()
for db_alias in get_db_aliases_for_partitioned_query():
self.log(" handling db: %s" % db_alias)
self.handle_form_processor_xformattachmentsql(db_alias)
self.handle_form_processor_commcarecaseindexsql(db_alias)
self.handle_form_processor_casetransaction(db_alias)
| Python | 0.000009 | |
5dd4deba3d5a53406e735aadad5ac917919b3852 | add tests for PlotableObject | tests/unit/TestPlotableObject.py | tests/unit/TestPlotableObject.py | import os
import unittest
import ROOT
from PyAnalysisTools.PlottingUtils import PlotableObject as po
cwd = os.path.dirname(__file__)
ROOT.gROOT.SetBatch(True)
class TestPlotableObject(unittest.TestCase):
def test_ctor(self):
obj = po.PlotableObject()
self.assertIsNone(obj.plot_object)
self.assertTrue(obj.is_ref)
self.assertEqual(-1, obj.ref_id)
self.assertEqual('', obj.label)
self.assertIsNone(obj.cuts)
self.assertIsNone(obj.process)
self.assertEqual('Marker', obj.draw_option)
self.assertEqual('Marker', obj.draw)
self.assertEqual(1, obj.marker_color)
self.assertEqual(1, obj.marker_size)
self.assertEqual(1, obj.marker_style)
self.assertEqual(1, obj.line_color)
self.assertEqual(1, obj.line_width)
self.assertEqual(1, obj.line_style)
self.assertEqual(0, obj.fill_color)
self.assertEqual(0, obj.fill_style)
def tests_palettes(self):
color_palette = [ROOT.kGray + 3, ROOT.kRed + 2, ROOT.kAzure + 4, ROOT.kSpring - 6, ROOT.kOrange - 3,
ROOT.kCyan - 3, ROOT.kPink - 2, ROOT.kSpring - 9, ROOT.kMagenta - 5]
marker_style_palette_filled = [20, 22, 23, 33, 34, 29, 2]
marker_style_palette_empty = [24, 26, 32, 27, 28, 30, 5]
line_style_palette_homogen = [1, 1, 1, 1, 1]
line_style_palette_heterogen = [1, 1, 4, 8, 6]
fill_style_palette_left = [3305, 3315, 3325, 3335, 3345, 3365, 3375, 3385]
fill_style_palette_right = [3359, 3351, 3352, 3353, 3354, 3356, 3357, 3358]
self.assertEqual(color_palette, po.color_palette)
self.assertEqual(marker_style_palette_filled, po.marker_style_palette_filled)
self.assertEqual(marker_style_palette_empty, po.marker_style_palette_empty)
self.assertEqual(line_style_palette_homogen, po.line_style_palette_homogen)
self.assertEqual(line_style_palette_heterogen, po.line_style_palette_heterogen)
self.assertEqual(fill_style_palette_left, po.fill_style_palette_left)
self.assertEqual(fill_style_palette_right, po.fill_style_palette_right)
| Python | 0 | |
f0651b2b68ecb4ac093a07d72722b65ea134baa9 | Remove trailing slashes. | pelican/__init__.py | pelican/__init__.py | import argparse
import os
from pelican.settings import read_settings
from pelican.utils import clean_output_dir
from pelican.writers import Writer
from pelican.generators import (ArticlesGenerator, PagesGenerator,
StaticGenerator, PdfGenerator)
def init_params(settings=None, path=None, theme=None, output_path=None,
markup=None, keep=False):
"""Read the settings, and performs some checks on the environment
before doing anything else.
"""
if settings is None:
settings = {}
settings = read_settings(settings)
path = path or settings['PATH']
if path.endswith('/'):
path = path[:-1]
# define the default settings
theme = theme or settings['THEME']
output_path = output_path or settings['OUTPUT_PATH']
output_path = os.path.realpath(output_path)
markup = markup or settings['MARKUP']
keep = keep or settings['KEEP_OUTPUT_DIRECTORY']
# find the theme in pelican.theme if the given one does not exists
if not os.path.exists(theme):
theme_path = os.sep.join([os.path.dirname(
os.path.abspath(__file__)), "themes/%s" % theme])
if os.path.exists(theme_path):
theme = theme_path
else:
raise Exception("Impossible to find the theme %s" % theme)
if 'SITEURL' not in settings:
settings['SITEURL'] = output_path
# get the list of files to parse
if not path:
raise Exception('you need to specify a path to search the docs on !')
return settings, path, theme, output_path, markup, keep
def run_generators(generators, settings, path, theme, output_path, markup, keep):
"""Run the generators and return"""
context = settings.copy()
generators = [p(context, settings, path, theme, output_path, markup, keep)
for p in generators]
for p in generators:
if hasattr(p, 'generate_context'):
p.generate_context()
# erase the directory if it is not the source
if output_path not in os.path.realpath(path) and not keep:
clean_output_dir(output_path)
writer = Writer(output_path)
for p in generators:
if hasattr(p, 'generate_output'):
p.generate_output(writer)
def run_pelican(settings, path, theme, output_path, markup, delete):
"""Run pelican with the given parameters"""
params = init_params(settings, path, theme, output_path, markup, delete)
generators = [ArticlesGenerator, PagesGenerator, StaticGenerator]
if params[0]['PDF_GENERATOR']: # param[0] is settings
processors.append(PdfGenerator)
run_generators(generators, *params)
def main():
parser = argparse.ArgumentParser(description="""A tool to generate a
static blog, with restructured text input files.""")
parser.add_argument(dest='path',
help='Path where to find the content files')
parser.add_argument('-t', '--theme-path', dest='theme',
help='Path where to find the theme templates. If not specified, it will'
'use the default one included with pelican.')
parser.add_argument('-o', '--output', dest='output',
help='Where to output the generated files. If not specified, a directory'
' will be created, named "output" in the current path.')
parser.add_argument('-m', '--markup', default='', dest='markup',
help='the markup language to use (rst or md).')
parser.add_argument('-s', '--settings', dest='settings',
help='the settings of the application. Default to None.')
parser.add_argument('-k', '--keep-output-directory', dest='keep', action='store_true',
help='Keep the output directory and just update all the generated files. Default is to delete the output directory.')
args = parser.parse_args()
markup = [a.strip().lower() for a in args.markup.split(',')]
run_pelican(args.settings, args.path, args.theme, args.output, markup, args.keep)
if __name__ == '__main__':
main()
| import argparse
import os
from pelican.settings import read_settings
from pelican.utils import clean_output_dir
from pelican.writers import Writer
from pelican.generators import (ArticlesGenerator, PagesGenerator,
StaticGenerator, PdfGenerator)
def init_params(settings=None, path=None, theme=None, output_path=None,
markup=None, keep=False):
"""Read the settings, and performs some checks on the environment
before doing anything else.
"""
if settings is None:
settings = {}
settings = read_settings(settings)
path = path or settings['PATH']
if path.endswith('/'):
path = path[:-1]
# define the default settings
theme = theme or settings['THEME']
output_path = output_path or settings['OUTPUT_PATH']
output_path = os.path.realpath(output_path)
markup = markup or settings['MARKUP']
keep = keep or settings['KEEP_OUTPUT_DIRECTORY']
# find the theme in pelican.theme if the given one does not exists
if not os.path.exists(theme):
theme_path = os.sep.join([os.path.dirname(
os.path.abspath(__file__)), "themes/%s" % theme])
if os.path.exists(theme_path):
theme = theme_path
else:
raise Exception("Impossible to find the theme %s" % theme)
if 'SITEURL' not in settings:
settings['SITEURL'] = output_path
# get the list of files to parse
if not path:
raise Exception('you need to specify a path to search the docs on !')
return settings, path, theme, output_path, markup, keep
def run_generators(generators, settings, path, theme, output_path, markup, keep):
"""Run the generators and return"""
context = settings.copy()
generators = [p(context, settings, path, theme, output_path, markup, keep)
for p in generators]
for p in generators:
if hasattr(p, 'generate_context'):
p.generate_context()
# erase the directory if it is not the source
if output_path not in os.path.realpath(path) and not keep:
clean_output_dir(output_path)
writer = Writer(output_path)
for p in generators:
if hasattr(p, 'generate_output'):
p.generate_output(writer)
def run_pelican(settings, path, theme, output_path, markup, delete):
"""Run pelican with the given parameters"""
params = init_params(settings, path, theme, output_path, markup, delete)
generators = [ArticlesGenerator, PagesGenerator, StaticGenerator]
if params[0]['PDF_GENERATOR']: # param[0] is settings
processors.append(PdfGenerator)
run_generators(generators, *params)
def main():
parser = argparse.ArgumentParser(description="""A tool to generate a
static blog, with restructured text input files.""")
parser.add_argument(dest='path',
help='Path where to find the content files')
parser.add_argument('-t', '--theme-path', dest='theme',
help='Path where to find the theme templates. If not specified, it will'
'use the default one included with pelican.')
parser.add_argument('-o', '--output', dest='output',
help='Where to output the generated files. If not specified, a directory'
' will be created, named "output" in the current path.')
parser.add_argument('-m', '--markup', default='', dest='markup',
help='the markup language to use (rst or md).')
parser.add_argument('-s', '--settings', dest='settings',
help='the settings of the application. Default to None.')
parser.add_argument('-k', '--keep-output-directory', dest='keep', action='store_true',
help='Keep the output directory and just update all the generated files. Default is to delete the output directory.')
args = parser.parse_args()
markup = [a.strip().lower() for a in args.markup.split(',')]
run_pelican(args.settings, args.path, args.theme, args.output, markup, args.keep)
if __name__ == '__main__':
main()
| Python | 0.000005 |
854a1ab7c13b4d4d8e28ab13f0cdaef5c1fcb9a6 | Create solution.py | hackerrank/algorithms/warmup/easy/compare_the_triplets/py/solution.py | hackerrank/algorithms/warmup/easy/compare_the_triplets/py/solution.py | #!/bin/python3
import sys
cmp = lambda a, b: (a > b) - (b > a)
aliceScores = tuple(map(int, input().strip().split(' ')))
bobScores = tuple(map(int, input().strip().split(' ')))
scoreCmp = tuple(map(lambda a, b: cmp(a, b), aliceScores, bobScores))
aliceScore = len(tuple(filter(lambda x: x > 0, scoreCmp)))
bobScore = len(tuple(filter(lambda x: x < 0, scoreCmp)))
print(aliceScore, bobScore)
| Python | 0.000018 | |
4764b5248cf91042a12ce6aef77a04c37360eb4f | Add initial shell of Pyglab class. | pyglab/pyglab.py | pyglab/pyglab.py | import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN', token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
| Python | 0 | |
70e04b20c5d78b41546aa4ea1a1e2fd82af7527f | Add JSON HttpResponse that does the encoding for you. | comrade/http/__init__.py | comrade/http/__init__.py | from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
| Python | 0 | |
d3ba2b8cf84ba54d932fcc48b464f125798c0b27 | Add simple bash with git install script | toolbox/install_script_git.sh.py | toolbox/install_script_git.sh.py | #!/bin/bash
venv="nephoria_venv"
neph_branch="oldboto"
adminapi_branch="master"
yum install -y python-devel gcc git python-setuptools python-virtualenv
if [ ! -d adminapi ]; then
git clone https://github.com/nephomaniac/adminapi.git
fi
if [ ! -d nephoria ]; then
git clone https://github.com/nephomaniac/nephoria.git
fi
if [ "x$venv" != "x" ]; then
if [ ! -d $venv ]; then
virtualenv $venv
fi
source $venv/bin/activate
fi
cd adminapi
git checkout $adminapi_branch
python setup.py install
cd -
cd nephoria
git checkout $neph_branch
python setup.py install
cd - | Python | 0 | |
5ae194cacef0a24c3d6a0714d3f435939973b3cb | Add some helpful utilities | utils.py | utils.py | from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
| Python | 0 | |
93b1253389075174fa942e848d6c1f7666ffc906 | add solution for Combination Sum II | src/combinationSumII.py | src/combinationSumII.py | class Solution:
# @param candidates, a list of integers
# @param target, integer
# @return a list of lists of integers
def combinationSum2(self, candidates, target):
if not candidates:
return []
candidates.sort()
n = len(candidates)
res = set()
def solve(start, target, tmp):
if target < 0:
return
if target == 0:
res.add(tuple(tmp))
return
for i in xrange(start, n):
tmp.append(candidates[i])
solve(i+1, target-candidates[i], tmp)
tmp.pop()
solve(0, target, [])
return map(list, res)
| Python | 0.000001 | |
19d5b2f58d712f49638dad83996f9e60a6ebc949 | Add a release script. | release.py | release.py | #!/usr/bin/env python
import re
import ast
import subprocess
def version():
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pgcli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
return version
def create_git_tag(tag_name):
cmd = ['git', 'tag', tag_name]
print ' '.join(cmd)
subprocess.check_output(cmd)
def register_with_pypi():
cmd = ['python', 'setup.py', 'register']
print ' '.join(cmd)
subprocess.check_output(cmd)
def create_source_tarball():
cmd = ['python', 'setup.py', 'sdist']
print ' '.join(cmd)
subprocess.check_output(cmd)
if __name__ == '__main__':
ver = version()
print ver
create_git_tag('v%s' % ver)
register_with_pypi()
create_source_tarball()
| Python | 0 | |
7d2906d58db373f5f7326c140e8cb191bc3d0059 | Make other logging work when logging/config_file is in use | mopidy/utils/log.py | mopidy/utils/log.py | from __future__ import unicode_literals
import logging
import logging.config
import logging.handlers
class DelayedHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self._released = False
self._buffer = []
def handle(self, record):
if not self._released:
self._buffer.append(record)
def release(self):
self._released = True
root = logging.getLogger('')
while self._buffer:
root.handle(self._buffer.pop(0))
_delayed_handler = DelayedHandler()
def bootstrap_delayed_logging():
root = logging.getLogger('')
root.setLevel(logging.NOTSET)
root.addHandler(_delayed_handler)
def setup_logging(config, verbosity_level, save_debug_log):
logging.captureWarnings(True)
if config['logging']['config_file']:
# Logging config from file must be read before other handlers are
# added. If not, the other handlers will have no effect.
logging.config.fileConfig(config['logging']['config_file'])
setup_log_levels(config)
setup_console_logging(config, verbosity_level)
if save_debug_log:
setup_debug_logging_to_file(config)
_delayed_handler.release()
def setup_log_levels(config):
for name, level in config['loglevels'].items():
logging.getLogger(name).setLevel(level)
LOG_LEVELS = {
-1: dict(root=logging.ERROR, mopidy=logging.WARNING),
0: dict(root=logging.ERROR, mopidy=logging.INFO),
1: dict(root=logging.WARNING, mopidy=logging.DEBUG),
2: dict(root=logging.INFO, mopidy=logging.DEBUG),
3: dict(root=logging.DEBUG, mopidy=logging.DEBUG),
}
class VerbosityFilter(logging.Filter):
def __init__(self, verbosity_level):
self.verbosity_level = verbosity_level
def filter(self, record):
if record.name.startswith('mopidy'):
required_log_level = LOG_LEVELS[self.verbosity_level]['mopidy']
else:
required_log_level = LOG_LEVELS[self.verbosity_level]['root']
return record.levelno >= required_log_level
def setup_console_logging(config, verbosity_level):
if verbosity_level < min(LOG_LEVELS.keys()):
verbosity_level = min(LOG_LEVELS.keys())
if verbosity_level > max(LOG_LEVELS.keys()):
verbosity_level = max(LOG_LEVELS.keys())
verbosity_filter = VerbosityFilter(verbosity_level)
if verbosity_level < 1:
log_format = config['logging']['console_format']
else:
log_format = config['logging']['debug_format']
formatter = logging.Formatter(log_format)
handler = logging.StreamHandler()
handler.addFilter(verbosity_filter)
handler.setFormatter(formatter)
logging.getLogger('').addHandler(handler)
def setup_debug_logging_to_file(config):
formatter = logging.Formatter(config['logging']['debug_format'])
handler = logging.handlers.RotatingFileHandler(
config['logging']['debug_file'], maxBytes=10485760, backupCount=3)
handler.setFormatter(formatter)
logging.getLogger('').addHandler(handler)
| from __future__ import unicode_literals
import logging
import logging.config
import logging.handlers
class DelayedHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self._released = False
self._buffer = []
def handle(self, record):
if not self._released:
self._buffer.append(record)
def release(self):
self._released = True
root = logging.getLogger('')
while self._buffer:
root.handle(self._buffer.pop(0))
_delayed_handler = DelayedHandler()
def bootstrap_delayed_logging():
root = logging.getLogger('')
root.setLevel(logging.NOTSET)
root.addHandler(_delayed_handler)
def setup_logging(config, verbosity_level, save_debug_log):
logging.captureWarnings(True)
setup_log_levels(config)
setup_console_logging(config, verbosity_level)
if save_debug_log:
setup_debug_logging_to_file(config)
if config['logging']['config_file']:
logging.config.fileConfig(config['logging']['config_file'])
_delayed_handler.release()
def setup_log_levels(config):
for name, level in config['loglevels'].items():
logging.getLogger(name).setLevel(level)
LOG_LEVELS = {
-1: dict(root=logging.ERROR, mopidy=logging.WARNING),
0: dict(root=logging.ERROR, mopidy=logging.INFO),
1: dict(root=logging.WARNING, mopidy=logging.DEBUG),
2: dict(root=logging.INFO, mopidy=logging.DEBUG),
3: dict(root=logging.DEBUG, mopidy=logging.DEBUG),
}
class VerbosityFilter(logging.Filter):
def __init__(self, verbosity_level):
self.verbosity_level = verbosity_level
def filter(self, record):
if record.name.startswith('mopidy'):
required_log_level = LOG_LEVELS[self.verbosity_level]['mopidy']
else:
required_log_level = LOG_LEVELS[self.verbosity_level]['root']
return record.levelno >= required_log_level
def setup_console_logging(config, verbosity_level):
if verbosity_level < min(LOG_LEVELS.keys()):
verbosity_level = min(LOG_LEVELS.keys())
if verbosity_level > max(LOG_LEVELS.keys()):
verbosity_level = max(LOG_LEVELS.keys())
verbosity_filter = VerbosityFilter(verbosity_level)
if verbosity_level < 1:
log_format = config['logging']['console_format']
else:
log_format = config['logging']['debug_format']
formatter = logging.Formatter(log_format)
handler = logging.StreamHandler()
handler.addFilter(verbosity_filter)
handler.setFormatter(formatter)
logging.getLogger('').addHandler(handler)
def setup_debug_logging_to_file(config):
formatter = logging.Formatter(config['logging']['debug_format'])
handler = logging.handlers.RotatingFileHandler(
config['logging']['debug_file'], maxBytes=10485760, backupCount=3)
handler.setFormatter(formatter)
logging.getLogger('').addHandler(handler)
| Python | 0 |
0003b3fe31a1b92dda994b2f7eacf6cef7e08ce4 | Add check_blocked.py | check_blocked.py | check_blocked.py | # This script is licensed under the GNU Affero General Public License
# either version 3 of the License, or (at your option) any later
# version.
#
# This script was tested on GNU/Linux opreating system.
#
# To run this script:
# 1) Download the list of articles for the Wikipedia edition that
# you want to scan from http://download.wikimedia.org.
# 2) Using 'split' command, split th article list into peices. This
# will result in files that start with 'x' eg. 'xaa', xab', etc.
# 3) If you are working on a Wikipedia edition that's different from
# the Arabic one, change self.lang_code into the code of your
# edition.
# 4) Run the script from the directory of the split files.
import urllib2
import time
import os
import codecs
import shelve
class checkBlocked:
def __init__(self):
self.lang_code = 'ar'
self.list_directory = os.getcwd()
self.list_files = [i for i in os.listdir('.') if i.startswith('x')]
self.list_files.sort()
def fetch_list(self, next_list, old_list):
if old_list is not None:
print "Removing list", old_list
os.remove(self.list_directory+'/'+old_list)
list_lines = codecs.open(self.list_directory+'/'+next_list, 'r', encoding="utf-8").readlines()
list_items = [i.strip() for i in list_lines]
return list_items
def is_blocked(self, list_item):
url = "http://%s.wikipedia.org/wiki/" % self.lang_code + urllib2.quote(list_item.encode('utf8'))
print url
while True:
try:
urllib2.urlopen(url)
except urllib2.HTTPError:
print list_item, "isn't blocked."
return False
except urllib2.URLError:
print "Error, retrying..."
time.sleep(1)
continue
print list_item, "is blocked."
return True
def run(self):
old_list = None
try:
for list_file in self.list_files:
database = shelve.open("check_blocked.db")
list_items = self.fetch_list(list_file, old_list)
for list_item in list_items:
if self.is_blocked(list_item):
datebase_key = str(len(database))
datebase[datebase_key] = list_item
old_list = list_file
database.close()
except KeyboardInterrupt:
print "Existing..."
database.close()
if __name__ == '__main__':
bot = checkBlocked()
bot.run()
| Python | 0.000002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.