commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
5e13e3bc045d496232e5ced6b7dc314f14183257 | Create a copy of the Netgen reader example and add some statistics calculation. | jonancm/viennagrid-python,jonancm/viennagrid-python,jonancm/viennagrid-python | doc/examples/viennagrid_wrapper/io_stats.py | doc/examples/viennagrid_wrapper/io_stats.py | #!/usr/bin/env python
#
# This example shows is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices an cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segmets')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
| mit | Python | |
a40f0115892719d1a41e658fcdd20ec1473356f1 | Add stub study ix | hecanjog/pattern.studies | study.ix.py | study.ix.py | from pippi import dsp, tune
from hcj import fx
out = ''
dsp.write(out, 'study.ix')
| cc0-1.0 | Python | |
e64e6327a156cfd72cf629fc05d480e45b2a6e57 | Add __init__.py to page_sets | aosp-mirror/platform_external_skia,rubenvb/skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,HalCanary/skia-hc,HalCanary/skia-hc,rubenvb/skia,HalCanary/skia-hc,HalCanary/skia-hc,rubenvb/skia,HalCanary/skia-hc,rubenvb/skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,rubenvb/skia,HalCanary/skia-hc,google/skia,rubenvb/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,google/skia,rubenvb/skia,HalCanary/skia-hc,HalCanary/skia-hc,HalCanary/skia-hc,google/skia,google/skia,google/skia,google/skia,rubenvb/skia,google/skia,aosp-mirror/platform_external_skia,rubenvb/skia,rubenvb/skia,HalCanary/skia-hc | tools/skp/page_sets/__init__.py | tools/skp/page_sets/__init__.py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
| bsd-3-clause | Python | |
c0a408759b887006cd09f0ea9f366336b2322fe8 | add new email API file | CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend | api/email.py | api/email.py | """
Atmosphere api email
"""
from django.utils.timezone import datetime
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from libcloud.common.types import InvalidCredsError
from threepio import logger
from authentication.decorators import api_auth_token_required
from authentication.protocol.ldap import lookupEmail
from core.models.provider import AccountProvider
from core.models.volume import convert_esh_volume
from service.volume import create_volume
from service.exceptions import OverQuotaError
from api.serializers import VolumeSerializer
from api import prepare_driver, failure_response, invalid_creds
from web.emails import feedback_email, quota_request_email, support_email
class Feedback(APIView):
"""
Post feedback via RESTful API
"""
@api_auth_token_required
def post(self, request):
"""
Creates a new feedback email and sends it to admins
"""
data = request.DATA
required = ['message',]
missing_keys = valid_post_data(data, required)
if missing_keys:
return keys_not_found(missing_keys)
#Pass arguments
user = request.user
message = data['message']
user_email = lookupEmail(user.username)
result = feedback_email(request, user.username, user_email, message)
return Response(result, status=status.HTTP_201_CREATED)
class QuotaEmail(APIView):
"""
Post Quota Email via RESTful API
"""
@api_auth_token_required
def post(self, request):
"""
Creates a new Quota Request email and sends it to admins
"""
data = request.DATA
required = ['quota', 'reason']
missing_keys = valid_post_data(data, required)
if missing_keys:
return keys_not_found(missing_keys)
#Pass arguments
username = request.user.username
quota = data['quota']
reason = data['reason']
result = quota_request_email(request, username, quota, reason)
return Response(result, status=status.HTTP_201_CREATED)
class SupportEmail(APIView):
"""
Post Support Email via RESTful API
"""
@api_auth_token_required
def post(self, request):
"""
Creates a new support email and sends it to admins
"""
data = request.DATA
required = ['message','subject']
missing_keys = valid_post_data(data, required)
if missing_keys:
return keys_not_found(missing_keys)
#Pass arguments
subject = data['subject']
message = data['message']
result = support_email(request, subject, message)
return Response(result, status=status.HTTP_201_CREATED)
def valid_post_data(data, required_keys):
"""
Return any missing required post key names.
"""
return [key for key in required_keys if not key in data]
def keys_not_found(missing_keys):
return failure_response(
status.HTTP_400_BAD_REQUEST,
'Missing required POST data variables : %s' % missing_keys)
| apache-2.0 | Python | |
2805967f5bf36ee24362e20587eb394c7398753d | add impression call for python | intuit/wasabi,intuit/wasabi,intuit/wasabi,intuit/wasabi,intuit/wasabi,intuit/wasabi | modules/ui/app/resources/samplecode/python/impression.py | modules/ui/app/resources/samplecode/python/impression.py | import requests
import json
def get_impression(application, experiment, user):
"""
Records an impression for the given user and experiment.
Args:
application: the application the experiment runs in
experiment: the running experiment for which the impression should be recorded
user: the user who should be assigned
"""
urlAssignment = "http://abtesting.intuit.com/api/v1/events/applications/%s/experiments/%s/users/%s" %(application, experiment, user);
headers = {'content-type': 'application/json'}
events = {'events':[{'name':'IMPRESSION'}]}
r = requests.post(urlAssignment, data = json.dumps(events), headers=headers)
if r.status_code == 201: # when the request returns 201 the impression was recorded correctly
return True
return False
if __name__ == "__main__":
application = 'ApplicationName'
experiment = 'ExperimentName'
user = 'UserName'
print('Impression recorded' if get_impression(application, experiment, user) else 'Impression not recorded')
| apache-2.0 | Python | |
355aabf2e975b50f552ff2599bb7909d8f63c614 | Create machine.py | trevorwitter/Stats | machine.py | machine.py | def split_data(data, prob):
"""split data into fractions[prob, 1 - prob]"""
results = [], []
for row in data:
results[0 if random.random() < prob else 1]. append(row)
return results
| mit | Python | |
04312795ddc93b5e005d6c1615ff801534ba7457 | Add test capturing failure. Ref #1697. | cherrypy/cherrypy,Safihre/cherrypy,Safihre/cherrypy,cherrypy/cherrypy | cherrypy/test/test_plugins.py | cherrypy/test/test_plugins.py | from cherrypy.process import plugins
__metaclass__ = type
class TestAutoreloader:
def test_file_for_file_module_when_None(self):
"""No error when module.__file__ is None.
"""
class test_module:
__file__ = None
assert plugins.Autoreloader._file_for_file_module(test_module) is None
| bsd-3-clause | Python | |
e1bef44be34efd637bc2acdaf71f01b5d77deaec | Add demo case storage integration | openego/eDisGo,openego/eDisGo | edisgo/flex_opt/storage_integration.py | edisgo/flex_opt/storage_integration.py | from edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define it's operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line') | agpl-3.0 | Python | |
e81e2f1f0a4fef0b767368981aceddbc03036be7 | create template filters which return the download/view count for a LR ID | MiltosD/CEF-ELRC,JuliBakagianni/META-SHARE,JuliBakagianni/META-SHARE,JuliBakagianni/META-SHARE,JuliBakagianni/META-SHARE,JuliBakagianni/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,MiltosD/CEFELRC,JuliBakagianni/CEF-ELRC,MiltosD/CEFELRC,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,JuliBakagianni/META-SHARE,zeehio/META-SHARE,JuliBakagianni/META-SHARE,MiltosD/CEF-ELRC,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC,JuliBakagianni/META-SHARE,zeehio/META-SHARE,MiltosD/CEFELRC,MiltosD/CEFELRC,zeehio/META-SHARE,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEFELRC,JuliBakagianni/CEF-ELRC,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC | metashare/repository/templatetags/resource_access_stats.py | metashare/repository/templatetags/resource_access_stats.py | """
Project: META-SHARE
Author: Christian Spurk <cspurk@dfki.de>
"""
from django import template
from metashare.repository import model_utils
from metashare.stats.model_utils import DOWNLOAD_STAT, VIEW_STAT
# module level "register" variable as required by Django
register = template.Library()
def get_download_count(identifier):
"""
Template filter which returns the download count for the resource with the
given (storage object) identifier string.
If the given identifier should be unknown, then 0 is returned.
"""
return model_utils.get_lr_stat_action_count(identifier, DOWNLOAD_STAT)
register.filter('get_download_count', get_download_count)
def get_view_count(identifier):
"""
Template filter which returns the view count for the resource with the given
(storage object) identifier string.
If the given identifier should be unknown, then 0 is returned.
"""
return model_utils.get_lr_stat_action_count(identifier, VIEW_STAT)
register.filter('get_view_count', get_view_count)
| bsd-3-clause | Python | |
ca702a03c4470931e974cf36ffd8a7a03efdcef4 | move plot_results up | alito/deep_q_rl,alito/deep_q_rl | plot_results.py | plot_results.py | """Plots data corresponding to Figure 2 in
Playing Atari with Deep Reinforcement Learning
Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves, Ioannis
Antonoglou, Daan Wierstra, Martin Riedmiller
"""
import sys, os
import logging
import numpy as np
import matplotlib.pyplot as plt
DefaultTrainedEpoch = 100
def read_data(filename):
input_file = open(filename, "rb")
header = input_file.readline().strip().split(',')
columns = {}
for index, column in enumerate(header):
columns[column] = index
results = np.loadtxt(input_file, delimiter=",")
input_file.close()
return columns, results
def plot(results, column_indices, plot_q_values, plot_max_values, game_name):
# Modify this to do some smoothing...
kernel = np.array([1.] * 1)
kernel = kernel / np.sum(kernel)
plot_count = 1
if plot_q_values:
if 'mean_q' not in column_indices:
logging.warn("No mean Q value in results. Skipping")
plot_q_values = False
else:
plot_count += 1
if plot_max_values:
if 'best_reward' not in column_indices:
logging.warn("No max reward per epoch in results. Skipping")
plot_max_values = False
else:
plot_count += 1
scores = plt.subplot(1, plot_count, 1)
plt.plot(results[:, column_indices['epoch']], np.convolve(results[:, column_indices['reward_per_epoch']], kernel, mode='same'), '-*')
scores.set_xlabel('epoch')
scores.set_ylabel('score')
current_sub_plot = 2
if plot_max_values:
max_values = plt.subplot(1, plot_count, current_sub_plot)
current_sub_plot += 1
plt.plot(results[:, column_indices['epoch']], results[:, column_indices['best_reward']], 'r-.')
max_values.set_xlabel('epoch')
max_values.set_ylabel('Max score')
y_limits = max_values.get_ylim()
# set main score's limits to be the same as this one to make comparison easier
scores.set_ylim(y_limits)
if plot_q_values:
qvalues = plt.subplot(1, plot_count, current_sub_plot)
current_sub_plot += 1
plt.plot(results[:, column_indices['epoch']], results[:, column_indices['mean_q']], '-')
qvalues.set_xlabel('epoch')
qvalues.set_ylabel('Q value')
if game_name and plot_count == 1:
scores.set_title(game_name)
plt.show()
def setupLogging(verbosity):
if verbosity == 0:
level = logging.ERROR
elif verbosity == 1:
level = logging.WARNING
elif verbosity == 2:
level = logging.INFO
else:
level = logging.DEBUG
logging.basicConfig(level=level)
def main(args):
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("-v", "--verbose", dest="verbosity", default=0, action="count",
help="Verbosity. Invoke many times for higher verbosity")
parser.add_argument("-g", "--game-name", dest="game_name", default=None,
help="Name of game to put on title")
parser.add_argument("--no-q", dest="plotQValues", default=True, action="store_false",
help="Don't plot the Q values")
parser.add_argument("--no-max", dest="plotMaxValues", default=True, action="store_false",
help="Don't plot the max values")
parser.add_argument("-t", "--trained-epoch", dest="trained_epoch", default=DefaultTrainedEpoch, type=int,
help="Epoch at which we consider the network as trained (default: %(default)s)")
parser.add_argument("results", nargs=1,
help="Results file")
parameters = parser.parse_args(args)
setupLogging(parameters.verbosity)
results_filename = os.path.expanduser(parameters.results[0])
if not os.path.isfile(results_filename) and os.path.isdir(results_filename) and os.path.isfile(os.path.join(results_filename, 'results.csv')):
# They pointed to the directory instead of the filename
results_filename = os.path.join(results_filename, 'results.csv')
column_indices, results = read_data(results_filename)
plot(results, column_indices, parameters.plotQValues, parameters.plotMaxValues, parameters.game_name)
logging.info("Max average score on epoch %s: %s" % (np.argmax(results[:, column_indices['reward_per_epoch']]) + 1, np.max(results[:, column_indices['reward_per_epoch']])))
if 'best_reward' in column_indices:
logging.info("Best score on epoch %s: %s" % (np.argmax(results[:, column_indices['best_reward']]) + 1, np.max(results[:, column_indices['best_reward']])))
logging.info("Average score after %d epochs: %s" % (parameters.trained_epoch, np.mean(results[parameters.trained_epoch:, column_indices['reward_per_epoch']])))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python | |
ff9cbbac188f78ed33cb2f650a32777713911384 | Add small demo for a monochrome GST pipeline | TheImagingSource/tiscamera,TheImagingSource/tiscamera,TheImagingSource/tiscamera,TheImagingSource/tiscamera | examples/python/monochrome_pipeline.py | examples/python/monochrome_pipeline.py | import gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
| apache-2.0 | Python | |
d488afb6416072f9d6557ab19a02487f3666c38c | Tidy up some python | TheTedHogan/shakitz-fantasy-football,TheTedHogan/shakitz-fantasy-football,TheTedHogan/shakitz-fantasy-football,ishakir/shakitz-fantasy-football,ishakir/shakitz-fantasy-football,ishakir/shakitz-fantasy-football,TheTedHogan/shakitz-fantasy-football,ishakir/shakitz-fantasy-football | python/footbawwlapi/__init__.py | python/footbawwlapi/__init__.py | import json
import nflgame
from footbawwlapi.game import Game
def create_all_players(host, port, year, kind, game_weeks):
games = []
for week in game_weeks:
for game in nflgame.games(year, week = week, kind = kind):
games.append(Game(game, week))
if not games:
raise RuntimeError("Couldn't find any {}-games in {}, did you get the year right?".format(kind, year))
offensive_players = {}
defensive_players = {}
for game in games:
for player in game.offensive_players():
if player.playerid not in offensive_players:
offensive_players[player.playerid] = player
for player in game.defensive_players():
if player.team not in defensive_players:
defensive_players[player.team] = player
all_players = dict(offensive_players.items() + defensive_players.items())
total_no_players = len(all_players.keys())
counter = 1
for key, value in all_players.iteritems():
print "Uploading player "+value.name+" "+str(counter)+"/"+str(total_no_players)
response = value.get_api_facade(host, port).create()
if response.status_code != 200:
print "Error creating player "+player.name+" code was "+str(response.status_code)
counter += 1
def update_player_stats(host, port, player):
print "Updating stats for player "+player.name
api_facade = player.get_api_facade(host, port)
response = api_facade.update_stats()
if response.status_code != 200:
print "ERROR: Got response code "+str(response.status_code)+" from player "+player.name+" in team "+player.team
response_json = json.loads(response.text)
print response_json
for message in response_json['messages']:
print message['message']
if response.status_code == 404:
print "Creating player "+player.name
api_facade.create()
update_player_stats(host, port, player)
def update_stats(host, port, year, kind, game_week):
games = nflgame.games(year, week = game_week, kind = kind)
for nfl_game in games:
game = Game(nfl_game, game_week)
for player in game.all_players():
update_player_stats(host, port, player)
| import json
import nflgame
from footbawwlapi.game import Game
def create_all_players(host, port, year, kind, game_weeks):
games = []
for week in game_weeks:
print year
print week
print kind
for game in nflgame.games(year, week = week, kind = kind):
games.append(Game(game, week))
if not games:
raise RuntimeError("Couldn't find any {}-games in {}, did you get the year right?".format(kind, year))
offensive_players = {}
defensive_players = {}
for game in games:
for player in game.offensive_players():
if player.playerid not in offensive_players:
offensive_players[player.playerid] = player
for player in game.defensive_players():
if player.team not in defensive_players:
defensive_players[player.team] = player
all_players = dict(offensive_players.items() + defensive_players.items())
total_no_players = len(all_players.keys())
counter = 1
for key, value in all_players.iteritems():
print "Uploading player "+value.name+" "+str(counter)+"/"+str(total_no_players)
response = value.get_api_facade(host, port).create()
if response.status_code != 200:
print "Error creating player "+player.name+" code was "+str(response.status_code)
counter += 1
def update_player_stats(host, port, player):
print "Updating stats for player "+player.name
api_facade = player.get_api_facade(host, port)
response = api_facade.update_stats()
if response.status_code != 200:
print "ERROR: Got response code "+str(response.status_code)+" from player "+player.name+" in team "+player.team
response_json = json.loads(response.text)
print response_json
for message in response_json['messages']:
print message['message']
if response.status_code == 404:
print "Creating player "+player.name
api_facade.create()
update_player_stats(host, port, player)
def update_stats(host, port, year, kind, game_week):
games = nflgame.games(year, week = game_week, kind = kind)
for nfl_game in games:
game = Game(nfl_game, game_week)
for player in game.all_players():
update_player_stats(host, port, player)
| epl-1.0 | Python |
bab230c0d6debd6b1ab2c521f21d133841c1a7f5 | add a test for simple rnn | nebw/keras,keras-team/keras,kuza55/keras,kemaswill/keras,DeepGnosis/keras,relh/keras,keras-team/keras,dolaameng/keras,daviddiazvico/keras | tests/keras/layers/test_simplernn.py | tests/keras/layers/test_simplernn.py | import theano
import unittest
from numpy.testing import assert_allclose
import numpy as np
from keras.layers.recurrent import SimpleRNN
from mock import Mock
floatX = theano.config.floatX
__author__ = "Jeff Ye"
class TestSimpleRNN(unittest.TestCase):
left_padding_data = np.array(
[
[ # batch 1
[0], [1], [2], [3]
],
[ # batch 2
[0], [0], [1], [2]
]
], dtype=floatX)
left_padding_mask = np.array( # n_sample x n_time
[
[ # batch 1
0, 1, 1, 1
],
[ # batch 2
0, 0, 1, 1
]
], dtype=np.int32)
def setUp(self):
W = np.array([[1]], dtype=floatX)
U = np.array([[1]], dtype=floatX)
b = np.array([0], dtype=floatX)
weights = [W, U, b]
self.forward = SimpleRNN(output_dim=1, activation='linear', weights=weights)
self.backward = SimpleRNN(output_dim=1, activation='linear', weights=weights)
previous = Mock()
previous.nb_input = 1
previous.nb_output = 1
previous.output_shape = self.left_padding_data.shape
previous.get_output_mask = Mock()
self.previous = previous
def test_left_padding(self):
forward = self.forward
forward.go_backwards = False
forward.return_sequences = True
self.previous.get_output.return_value = theano.shared(value=self.left_padding_data)
self.previous.get_output_mask.return_value = theano.shared(value=self.left_padding_mask)
forward.set_previous(self.previous)
np.testing.assert_allclose(forward.get_output().eval(),
np.array([
[[0], [1], [3], [6]],
[[0], [0], [1], [3]]]))
backward = self.backward
backward.go_backwards = True
backward.return_sequences = True
self.previous.get_output.return_value = theano.shared(value=self.left_padding_data)
self.previous.get_output_mask.return_value = theano.shared(value=self.left_padding_mask)
backward.set_previous(self.previous)
np.testing.assert_allclose(backward.get_output().eval(),
np.array([
[[3], [5], [6], [0]],
[[2], [3], [0], [0]]]))
| apache-2.0 | Python | |
62b77547179fb7b480f433efed874731efa81ae7 | Add to_video.py | zhaipro/misc,zhaipro/misc | to_video.py | to_video.py | import os
import sys
import cv2
def to_video(ipath, ofn):
writer = None
ifns = os.listdir(ipath)
ifns.sort()
for ifn in ifns:
ifn = os.path.join(ipath, ifn)
frame = cv2.imread(ifn)
if not writer:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
h, w, _ = frame.shape
writer = cv2.VideoWriter(ofn, fourcc, 25, (w, h))
writer.write(frame)
writer.release()
if __name__ == '__main__':
ipath, ofn = sys.argv[1], sys.argv[2]
to_video(ipath, ofn)
| mit | Python | |
eaef37130b4978f9aaf741cfa0f4926d72a29744 | add ID3 | luanjunyi/simple-ml | tree/id3.py | tree/id3.py | from collections import defaultdict
import numpy as np
import pandas as pd
class ID3(object):
def __init__(self):
pass
def fit(self, X, y):
self.root_ = self.build_node(X, y, set(range(X.shape[1])), range(X.shape[0]))
def predict_prob(self, X):
return [self.predict_one_prob(self.root_, x) for x in X]
def predict(self, X):
return [self.predict_one(self.root_, x) for x in X]
def predict_one_prob(self, node, x):
if node.is_leave:
return node.prob
val = x[node.split_column]
if val in node.children:
return self.predict_one_prob(node.children[val], x)
else:
return node.prob
def predict_one(self, node, x):
prob = self.predict_one_prob(node, x)
return sorted(prob.items(), key = lambda t: -t[1])[0][0]
def build_node(self, X, y, column_candidates, record_indexes):
#import pdb; pdb.set_trace()
if len(record_indexes) == 0:
raise Exception('record_indexes is empty')
H = ID3.entropy(y[record_indexes])
if len(column_candidates) == 0 or len(np.unique(y[record_indexes])) == 1:
return Node.make_node(y[record_indexes], entropy=H, is_leave=True)
ret = Node.make_node(y[record_indexes], entropy=H, is_leave=False)
info_gain = {}
for col in column_candidates:
split_indexes = self.split(X, record_indexes, col)
cur_H = 0
for index in split_indexes:
cur_H += len(index) * 1.0 / len(record_indexes) * ID3.entropy(y[index])
info_gain[col] = H - cur_H
col, gain = sorted(info_gain.items(), key=lambda t: -t[1])[0]
split_indexes = self.split(X, record_indexes, col)
ret.children = {}
ret.split_column = col
ret.gain = gain
#pdb.set_trace()
for index in split_indexes:
candidates_for_child = column_candidates.copy()
candidates_for_child.remove(col)
child = self.build_node(X, y, candidates_for_child, index)
val = X[index[0]][col]
ret.children[val] = child
return ret
def split(self, X, cur_index, col):
value_idx = defaultdict(list)
for i in cur_index:
value_idx[X[i, col]].append(i)
return value_idx.values()
@staticmethod
def entropy(y):
y = pd.Series(y)
n = len(y) * 1.0
value_counts = y.value_counts()
probs = value_counts.to_dict()
for v in probs:
probs[v] = probs[v] / n
entropy = sum([-p * np.log2(p) for p in probs.values()])
return entropy
class Node(object):
def __init__(self, is_leave, entropy):
self.entropy = entropy
self.col = None
self.children = None
self.is_leave = is_leave
self.prob = None
self.split_column = None
self.gain = None
@staticmethod
def make_node(y, entropy, is_leave=False):
ret = Node(is_leave=True, entropy=entropy)
ret.is_leave = is_leave
n = float(len(y))
value_counts = pd.Series(y).value_counts()
prob = value_counts.to_dict()
for v in prob:
prob[v] = prob[v] / n
ret.prob = prob
return ret
| mit | Python | |
d287286cd14752fd3f3feddfc8a4fb64f3b21872 | Add migration | rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org | readthedocs/builds/migrations/0007_add-automation-rules.py | readthedocs/builds/migrations/0007_add-automation-rules.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-28 16:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('projects', '0040_increase_path_max_length'),
('contenttypes', '0002_remove_content_type_name'),
('builds', '0006_add_config_field'),
]
operations = [
migrations.CreateModel(
name='VersionAutomationRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('priority', models.IntegerField(verbose_name='Rule priority')),
('rule_arg', models.CharField(max_length=255, verbose_name='Value used for the rule to match the version')),
('action', models.CharField(choices=[('activate-version', 'Activate version on match')], max_length=32, verbose_name='Action')),
('action_arg', models.CharField(blank=True, max_length=255, null=True, verbose_name='Value used for the action to perfom an operation')),
('version_type', models.CharField(choices=[('branch', 'Branch'), ('tag', 'Tag'), ('unknown', 'Unknown')], max_length=32, verbose_name='Version type')),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_builds.versionautomationrule_set+', to='contenttypes.ContentType')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='automation_rules', to='projects.Project')),
],
options={
'ordering': ('priority', '-modified', '-created'),
'manager_inheritance_from_future': True,
},
),
migrations.CreateModel(
name='RegexAutomationRule',
fields=[
],
options={
'proxy': True,
'manager_inheritance_from_future': True,
'indexes': [],
},
bases=('builds.versionautomationrule',),
),
migrations.AlterUniqueTogether(
name='versionautomationrule',
unique_together=set([('project', 'priority')]),
),
]
| mit | Python | |
5972c05015c96b1b4b9195a687cf09dab036363a | Add resources tests. | m110/grafcli,m110/grafcli | tests/test_resources.py | tests/test_resources.py | #!/usr/bin/python3
import os
import sys
import unittest
# Make the project root (one level above this tests directory) importable
# so grafcli can be used without being installed.
LIB_PATH = os.path.dirname(os.path.realpath(__file__)) + '/../'
CONFIG_PATH = os.path.join(LIB_PATH, 'grafcli.example.conf')
sys.path.append(LIB_PATH)
# NOTE(review): load_config() is deliberately called before importing
# grafcli.resources — presumably that module reads configuration at import
# time, so keep this ordering.
from grafcli.config import load_config
load_config(CONFIG_PATH)
from grafcli.resources import Resources
from grafcli.exceptions import InvalidPath
class ResourcesTest(unittest.TestCase):
    """Unit tests for the Resources path parser/dispatcher."""

    def test_list_empty(self):
        """Listing the root yields the remote host plus the local sections."""
        resources = Resources()
        self.assertEqual(resources.list(None), ['host.example.com', 'dashboards', 'rows', 'panels'])

    def test_parse_path(self):
        """_parse_path() selects the matching manager and splits the path."""
        resources = Resources()

        manager, parts = resources._parse_path('/dashboards/a/b/c')
        self.assertEqual(manager, resources._local_resources)
        self.assertListEqual(parts, ['dashboards', 'a', 'b', 'c'])

        manager, parts = resources._parse_path('/host.example.com/a/b/c')
        self.assertEqual(manager, resources._remote_resources)
        self.assertListEqual(parts, ['host.example.com', 'a', 'b', 'c'])

        with self.assertRaises(InvalidPath):
            resources._parse_path('/invalid/path')
if __name__ == "__main__":
unittest.main()
| mit | Python | |
57d657344fcfe0c4df477fc1d7caad75d6f0b125 | test the shipment properties and shipment functions from the order object | mollie/mollie-api-python | tests/test_shipments.py | tests/test_shipments.py | from mollie.api.objects.order import Order
from mollie.api.objects.order_line import OrderLine
from mollie.api.objects.shipment import Shipment
from .utils import assert_list_object
# Identifiers used throughout these tests; presumably they match the ids
# embedded in the recorded mock responses ('order_single' etc.).
ORDER_ID = 'ord_kEn1PlbGa'
SHIPMENT_ID = 'shp_3wmsgCJN4U'
def test_get_shipment(client, response):
    """Retrieve a single shipment by a shipment's ID."""
    order_url = 'https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID)
    shipment_url = 'https://api.mollie.com/v2/orders/{order_id}/shipments/{shipment_id}'.format(
        order_id=ORDER_ID, shipment_id=SHIPMENT_ID)
    response.get(order_url, 'order_single')
    response.get(shipment_url, 'shipment_single')

    shipment = client.orders.get(ORDER_ID).get_shipment(SHIPMENT_ID)

    # Identity fields and the link back to the parent order.
    assert isinstance(shipment, Shipment)
    assert shipment.resource == 'shipment'
    assert shipment.id == SHIPMENT_ID
    assert shipment.order_id == ORDER_ID
    assert shipment.created_at == '2018-08-09T14:33:54+00:00'

    # Tracking payload and its convenience accessors.
    expected_tracking = {
        'carrier': 'PostNL',
        'code': '3SKABA000000000',
        'url': 'http://postnl.nl/tracktrace/?B=3SKABA000000000&P=1016EE&D=NL&T=C'
    }
    assert shipment.tracking == expected_tracking
    assert shipment.has_tracking() is True
    assert shipment.has_tracking_url() is True
    assert shipment.tracking_url == 'http://postnl.nl/tracktrace/?B=3SKABA000000000&P=1016EE&D=NL&T=C'
def test_get_shipment_lines(client, response):
    """Retrieve a single shipment and the order lines shipped by a shipment's ID."""
    response.get('https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID), 'order_single')
    response.get('https://api.mollie.com/v2/orders/{order_id}/shipments/{shipment_id}'.format(
        order_id=ORDER_ID, shipment_id=SHIPMENT_ID), 'shipment_single')

    shipment = client.orders.get(ORDER_ID).get_shipment(SHIPMENT_ID)

    # Every entry of the shipment must be exposed as an OrderLine object.
    assert_list_object(shipment.lines, OrderLine)
def test_get_order_from_shipment(client, response):
    """Retrieve the order from the shipment object"""
    order_url = 'https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID)
    response.get(order_url, 'order_single')
    response.get('https://api.mollie.com/v2/orders/{order_id}/shipments/{shipment_id}'.format(
        order_id=ORDER_ID, shipment_id=SHIPMENT_ID), 'shipment_single')

    order = client.orders.get(ORDER_ID)
    shipment = order.get_shipment(SHIPMENT_ID)

    # The back-reference must resolve to an Order equal to the parent.
    related_order = shipment.order
    assert isinstance(related_order, Order)
    assert order == related_order
def test_create_shipment(client, response):
    """Create a shipment of an order object"""
    response.get('https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID), 'order_single')
    response.post('https://api.mollie.com/v2/orders/ord_kEn1PlbGa/shipments', 'shipment_single')

    # One line with an explicit quantity, one line without, plus tracking data.
    payload = {
        'lines': [
            {'id': 'odl_dgtxyl', 'quantity': 1},
            {'id': 'odl_dgtxyb'},
        ],
        'tracking': {
            'carrier': 'PostNL',
            'code': '3SKABA000000000',
            'url': 'http://postnl.nl/tracktrace/?B=3SKABA000000000&P=1016EE&D=NL&T=C'
        },
    }

    order = client.orders.get(ORDER_ID)
    new_shipment = order.create_shipment(payload)
    assert isinstance(new_shipment, Shipment)
def test_update_shipment(client, response):
    """Update the tracking information of a shipment"""
    response.get('https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID), 'order_single')
    response.patch('https://api.mollie.com/v2/orders/{order_id}/shipments/{shipment_id}'.format(
        order_id=ORDER_ID, shipment_id=SHIPMENT_ID), 'shipment_single')

    new_tracking = {
        'carrier': 'PostNL',
        'code': '3SKABA000000000',
        'url': 'http://postnl.nl/tracktrace/?B=3SKABA000000000&P=1016EE&D=NL&T=C'
    }
    order = client.orders.get(ORDER_ID)
    updated = order.update_shipment(SHIPMENT_ID, {'tracking': new_tracking})

    assert isinstance(updated, Shipment)
    assert updated.id == SHIPMENT_ID
def test_list_shipments(client, response):
    """Retrieve all shipments for an order."""
    response.get('https://api.mollie.com/v2/orders/{order_id}'.format(order_id=ORDER_ID), 'order_single')
    response.get('https://api.mollie.com/v2/orders/{order_id}/shipments'.format(order_id=ORDER_ID), 'shipments_list')

    order = client.orders.get(ORDER_ID)

    # The `shipments` property must be a collection of Shipment objects
    # (verified via the shared assert_list_object helper).
    assert_list_object(order.shipments, Shipment)
| bsd-2-clause | Python | |
870ed1b76209b8850f300842a67416c4a69523e3 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/9e718d5cacc5ec4964120a626a851af2798c2322. | tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_op
timizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT at a pinned commit as the @tf_runtime repository."""
    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "9e718d5cacc5ec4964120a626a851af2798c2322"
    TFRT_SHA256 = "aa53d5bec70c1d029b803b279995d41b41b957e516cec4964120a626a851af27" if False else "aa53d5bec70c1d029b803b279995d41b41b957e516cec10bead250bb09ae9794"
    # Download the pinned GitHub archive (via mirrors) and strip the
    # "runtime-<commit>/" top-level directory the archive unpacks into.
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "38f4f5c359ecb321cfd3a085f930dd71a7f620ab"
TFRT_SHA256 = "0cf2a09389a0271a8433a6642b5aa327d4cbb86999938a2e7d77615a55635ac5"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
dbf1c4d098ffa180c8fa6564f5ad909e198a03c3 | Add servo lib | DreamN/Smart-Tollbooth,DreamN/Smart-Tollbooth,DreamN/Smart-Tollbooth,DreamN/Smart-Tollbooth | servo.py | servo.py | #################################################################
##       SERVO LIB for SMART TOLLBOOTH PROJECT                 ##
#################################################################
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering; the servo signal line is driven
# as a plain digital output on GPIO 17.
GPIO.setmode(GPIO.BCM)
SERVO = 17  # BCM number of the servo control pin
GPIO.setup(SERVO, GPIO.OUT)
def moveDeg(i):
    """Move the servo toward `i` degrees by bit-banging a pulse train.

    The pin is held HIGH and then LOW for 0.001 + (i * 0.002 / 180)
    seconds each (1 ms at 0 degrees up to 3 ms at 180 degrees), and the
    cycle is repeated 260 times.
    """
    pulse = 0.001 + (i * 0.002 / 180)
    for _ in range(260):
        GPIO.output(SERVO, GPIO.HIGH)
        time.sleep(pulse)
        GPIO.output(SERVO, GPIO.LOW)
        time.sleep(pulse)
def closeBarrier():
    # Lower the tollbooth barrier: 0 degrees is the closed position.
    moveDeg(0)
def openBarrier():
moveDeg(90) | mit | Python | |
251590eda4c373a4be2f6690633873392ce5c86c | implement a customized CLA model that can enable/disable TP/SP learning | ywcui1990/nupic.research,mrcslws/htmresearch,ywcui1990/htmresearch,chanceraine/nupic.research,ywcui1990/htmresearch,ywcui1990/htmresearch,ywcui1990/htmresearch,cogmission/nupic.research,numenta/htmresearch,neuroidss/nupic.research,ThomasMiconi/htmresearch,numenta/htmresearch,BoltzmannBrain/nupic.research,BoltzmannBrain/nupic.research,numenta/htmresearch,ywcui1990/nupic.research,neuroidss/nupic.research,numenta/htmresearch,cogmission/nupic.research,numenta/htmresearch,marionleborgne/nupic.research,ThomasMiconi/htmresearch,ywcui1990/htmresearch,marionleborgne/nupic.research,ThomasMiconi/htmresearch,BoltzmannBrain/nupic.research,mrcslws/htmresearch,ThomasMiconi/htmresearch,subutai/htmresearch,subutai/htmresearch,mrcslws/htmresearch,ywcui1990/nupic.research,mrcslws/htmresearch,subutai/htmresearch,ThomasMiconi/htmresearch,ThomasMiconi/nupic.research,BoltzmannBrain/nupic.research,chanceraine/nupic.research,subutai/htmresearch,cogmission/nupic.research,neuroidss/nupic.research,ywcui1990/htmresearch,BoltzmannBrain/nupic.research,marionleborgne/nupic.research,subutai/htmresearch,mrcslws/htmresearch,marionleborgne/nupic.research,numenta/htmresearch,ThomasMiconi/htmresearch,ThomasMiconi/nupic.research,neuroidss/nupic.research,ywcui1990/nupic.research,ThomasMiconi/nupic.research,ywcui1990/nupic.research,ThomasMiconi/htmresearch,chanceraine/nupic.research,numenta/htmresearch,ywcui1990/nupic.research,ywcui1990/htmresearch,ThomasMiconi/nupic.research,cogmission/nupic.research,BoltzmannBrain/nupic.research,ywcui1990/htmresearch,cogmission/nupic.research,ThomasMiconi/nupic.research,cogmission/nupic.research,ThomasMiconi/nupic.research,marionleborgne/nupic.research,ThomasMiconi/nupic.research,ywcui1990/nupic.research,marionleborgne/nupic.research,mrcslws/htmresearch,mrcslws/htmresearch,ywcui1990/nupic.research,numenta/htmresearch,subutai/htmresearch,neuroidss/nupic.res
earch,ThomasMiconi/nupic.research,mrcslws/htmresearch,marionleborgne/nupic.research,cogmission/nupic.research,subutai/htmresearch,chanceraine/nupic.research,marionleborgne/nupic.research,chanceraine/nupic.research,BoltzmannBrain/nupic.research,subutai/htmresearch,neuroidss/nupic.research,neuroidss/nupic.research,neuroidss/nupic.research,cogmission/nupic.research,chanceraine/nupic.research,ThomasMiconi/htmresearch,BoltzmannBrain/nupic.research | sequence_prediction/continuous_sequence/clamodel_custom.py | sequence_prediction/continuous_sequence/clamodel_custom.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.frameworks.opf.clamodel import CLAModel
from nupic.frameworks.opf.opfutils import (InferenceType)
class CLAModel_custom(CLAModel):
  """CLAModel variant whose SP and TP learning can be toggled independently.

  The overridden compute steps drive each region's 'learningMode' parameter
  from its own boolean (_spLearningEnabled / _tpLearningEnabled), which
  callers may flip at any time to freeze or resume learning per region.
  """

  def __init__(self, **kwargs):
    """Forward all arguments to CLAModel; learning starts enabled for both."""
    super(CLAModel_custom, self).__init__(**kwargs)
    self._spLearningEnabled = True
    self._tpLearningEnabled = True

  # Override of CLAModel._spCompute.
  def _spCompute(self):
    """Run the spatial pooler step with the private SP learning switch."""
    sp = self._getSPRegion()
    if sp is None:
      return
    sp.setParameter('topDownMode', False)
    sp.setParameter('inferenceMode', self.isInferenceEnabled())
    # Learning follows this model's own per-region flag.
    sp.setParameter('learningMode', self._spLearningEnabled)
    sp.prepareInputs()
    sp.compute()

  # Override of CLAModel._tpCompute.
  def _tpCompute(self):
    """Run the temporal pooler step with the private TP learning switch."""
    tp = self._getTPRegion()
    if tp is None:
      return
    # Enable top-down compute for TemporalAnomaly inference or
    # reconstruction models.
    if (self.getInferenceType() == InferenceType.TemporalAnomaly or
        self._isReconstructionModel()):
      topDownCompute = True
    else:
      topDownCompute = False
    # (The original fetched the TP region a second time here; the first
    # lookup is reused instead.)
    tp.setParameter('topDownMode', topDownCompute)
    tp.setParameter('inferenceMode', self.isInferenceEnabled())
    # Learning follows this model's own per-region flag.
    tp.setParameter('learningMode', self._tpLearningEnabled)
    tp.prepareInputs()
    tp.compute()
| agpl-3.0 | Python | |
b5a210fac3941298fdf9160948c2cc73c27bfccd | add missing module electrum_plugins | fyookball/electrum,FairCoinTeam/electrum-fair,procrasti/electrum,dabura667/electrum,asfin/electrum,dashpay/electrum-dash,argentumproject/electrum-arg,protonn/Electrum-Cash,spesmilo/electrum,vertcoin/electrum-vtc,fyookball/electrum,FairCoinTeam/electrum-fair,fujicoin/electrum-fjc,aasiutin/electrum,kyuupichan/electrum,dabura667/electrum,cryptapus/electrum-uno,imrehg/electrum,vertcoin/electrum-vtc,fireduck64/electrum,fujicoin/electrum-fjc,lbryio/lbryum,aasiutin/electrum,fujicoin/electrum-fjc,fyookball/electrum,cryptapus/electrum-myr,neocogent/electrum,pooler/electrum-ltc,spesmilo/electrum,digitalbitbox/electrum,argentumproject/electrum-arg,procrasti/electrum,romanz/electrum,neocogent/electrum,cryptapus/electrum,wakiyamap/electrum-mona,fireduck64/electrum,digitalbitbox/electrum,digitalbitbox/electrum,spesmilo/electrum,pooler/electrum-ltc,romanz/electrum,argentumproject/electrum-arg,cryptapus/electrum-uno,FairCoinTeam/electrum-fair,imrehg/electrum,dashpay/electrum-dash,cryptapus/electrum,fireduck64/electrum,digitalbitbox/electrum,imrehg/electrum,imrehg/electrum,aasiutin/electrum,FairCoinTeam/electrum-fair,pknight007/electrum-vtc,protonn/Electrum-Cash,dashpay/electrum-dash,molecular/electrum,kyuupichan/electrum,neocogent/electrum,fireduck64/electrum,cryptapus/electrum-myr,dashpay/electrum-dash,cryptapus/electrum-myr,aasiutin/electrum,asfin/electrum,pknight007/electrum-vtc,pooler/electrum-ltc,vertcoin/electrum-vtc,vertcoin/electrum-vtc,protonn/Electrum-Cash,dabura667/electrum,molecular/electrum,pknight007/electrum-vtc,dabura667/electrum,cryptapus/electrum-uno,protonn/Electrum-Cash,vialectrum/vialectrum,molecular/electrum,wakiyamap/electrum-mona,vialectrum/vialectrum,asfin/electrum,procrasti/electrum,cryptapus/electrum-myr,cryptapus/electrum,vialectrum/vialectrum,wakiyamap/electrum-mona,molecular/electrum,argentumproject/electrum-arg,romanz/electrum,pooler/electrum-ltc,lbryio/lbryum,spesmilo/el
ectrum,procrasti/electrum,wakiyamap/electrum-mona,cryptapus/electrum-uno,kyuupichan/electrum,pknight007/electrum-vtc | setup.py | setup.py | #!/usr/bin/env python2
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
# Load the version number straight from the source tree so it is defined
# in exactly one place (lib/version.py).
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (2, 7, 0):
    sys.exit("Error: Electrum requires Python version >= 2.7.0...")
# Desktop entry and application icon are only installed on platforms
# that use the freedesktop.org share/ layout.
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    usr_share = os.path.join(sys.prefix, "share")
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
    ]
# NOTE: every electrum_plugins subpackage must be listed explicitly here,
# including the 'electrum_plugins' parent package itself.
setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    install_requires=[
        'slowaes>=0.1a1',
        'ecdsa>=0.9',
        'pbkdf2',
        'requests',
        'qrcode',
        'protobuf',
        'dnspython',
    ],
    packages=[
        'electrum',
        'electrum_gui',
        'electrum_gui.qt',
        'electrum_plugins',
        'electrum_plugins.audio_modem',
        'electrum_plugins.cosigner_pool',
        'electrum_plugins.email_requests',
        'electrum_plugins.exchange_rate',
        'electrum_plugins.greenaddress_instant',
        'electrum_plugins.keepkey',
        'electrum_plugins.labels',
        'electrum_plugins.ledger',
        'electrum_plugins.plot',
        'electrum_plugins.trezor',
        'electrum_plugins.trustedcoin',
        'electrum_plugins.virtualkeyboard',
    ],
    # Map the import-package names onto their on-disk directories.
    package_dir={
        'electrum': 'lib',
        'electrum_gui': 'gui',
        'electrum_plugins': 'plugins',
    },
    package_data={
        'electrum': [
            'www/index.html',
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ]
    },
    scripts=['electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",
    author_email="thomasv@electrum.org",
    license="GNU GPLv3",
    url="https://electrum.org",
    long_description="""Lightweight Bitcoin Wallet"""
)
| #!/usr/bin/env python2
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (2, 7, 0):
sys.exit("Error: Electrum requires Python version >= 2.7.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
usr_share = os.path.join(sys.prefix, "share")
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
install_requires=[
'slowaes>=0.1a1',
'ecdsa>=0.9',
'pbkdf2',
'requests',
'qrcode',
'protobuf',
'dnspython',
],
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.exchange_rate',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.plot',
'electrum_plugins.trezor',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'electrum': [
'www/index.html',
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
]
},
scripts=['electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="thomasv@electrum.org",
license="GNU GPLv3",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet"""
)
| mit | Python |
60bc275a7c45278ffe9269abf2c1191a73df9d40 | add setup.py (#6) | pinkblock/rotoscoping,pinkblock/rotoscoping | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools when this file is next touched.
setup(name='rotoscoping',
      version='1.0',
      description="Take in pics swap faces and make'm pretty",
      author='Seith Miller',
      author_email='seithmiller@gmail.com',
      url='https://www.seithmiller.com/',
      packages=['rotoscoping'],
      )
| mit | Python | |
01d21e43cedd191c57e3d505d9f308185012bdef | Remove power metric from jsgamebench benchmark | bright-sparks/chromium-spacewalk,ltilve/chromium,Chilledheart/chromium,bright-sparks/chromium-spacewalk,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,M4sse/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,patrickm/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,anirudhSK/chromium,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,M4sse/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,anirudhSK/chromium,Just-D/chromium-1,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,jaruba/chromium.src,littlstar/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,jaruba/chromium.src,anirudhSK/chromium,littlstar/chromium.src,Chilledheart/chromium,Chilledheart/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,jaruba/
chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,anirudhSK/chromium,chuan9/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,Fireblend/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,littlstar/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,patrickm/chromium.src,jaruba/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Jonekee/chr
omium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,ltilve/chromium,markYoungH/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,patrickm/chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,anirudhSK/chromium,ltilve/chromium,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,M4sse/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,an
irudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl | tools/perf/benchmarks/jsgamebench.py | tools/perf/benchmarks/jsgamebench.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Facebook's JSGameBench benchmark."""
import os
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
class _JsgamebenchMeasurement(page_measurement.PageMeasurement):
  """Starts the JSGameBench perf test in the page and records its score.

  The redundant __init__ (which only chained to super) has been removed;
  the inherited constructor is used unchanged.
  """

  def MeasurePage(self, page, tab, results):
    """Kick off the benchmark, wait for it to finish, and report the score.

    Waits for the page to create the #perfscore0 element (timeout 1800),
    then reads the score out of that element's innerHTML.
    """
    tab.ExecuteJavaScript('UI.call({}, "perftest")')
    tab.WaitForJavaScriptExpression(
        'document.getElementById("perfscore0") != null', 1800)
    js_get_results = 'document.getElementById("perfscore0").innerHTML'
    result = int(tab.EvaluateJavaScript(js_get_results))
    results.Add('Score', 'score (bigger is better)', result)
class Jsgamebench(test.Test):
  """Counts how many animating sprites can move around on the screen at once."""
  # Measurement class used to drive each page in the page set.
  test = _JsgamebenchMeasurement
  def CreatePageSet(self, options):
    # Single page served from localhost; 'archive_data_file' points at the
    # recorded replay data (presumably a WPR archive) for offline runs.
    return page_set.PageSet.FromDict({
        'archive_data_file': '../page_sets/data/jsgamebench.json',
        'pages': [
          { 'url': 'http://localhost/' }
        ]
      }, os.path.dirname(__file__))
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Facebook's JSGameBench benchmark."""
import os
from metrics import power
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
class _JsgamebenchMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_JsgamebenchMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.ExecuteJavaScript('UI.call({}, "perftest")')
tab.WaitForJavaScriptExpression(
'document.getElementById("perfscore0") != null', 1800)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'document.getElementById("perfscore0").innerHTML'
result = int(tab.EvaluateJavaScript(js_get_results))
results.Add('Score', 'score (bigger is better)', result)
class Jsgamebench(test.Test):
"""Counts how many animating sprites can move around on the screen at once."""
test = _JsgamebenchMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../page_sets/data/jsgamebench.json',
'pages': [
{ 'url': 'http://localhost/' }
]
}, os.path.dirname(__file__))
| bsd-3-clause | Python |
9ee46e93aca97f69a2ad75a2b9b2db52196bfcba | Create trumpbot.py | WebBreacher/trumpdroidretweeter,WebBreacher/trumpdroidretweeter | trumpbot.py | trumpbot.py | #!/usr/bin/python
'''
Author : Micah Hoffman (@WebBreacher)
Description : Trump Retweeting Bot that retweets Android-sourced tweets.
'''
# Import Libraries
import re

import tweepy

from creds import *

# Read the id of the most recently processed tweet.  Create the history file
# on first run; an empty file falls back to a low sentinel id.
# (The previous check `last_tweet is None` could never trigger: file.read()
# returns '' for an empty file, not None.)
try:
    tweetHistoryFile = open('/tmp/trumpbot_lasttweet', 'r+')
except IOError:
    tweetHistoryFile = open('/tmp/trumpbot_lasttweet', 'w+')
last_tweet = tweetHistoryFile.read().strip()
if not last_tweet:
    last_tweet = 5555555555

# Access and authorize our Twitter credentials from creds.py
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Grab the tweets that are more recent than the last one we processed
info = api.user_timeline('realdonaldtrump', since_id=last_tweet)

# Retweet each tweet whose source contains "android" (case-insensitive)
# and remember the highest tweet id seen so far.
for item in info:
    if re.search('android', item.source, re.I):
        api.retweet(item.id)
        if int(last_tweet) < int(item.id):
            last_tweet = item.id

# Rewind and overwrite the history file with the newest id, truncating any
# leftover bytes from a previous, longer value.
tweetHistoryFile.seek(0, 0)
tweetHistoryFile.write(str(last_tweet))
tweetHistoryFile.truncate()
tweetHistoryFile.close()
| mit | Python | |
67a295d10df752b3a4d932efa9e9750d60273c09 | add script to index streams from a video | Ziggeo/ZiggeoPythonSdk,Ziggeo/ZiggeoPythonSdk | demos/stream_index.py | demos/stream_index.py | import sys
from Ziggeo import Ziggeo
import json  # NOTE(review): currently unused -- confirm before removing

# Validate the command line: three positional arguments are required.
if(len(sys.argv) < 4):
    print ("Error\n")
    print ("Usage: $>python stream_index.py YOUR_API_TOKEN YOUR_PRIVATE_KEY VIDEO_TOKEN \n")
    # BUG FIX: exit with a non-zero status so shells and calling scripts can
    # detect the usage error (sys.exit() with no argument reports success).
    sys.exit(1)

api_token = sys.argv[1]
private_key = sys.argv[2]
video_token = sys.argv[3]

ziggeo = Ziggeo(api_token, private_key)
# Print the index of streams attached to the given video token.
print (ziggeo.streams().index(video_token))
906196a28fecab43ec6e33e3517c7159f66a0a52 | Add init | userzimmermann/robotframework-python3,Senseg/robotframework,userzimmermann/robotframework-python3,Senseg/robotframework,Senseg/robotframework,userzimmermann/robotframework-python3,userzimmermann/robotframework-python3,Senseg/robotframework,Senseg/robotframework,userzimmermann/robotframework-python3 | src/robot/result/__init__.py | src/robot/result/__init__.py | # Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| apache-2.0 | Python | |
841c79ca1e8850eda140bc09fc889dd693ecd5f6 | Add setup.py so pylogmet can be installed | locke105/pylogmet | setup.py | setup.py | import setuptools
# Packaging metadata for pylogmet.
_metadata = {
    'name': 'pylogmet',
    'version': '0.1.0',
    'packages': setuptools.find_packages(),
}

setuptools.setup(**_metadata)
| apache-2.0 | Python | |
9c88998a5d40f623e1c98006492393d3b2ad718a | add initial unit test skeleton | Kraymer/beets,jackwilsdon/beets,xsteadfastx/beets,sampsyo/beets,MyTunesFreeMusic/privacy-policy,pkess/beets,artemutin/beets,sampsyo/beets,artemutin/beets,xsteadfastx/beets,madmouser1/beets,madmouser1/beets,lengtche/beets,MyTunesFreeMusic/privacy-policy,artemutin/beets,jackwilsdon/beets,shamangeorge/beets,shamangeorge/beets,Kraymer/beets,ibmibmibm/beets,pkess/beets,ibmibmibm/beets,Kraymer/beets,diego-plan9/beets,diego-plan9/beets,madmouser1/beets,ibmibmibm/beets,SusannaMaria/beets,beetbox/beets,mosesfistos1/beetbox,xsteadfastx/beets,jackwilsdon/beets,SusannaMaria/beets,mosesfistos1/beetbox,diego-plan9/beets,ibmibmibm/beets,sampsyo/beets,MyTunesFreeMusic/privacy-policy,beetbox/beets,diego-plan9/beets,SusannaMaria/beets,pkess/beets,mosesfistos1/beetbox,madmouser1/beets,beetbox/beets,shamangeorge/beets,Kraymer/beets,sampsyo/beets,beetbox/beets,lengtche/beets,mosesfistos1/beetbox,lengtche/beets,xsteadfastx/beets,shamangeorge/beets,artemutin/beets,pkess/beets,jackwilsdon/beets,lengtche/beets,MyTunesFreeMusic/privacy-policy,SusannaMaria/beets | test/test_discogs.py | test/test_discogs.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function
from test import _common
from test._common import unittest, Bag
from beetsplug.discogs import DiscogsPlugin
class DGAlbumInfoTest(_common.TestCase):
    def _make_release(self, date_str='2009', tracks=None, track_length=None,
                      track_artist=False):
        """Returns a Bag that mimics a discogs_client.Release. The list
        of elements on the returned Bag is incomplete, including just
        those required for the tests on this class."""
        # NOTE(review): date_str, track_length and track_artist are accepted
        # but not used yet -- presumably reserved for future test cases.
        data = {
            'id': 'ALBUM ID',
            'uri': 'ALBUM URI',
            'title': 'ALBUM TITLE',
            'year': '3001',
            'artists': [{
                'name': 'ARTIST NAME',
                'id': 'ARTIST ID',
                'join': ','
            }],
            'formats': [{
                'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'],
                'name': 'FORMAT',
                'qty': 1
            }],
            'labels': [{
                'name': 'LABEL NAME',
                'catno': 'CATALOG NUMBER',
            }],
            'tracklist': []
        }

        if tracks:
            for recording in tracks:
                data['tracklist'].append(recording)

        return Bag(data=data,
                   # Make some fields available as properties, as they are
                   # accessed by DiscogsPlugin methods.
                   title=data['title'],
                   artists=[Bag(data=d) for d in data['artists']])

    def _make_track(self, title, position='', duration='', type_=None):
        """Return a dict mimicking one discogs tracklist entry."""
        track = {
            'title': title,
            'position': position,
            'duration': duration
        }
        if type_ is not None:
            # Test samples on discogs_client do not have a 'type_' field, but
            # the API seems to return it. Values: 'track' for regular tracks,
            # 'heading' for descriptive texts (ie. not real tracks - 12.13.2).
            track['type_'] = type_
        return track

    def test_set_media_for_tracks(self):
        # The release-level format name must be propagated to the album and
        # to every individual track of the AlbumInfo built by the plugin.
        tracks = [self._make_track('TITLE ONE', '1', '01:01'),
                  self._make_track('TITLE TWO', '2', '02:02')]
        release = self._make_release(tracks=tracks)

        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks
        self.assertEqual(d.media, 'FORMAT')
        self.assertEqual(t[0].media, d.media)
        self.assertEqual(t[1].media, d.media)
def suite():
    """Build the test suite for this module (used by `python -m` runs)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Allow running this test module directly, executing suite() above.
    unittest.main(defaultTest='suite')
| mit | Python | |
99a71ccd5ed1282a86bca7535a31ec949afa71bf | Create tulkinta.py | taloprojekti/SP2017,taloprojekti/SP2017 | tulkinta.py | tulkinta.py | import setup
import lampotila


def lukuohjelma():
    """Run the reader program (imported lazily at call time)."""
    import lukuohjelma
    lukuohjelma.main()


def fav():
    """Return the favourite (target) temperature from setup."""
    return setup.Tfav()


def Tmin():
    """Return the configured minimum temperature from setup."""
    return setup.Tmin()


def Tmax():
    """Return the configured maximum temperature from setup."""
    return setup.Tmax()


def delta():
    """Return the temperature delta computed by the lampotila module."""
    return lampotila.delta()


def hintadelta(year, month, day, hour):
    """Return the price delta for the given date.

    *year* is the full four-digit year; the data files are keyed by a
    two-digit year, so 2000 is subtracted internally.
    NOTE(review): `hour` is currently unused -- confirm whether the price
    lookup should be hour-specific.
    """
    # BUG FIX: the original signature `def hintadelta((year - 2000), ...)`
    # was a syntax error; the adjustment now happens inside the function.
    import lukeminen
    string = str(day + month + (year - 2000))
    tiedot = lukeminen.luetiedot(string)
    minimi = lukeminen.minimi()
    # BUG FIX: the computed delta was previously discarded; return it.
    return lukeminen.delta(tiedot, minimi)


def sisalampotila():
    """Return the current indoor temperature."""
    # BUG FIX: the original definition was missing the trailing colon.
    # NOTE(review): `tempread` was referenced without an import anywhere in
    # this module; imported lazily here -- confirm the module name.
    import tempread
    return tempread.read_temp_in()


def main(year, month, day, hour):
    """Collect all readings for the given date.

    BUG FIX: the original signature `def main((year - 2000), ...)` was a
    syntax error, and its locals shadowed the module-level functions
    (e.g. `Tmin = Tmin()`), which would break any repeated call.
    """
    lukuohjelma()
    t_fav = fav()
    t_min = Tmin()
    t_max = Tmax()
    t_delta = delta()
    p_delta = hintadelta(year, month, day, hour)
    t_now = sisalampotila()
| mit | Python | |
06bb72423587d37c86f46a9f298c9c4f36805543 | copy and pasted some sections of chatbot to a new module, trainbot | corinnelhh/chatbot,corinnelhh/chatbot | trainbot.py | trainbot.py | class Trainbot(object):
    def __init__(self, training_file="tell_tale_heart.txt"):
        """Prepare empty n-gram lexicons for the given training corpus.

        :param training_file: path to the plain-text corpus consumed later
            by ``fill_lexicon``; nothing is read here.
        """
        self.training_file = training_file
        # "w0 w1" word pair -> list of words seen after that pair (trigrams)
        self.tri_lexicon = {}
        # word -> list of words seen after it (bigrams)
        self.bi_lexicon = {}
        # Sentence-ending punctuation is preserved by the cleaner below...
        self.stop_puncts = ['.', '!', '?']
        # ...while these tokens are stripped by remove_non_final_punctuation().
        self.puncts = [',', ';', ':', '"', "'", '-', '--', ",?", '."']
def parse_training_input(self, text):
while True:
our_text = text.read(2048)
if not our_text:
break
yield wordpunct_tokenize(our_text)
def remove_non_final_punctuation(self, our_list):
for i in our_list[:]:
if i in self.puncts:
our_list.remove(i)
return our_list
def tag_input(self, our_string):
our_string = wordpunct_tokenize(our_string)
return pos_tag(our_string)
def fill_lexicon(self):
f = open(self.training_file)
for words in self.parse_training_input(f):
words = self.remove_non_final_punctuation(words)
for idx, word in enumerate(words[2:]):
word_pair = "{} {}".format(words[idx - 2], words[idx - 1])
first_word = str(words[idx - 2])
second_word = str(words[idx - 1])
if first_word not in self.bi_lexicon:
self.bi_lexicon[first_word] = [second_word]
if word_pair not in self.tri_lexicon:
self.tri_lexicon[word_pair] = [word]
else:
self.bi_lexicon[first_word].append(second_word)
self.tri_lexicon[word_pair].append(word) | mit | Python | |
08a6dddb866ec53ff45a302d7c163d041bbefe71 | Add stub unit test for options resolving | deffi/protoplot | protoplot-test/test_options_resolving.py | protoplot-test/test_options_resolving.py | import unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
    pass


# Register the option names that Series instances understand.
# NOTE(review): the boolean flag's semantics are not visible here (it is
# True only for "color") -- confirm against Item.options.register.
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
    """Skeleton test case for option resolution (no assertions yet)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testOptionsResolving(self):
        # TODO: exercise resolution of the Series options registered above.
        pass
if __name__ == "__main__":
    # Uncomment to run a single named test from the command line:
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| agpl-3.0 | Python | |
983bf009012e69ed703302fc02aef32bd33a6f12 | add timings | simomarsili/ndd | utils/timings.py | utils/timings.py | # -*- coding: utf-8 -*-
"""Measure execution average execution times"""
import cProfile
import time
import numpy as np
from numpy.random import dirichlet, multinomial
from scipy.stats import entropy as sp_entropy
import ndd
import ndd.fnsb # pylint: disable=no-name-in-module, import-error
R = 200 # number of repetitions
N = 10000
K = 1000000
ALPHA = 0.1
np.random.seed(42)
def dimul(alpha, n, size=None):
    """Dirichlet-multinomial draw: pvals ~ Dir(alpha), then counts ~ Mult(n, pvals)."""
    return multinomial(n, dirichlet(alpha), size=size)
def entropy(counts, k):
    """Time one ndd.entropy() call; return (elapsed_seconds, 0)."""
    t_start = time.time()
    ndd.entropy(counts, k, return_std=True)  # result deliberately discarded
    return time.time() - t_start, 0
def scipy_entropy(counts, k):  # pylint: disable=unused-argument
    """Time one scipy.stats.entropy() call; return (elapsed_seconds, 0).

    *k* is accepted only so the signature matches the other timed functions.
    """
    t_start = time.time()
    sp_entropy(counts)  # result deliberately discarded
    return time.time() - t_start, 0
def average_timings(ar):
    """Print per-stage average execution times over the samples in *ar*.

    Each function in ``funcs`` returns an (elapsed_seconds, _) pair; the
    elapsed times are accumulated per function and averaged over R.
    """
    # pylint: disable=no-member
    # NOTE(review): ndd.fnsb.phony_1/2/4 presumably time successive internal
    # stages of the estimator -- confirm against the ndd.fnsb extension.
    labels = 'init range fortran python scipy_entropy'.split()
    funcs = (ndd.fnsb.phony_1, ndd.fnsb.phony_2, ndd.fnsb.phony_4, entropy,
             scipy_entropy)
    times = np.zeros(len(funcs))
    for counts in ar:
        for j, f in enumerate(funcs):
            times[j] += f(counts, K)[0]
    times /= R
    print('total fortran/python/scipy time: %e/%e/%e' % tuple(times[-3:]))
    for j, label in enumerate(labels):
        # Each label reports the increment over the previous stage's
        # (cumulative) timing; the first label reports its absolute time.
        t0 = times[j - 1] if j > 0 else 0
        print('%s: %e' % (label, times[j] - t0))
def cprofile(ar):
    """Estimate the entropy of every sample in *ar* (run under cProfile)."""
    estimates = []
    for sample in ar:
        estimates.append(ndd.entropy(sample, K))
    return estimates
if __name__ == '__main__':
    # Draw R Dirichlet-multinomial samples (note: size=(R) is just the int R,
    # not a 1-tuple), profile the batch entropy run with cProfile, then
    # report per-stage average timings.
    a = dimul([ALPHA] * K, N, size=(R))
    cProfile.run('cprofile(a)')
    average_timings(a)
| bsd-3-clause | Python | |
60d57f24a76976d82de37fda40d3a081cc497e72 | add setup.py. | avidal/django-pyodbc,javrasya/django-pyodbc-azure,michiya/django-pyodbc-azure,avidal/django-pyodbc | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup

# Packaging metadata for the django-pyodbc MS SQL Server backend.
# NOTE(review): distutils is deprecated/removed in modern Python; consider
# migrating to setuptools.
setup(name='sql_server.pyodbc',
      version='1.0',
      description='Django MS SQL Server backends using pyodbc',
      author='django-pyodbc team',
      url='http://code.google.com/p/django-pyodbc',
      packages=['sql_server', 'sql_server.pyodbc', 'sql_server.extra'],
      )
| bsd-3-clause | Python | |
93fea6b7553e1098707e87259f60ce08ec841d85 | add setup.py | rishubil/sqlalchemy-fulltext-search | setup.py | setup.py | """
SQLAlchemy FullText Search
"""
from setuptools import setup, Command
# NOTE(review): `Command` is imported but never used below -- likely a
# leftover; left in place pending confirmation.

setup(
    name='SQLAlchemy-FullText-Search',
    version='0.1',
    url='https://github.com/mengzhuo/sqlalchemy-fulltext-search',
    license='BSD',
    author='Meng Zhuo',
    author_email='mengzhuo1203@gmail.com',
    description=('Provide FullText for MYSQL & SQLAlchemy model'),
    long_description = __doc__,  # reuses the module docstring at file top
    packages=['sqlalchemy_fulltext'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=['SQLAlchemy>=0.8',],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules' ]
    )
| mit | Python | |
fc9a1aaa11fb45ba1acf898a351455beb13e5066 | Update django and python version in classifiers | coddingtonbear/django-mailbox,ad-m/django-mailbox | setup.py | setup.py | from setuptools import find_packages, setup
from django_mailbox import __version__ as version_string
tests_require = [
'django',
'mock',
'unittest2',
]
gmail_oauth2_require = [
'python-social-auth',
]
setup(
name='django-mailbox',
version=version_string,
url='http://github.com/coddingtonbear/django-mailbox/',
description=(
'Import mail from POP3, IMAP, local mailboxes or directly from '
'Postfix or Exim4 into your Django application automatically.'
),
license='MIT',
author='Adam Coddington',
author_email='me@adamcoddington.net',
extras_require={
'gmail-oauth2': gmail_oauth2_require
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Post-Office',
'Topic :: Communications :: Email :: Post-Office :: IMAP',
'Topic :: Communications :: Email :: Post-Office :: POP3',
'Topic :: Communications :: Email :: Email Clients (MUA)',
],
packages=find_packages(),
include_package_data = True,
install_requires=[
'six>=1.6.1'
]
)
| from setuptools import find_packages, setup
from django_mailbox import __version__ as version_string
tests_require = [
'django',
'mock',
'unittest2',
]
gmail_oauth2_require = [
'python-social-auth',
]
setup(
name='django-mailbox',
version=version_string,
url='http://github.com/coddingtonbear/django-mailbox/',
description=(
'Import mail from POP3, IMAP, local mailboxes or directly from '
'Postfix or Exim4 into your Django application automatically.'
),
license='MIT',
author='Adam Coddington',
author_email='me@adamcoddington.net',
extras_require={
'gmail-oauth2': gmail_oauth2_require
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Post-Office',
'Topic :: Communications :: Email :: Post-Office :: IMAP',
'Topic :: Communications :: Email :: Post-Office :: POP3',
'Topic :: Communications :: Email :: Email Clients (MUA)',
],
packages=find_packages(),
include_package_data = True,
install_requires=[
'six>=1.6.1'
]
)
| mit | Python |
bd4556dcd663f78aaa50d247e198e8e2d7604d52 | Add a setup.py file to ease setuptools installation, thanks Ariel Nunez | e-loue/django-oauth-plus | setup.py | setup.py | from setuptools import setup, find_packages
setup(
    name='django-oauth',
    version='0.1',
    description='Support of OAuth in Django.',
    author='David Larlet',
    author_email='david@larlet.fr',
    url='http://code.welldev.org/django-oauth/',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    # Make setuptools include all data files under version control in the
    # built distribution.
    include_package_data=True,
    zip_safe=False,
    # Download the Mercurial setuptools plugin before running setup.py so
    # setuptools can discover data files tracked under Hg version control.
    # (The original comment mentioned setuptools_git, but the requirement
    # below is the Hg plugin -- NOTE(review): confirm the intended VCS.)
    setup_requires=['hg.setuptools'],
)
| bsd-3-clause | Python | |
f8a22b7f58b29031adf23a9b91748ccc0909d141 | Add 'setup.py' #3 | 7pairs/toggl2html,7pairs/toggl2html | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup

from toggl2html import __version__

# Packaging metadata for toggl2html; the version is sourced from the
# package itself so it lives in a single place.
setup(
    name='toggl2html',
    version=__version__,
    description='Tools for converting Toggl CSV to HTML',
    author='Jun-ya HASEBA',
    author_email='7pairs@gmail.com',
    url='http://seven-pairs.hatenablog.jp/',
    packages=find_packages(),
    install_requires=['Jinja2', 'docopt'],
    # Console entry point: installs a `toggl2html` command dispatching to
    # toggl2html.toggl2html:main.
    entry_points="""\
[console_scripts]
toggl2html = toggl2html.toggl2html:main
"""
)
| apache-2.0 | Python | |
d2f6bf7a0ee1e3387d007b39832ef61b49a5c9c4 | Add config | authmillenon/wakefs | wakefs/config.py | wakefs/config.py | import ConfigParser, os.path
class Config(object):
    """Singleton configuration object backed by an INI file (Python 2).

    Options stored in the [DEFAULT] section (currently ``database_uri``)
    are exposed as instance attributes via the ``__getattribute__`` /
    ``__setattr__`` / ``__delattr__`` overrides below.
    """

    _configfile = os.path.expanduser('~/.wakefs/config')

    def __new__(cls, *args):
        # Classic singleton: build the one instance lazily and hand it back
        # on every subsequent Config(...) construction.
        if not '_the_instance' in cls.__dict__:
            cls._the_instance = object.__new__(cls)
        return cls._the_instance

    def __init__(self, configfile=None):
        # Guard so the shared instance is only initialised once.
        if not '_ready' in dir(self):
            self._ready = True
            if configfile != None:
                Config._configfile = str(configfile)
            configdir = os.path.dirname(Config._configfile)
            self._parser = ConfigParser.SafeConfigParser({
                'database_uri': 'sqlite://'+os.path.expanduser('~/.wakefs/db.sqlite'),
            })
            if not os.path.exists(configdir) and len(configdir) > 0:
                # NOTE(review): os.mkdir only creates the leaf directory;
                # use os.makedirs if nested config paths must be supported.
                os.mkdir(configdir)
            else:
                if os.path.exists(Config._configfile):
                    # BUG FIX: the original read `configfile`, which is None
                    # when the default path is used; always read the
                    # resolved class-level path instead.
                    self._parser.read(Config._configfile)

    def __getattribute__(self, name):
        # `_parser` itself must bypass the parser lookup below, otherwise
        # the `self._parser` access would recurse forever.
        if name == "_parser":
            return object.__getattribute__(self, name)
        # Look the name up in the config file first; fall back to regular
        # attribute lookup when it is not a stored option.
        try:
            value = self._parser.get("DEFAULT", name)
            return value
        except ConfigParser.NoOptionError:
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        # Persistable options are routed into the parser; everything else
        # becomes a plain attribute.
        if name == 'database_uri':
            self._parser.set("DEFAULT", "database_uri", value)
        else:
            object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == 'database_uri':
            self._parser.remove_option("DEFAULT", "database_uri")
        else:
            object.__delattr__(self, name)

    def close(self):
        """Flush the configuration to disk and drop the singleton."""
        configfile = open(Config._configfile, 'wb')
        self._parser.write(configfile)
        configfile.close()
        # BUG FIX: the original reset `Config._config`, an attribute that is
        # never defined anywhere; reset the real singleton slot so a fresh
        # instance can be created after close().
        Config._the_instance = None

    def __del__(self):
        # BUG FIX: __del__ may run during interpreter shutdown when module
        # globals are already torn down, or after an explicit close(); an
        # exception must never escape a destructor.
        try:
            self.close()
        except Exception:
            pass
| mit | Python | |
21ccfab04b57e71af706648fac6d6d01fa2b8500 | add setup file | Hyperyon/p3-labyrinthe | setup.py | setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

from setuptools import setup, find_packages
import labyrinth
# NOTE(review): `labyrinth` is imported but unused below -- the version is
# hard-coded; confirm whether it should come from the package instead.

# Runtime dependencies are read from requirements.txt at build time,
# one requirement per line.
with open('requirements.txt') as f:
    requires = f.read().split('\n')

setup(
    name='oc-labyrinth',
    version=3.2,  # NOTE(review): setuptools expects a string version
    packages=find_packages(),
    install_requires=requires,
    author='Nico Zhan',
    author_email='nicozhan@hyperloop.fr',
    description='Help Mc Gyver to leave the maze',
    long_description=open('README.md').read(),
    # include file from manifest.in
    include_package_data=True,
    url='https://github.com/Hyperyon/p3-labyrinthe',
    classifiers=[
        'Programming Language :: Python',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Topic :: Game',
    ],
)
) | mit | Python | |
b81d7c8d54caf4e334e1cbe550223d9c112a6de5 | Add setup.py file for doing pip installs | qdot/np-telegram-bot,qdot/np-telegram-bot | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup

# Packaging metadata for the NP Labs Telegram bot base code.
_metadata = dict(
    name='np-telegram-bot',
    version='0.1',
    description='NP Labs Telegram Bot Base Code',
    author='Kyle Machulis',
    author_email='kyle@machul.is',
    url='https://github.com/qdot/np-telegram-bot/',
    packages=["nptelegrambot"],
)

setup(**_metadata)
| bsd-3-clause | Python | |
cc40de21219934e50a840305bc9416b71e9eb2ed | add bominn.py | choznerol/c4lab-git-tutorial | exercise-1_from-pull-to-push/bominn.py | exercise-1_from-pull-to-push/bominn.py | print('bomin')
| mit | Python | |
585c9b6f1c8bf186fee34303ba29b7b511c1ba7e | Add a script to add mapit area IDs to new Place objects for 2013 | geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,hzj123/56th,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola | mzalendo/core/management/commands/core_match_places_to_mapit_areas_2013.py | mzalendo/core/management/commands/core_match_places_to_mapit_areas_2013.py | import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
    """Attach MapIt areas to the 2013 Place objects by matching slugs."""

    help = 'Link places to areas in mapit for the new 2013 places'
    option_list = NoArgsCommand.option_list + (
        make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
    )

    def handle_noargs(self, **options):
        # Constituencies ('con' areas in MapIt generation 3) belong to the
        # 2013 National Assembly session; counties ('dis' areas) belong to
        # the 2013 Senate session and use a '<name>-county' style slug.
        self.match_for_types(type_code='con',
                             mapit_generation=3,
                             place_kind_slug='constituency',
                             session_slug='na2013',
                             commit=options['commit'])
        self.match_for_types(type_code='dis',
                             mapit_generation=3,
                             place_kind_slug='county',
                             session_slug='s2013',
                             commit=options['commit'],
                             suffix=True)

    def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
        """Set Place.mapit_area for every place of one kind/session pair.

        Areas of `type_code` alive in `mapit_generation` are matched to
        places by slugified name; `suffix` chooses between a
        '<name>-<kind slug>' slug and a '<name>-2013' slug. Nothing is
        saved unless `commit` is true; unmatched items on either side are
        reported for manual follow-up.
        """
        # Get these even if not used so that we know that they exist
        area_type = mapit_models.Type.objects.get( code = type_code )
        generation = mapit_models.Generation.objects.get( pk = mapit_generation )
        place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
        session = models.ParliamentarySession.objects.get(slug = session_slug)
        # Find all relevant areas to match (alive in the given generation)
        areas = mapit_models.Area.objects.filter(type=area_type,
                                                 generation_low__lte=generation,
                                                 generation_high__gte=generation)
        # Track every candidate place so leftovers can be reported below.
        all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
        for area in areas:
            # Use the slug for matching, easiest way to normalize
            slug = slugify( area.name )
            if suffix:
                slug += '-' + place_kind.slug
            else:
                slug += '-2013'
            # find it and update, or print out an error for a human to follow up
            try:
                place = models.Place.objects.get(slug=slug,
                                                 kind=place_kind,
                                                 parliamentary_session=session)
                place.mapit_area = area
                if commit:
                    print >> sys.stderr, "Saving", place
                    place.save()
                else:
                    print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
                all_places.discard(place)
            except models.Place.DoesNotExist:
                print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
        # Anything still in the set never matched a MapIt area.
        if all_places:
            for place in all_places:
                print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
| agpl-3.0 | Python | |
f107494d841ffd9adeecc49014e9334c98279385 | Make author name ASCII | s0undt3ch/powerline,firebitsbr/powerline,DoctorJellyface/powerline,cyrixhero/powerline,QuLogic/powerline,darac/powerline,xfumihiro/powerline,kenrachynski/powerline,dragon788/powerline,wfscheper/powerline,EricSB/powerline,xxxhycl2010/powerline,DoctorJellyface/powerline,IvanAli/powerline,keelerm84/powerline,DoctorJellyface/powerline,IvanAli/powerline,junix/powerline,Liangjianghao/powerline,kenrachynski/powerline,junix/powerline,lukw00/powerline,seanfisk/powerline,S0lll0s/powerline,blindFS/powerline,EricSB/powerline,lukw00/powerline,bezhermoso/powerline,lukw00/powerline,s0undt3ch/powerline,Liangjianghao/powerline,russellb/powerline,magus424/powerline,Luffin/powerline,prvnkumar/powerline,bezhermoso/powerline,darac/powerline,xxxhycl2010/powerline,firebitsbr/powerline,bartvm/powerline,bartvm/powerline,kenrachynski/powerline,S0lll0s/powerline,QuLogic/powerline,Luffin/powerline,xxxhycl2010/powerline,seanfisk/powerline,S0lll0s/powerline,firebitsbr/powerline,areteix/powerline,dragon788/powerline,dragon788/powerline,s0undt3ch/powerline,cyrixhero/powerline,areteix/powerline,xfumihiro/powerline,seanfisk/powerline,QuLogic/powerline,magus424/powerline,blindFS/powerline,Liangjianghao/powerline,russellb/powerline,keelerm84/powerline,magus424/powerline,prvnkumar/powerline,xfumihiro/powerline,EricSB/powerline,wfscheper/powerline,wfscheper/powerline,Luffin/powerline,cyrixhero/powerline,areteix/powerline,prvnkumar/powerline,bezhermoso/powerline,darac/powerline,bartvm/powerline,junix/powerline,IvanAli/powerline,blindFS/powerline,russellb/powerline | setup.py | setup.py | #!/usr/bin/env python
# vim:fileencoding=utf-8:noet

from __future__ import unicode_literals

import os
import sys

from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))

# Use README.rst as the long description when present; it may be missing in
# some installation contexts, in which case fall back to an empty string.
try:
    README = open(os.path.join(here, 'README.rst'), 'rb').read().decode('utf-8')
except IOError:
    README = ''

# Pre-2.7 interpreters are treated specially: the test_suite entry below is
# disabled for them.
old_python = sys.version_info < (2, 7)

setup(
    name='Powerline',
    version='beta',
    description='The ultimate statusline/prompt utility.',
    long_description=README,
    classifiers=[],
    # Author name deliberately ASCII-transliterated to avoid encoding
    # problems in packaging tools.
    author='Kim Silkebaekken',
    author_email='kim.silkebaekken+vim@gmail.com',
    url='https://github.com/Lokaltog/powerline',
    scripts=[
        'scripts/powerline',
        'scripts/powerline-lint',
    ],
    keywords='',
    packages=find_packages(exclude=('tests', 'tests.*')),
    include_package_data=True,
    zip_safe=False,
    install_requires=[],
    extras_require={
        'docs': [
            'Sphinx',
        ],
    },
    test_suite='tests' if not old_python else None,
)
| #!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import unicode_literals
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst'), 'rb').read().decode('utf-8')
except IOError:
README = ''
old_python = sys.version_info < (2, 7)
setup(
name='Powerline',
version='beta',
description='The ultimate statusline/prompt utility.',
long_description=README,
classifiers=[],
author='Kim Silkebækken',
author_email='kim.silkebaekken+vim@gmail.com',
url='https://github.com/Lokaltog/powerline',
scripts=[
'scripts/powerline',
'scripts/powerline-lint',
],
keywords='',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
install_requires=[],
extras_require={
'docs': [
'Sphinx',
],
},
test_suite='tests' if not old_python else None,
)
| mit | Python |
1836748476be53b7a65a5a07ae900837e0283298 | Add Django 1.9 to setup.py | keimlink/django-cms,jsma/django-cms,jproffitt/django-cms,evildmp/django-cms,netzkolchose/django-cms,yakky/django-cms,mkoistinen/django-cms,bittner/django-cms,bittner/django-cms,jsma/django-cms,divio/django-cms,evildmp/django-cms,jsma/django-cms,FinalAngel/django-cms,rsalmaso/django-cms,FinalAngel/django-cms,timgraham/django-cms,czpython/django-cms,benzkji/django-cms,netzkolchose/django-cms,timgraham/django-cms,czpython/django-cms,datakortet/django-cms,czpython/django-cms,bittner/django-cms,benzkji/django-cms,jproffitt/django-cms,FinalAngel/django-cms,czpython/django-cms,mkoistinen/django-cms,datakortet/django-cms,rsalmaso/django-cms,timgraham/django-cms,yakky/django-cms,nimbis/django-cms,mkoistinen/django-cms,evildmp/django-cms,nimbis/django-cms,evildmp/django-cms,rsalmaso/django-cms,netzkolchose/django-cms,jsma/django-cms,vxsx/django-cms,datakortet/django-cms,keimlink/django-cms,bittner/django-cms,keimlink/django-cms,jproffitt/django-cms,vxsx/django-cms,divio/django-cms,vxsx/django-cms,FinalAngel/django-cms,nimbis/django-cms,vxsx/django-cms,divio/django-cms,benzkji/django-cms,divio/django-cms,yakky/django-cms,yakky/django-cms,mkoistinen/django-cms,benzkji/django-cms,netzkolchose/django-cms,jproffitt/django-cms,nimbis/django-cms,datakortet/django-cms,rsalmaso/django-cms | setup.py | setup.py | from setuptools import setup, find_packages
import os
import cms
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
]
INSTALL_REQUIREMENTS = [
'Django>=1.6.9,<1.10',
'django-classy-tags>=0.5',
'html5lib>=0.90,!=0.9999,!=0.99999',
'django-formtools>=1.0',
'django-treebeard>=3.0',
'django-sekizai>=0.7',
'djangocms-admin-style',
]
#
# NOTE: divio/django-formtools is IDENTICAL to django/django-formtools except
# that its Django requirement has been relaxed to >=Django>=1.6. This is because
# this version of django CMS supports Django 1.6+. Internally, CMS will use
# django.contrib.formtools when available, then look for the external version if
# required. Unfortunately, SetupTools doesn't allow use to load the external
# library when using Django 1.7+ only.
#
# Further note that dependency links do not work by default. Current versions of
# Pip support it with the flag `--process-dependency-links`
#
# Remove these machinations in CMS v3.3 when Django 1.6 support is dropped.
#
DEPENDENCY_LINKS = [
"https://github.com/divio/django-formtools/archive/master.zip#egg=django-formtools",
]
# BUG FIX: read the long description up front so the file handle is closed
# promptly; the original inline open(...).read() leaked the handle
# (ResourceWarning under CPython, never closed on other interpreters).
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

setup(
    author='Patrick Lauber',
    author_email='digi@treepy.com',
    name='django-cms',
    version=cms.__version__,
    description='An Advanced Django CMS',
    long_description=README,
    url='https://www.django-cms.org/',
    license='BSD License',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIREMENTS,
    dependency_links=DEPENDENCY_LINKS,
    extras_require={
        'south': ['south>=1.0.0'],
    },
    packages=find_packages(exclude=['project', 'project.*']),
    include_package_data=True,
    zip_safe=False,
    test_suite='runtests.main',
)
| from setuptools import setup, find_packages
import os
import cms
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
]
INSTALL_REQUIREMENTS = [
'Django>=1.6.9,<1.9',
'django-classy-tags>=0.5',
'html5lib>=0.90,!=0.9999,!=0.99999',
'django-formtools>=1.0',
'django-treebeard>=3.0',
'django-sekizai>=0.7',
'djangocms-admin-style',
]
#
# NOTE: divio/django-formtools is IDENTICAL to django/django-formtools except
# that its Django requirement has been relaxed to >=Django>=1.6. This is because
# this version of django CMS supports Django 1.6+. Internally, CMS will use
# django.contrib.formtools when available, then look for the external version if
# required. Unfortunately, SetupTools doesn't allow use to load the external
# library when using Django 1.7+ only.
#
# Further note that dependency links do not work by default. Current versions of
# Pip support it with the flag `--process-dependency-links`
#
# Remove these machinations in CMS v3.3 when Django 1.6 support is dropped.
#
DEPENDENCY_LINKS = [
"https://github.com/divio/django-formtools/archive/master.zip#egg=django-formtools",
]
setup(
author='Patrick Lauber',
author_email='digi@treepy.com',
name='django-cms',
version=cms.__version__,
description='An Advanced Django CMS',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://www.django-cms.org/',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIREMENTS,
dependency_links=DEPENDENCY_LINKS,
extras_require={
'south': ['south>=1.0.0'],
},
packages=find_packages(exclude=['project', 'project.*']),
include_package_data=True,
zip_safe=False,
test_suite='runtests.main',
)
| bsd-3-clause | Python |
b5767a9dbc90e8252ca0885212f33df08a5a3e66 | Create setup.py | tuxerr/IA | setup.py | setup.py | import sys
from cx_Freeze import setup, Executable
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(
name = "IA",
version = "0.1",
description = "IA",
options = {"build_exe" : {"includes" : ["atexit","re"],"include_files":["resources"]}},
executables = [Executable("main.py", base = base)])
| mit | Python | |
c82062bced242d0aa4675f2a66620c99149dfb63 | bump to dev version | phpdude/django-markitup,zsiciarz/django-markitup,carljm/django-markitup,dustinfarris/django-markitup,rvasilev/django-markitup-widget,senturio/django-markitup,WimpyAnalytics/django-markitup,senturio/django-markitup,zsiciarz/django-markitup,Pyha/django-markitup-py3.3,carljm/django-markitup,phpdude/django-markitup,WimpyAnalytics/django-markitup,WimpyAnalytics/django-markitup,dustinfarris/django-markitup,zsiciarz/django-markitup,carljm/django-markitup,rvasilev/django-markitup-widget,Pyha/django-markitup-py3.3 | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='django-markitup',
version='0.3.1dev',
description='Django integration with the MarkItUp universal markup editor',
long_description=open('README.txt').read(),
author='Carl Meyer',
author_email='carl@dirtcircle.com',
url='http://bitbucket.org/carljm/django-markitup/',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
zip_safe=False,
package_data={'markitup': ['templates/markitup/*.html',
'media/markitup/*.*',
'media/markitup/sets/*/*.*',
'media/markitup/sets/*/images/*.png',
'media/markitup/skins/*/*.*',
'media/markitup/skins/*/images/*.png',
'media/markitup/templates/*.*']}
)
| from setuptools import setup, find_packages
setup(
name='django-markitup',
version='0.3.0',
description='Django integration with the MarkItUp universal markup editor',
long_description=open('README.txt').read(),
author='Carl Meyer',
author_email='carl@dirtcircle.com',
url='http://bitbucket.org/carljm/django-markitup/',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
zip_safe=False,
package_data={'markitup': ['templates/markitup/*.html',
'media/markitup/*.*',
'media/markitup/sets/*/*.*',
'media/markitup/sets/*/images/*.png',
'media/markitup/skins/*/*.*',
'media/markitup/skins/*/images/*.png',
'media/markitup/templates/*.*']}
)
| bsd-3-clause | Python |
aa0839cb8f26d2b80e53e2dec41ddcd4a66003f4 | Update setup.py | gschizas/praw,gschizas/praw,praw-dev/praw,praw-dev/praw | setup.py | setup.py | """praw setup.py"""
import re
from codecs import open
from os import path
from setuptools import find_packages, setup
PACKAGE_NAME = "praw"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="bbzbryce@gmail.com",
python_requires=">=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
description=(
"PRAW, an acronym for `Python Reddit API Wrapper`, is a "
"python package that allows for simple access to "
"reddit's API."
),
extras_require={"dev": ["pre-commit"]},
install_requires=[
"prawcore >=1.0.1, <2.0",
"update_checker >=0.16",
"websocket-client >=0.54.0",
],
keywords="reddit api wrapper",
license="Simplified BSD License",
long_description=README,
package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
packages=find_packages(exclude=["tests", "tests.*"]),
setup_requires=["pytest-runner >=2.1"],
tests_require=[
"betamax >=0.8, <0.9",
"betamax-matchers >=0.3.0, <0.5",
"betamax-serializers >=0.2, <0.3",
"mock >=0.8",
"pytest >=2.7.3",
],
test_suite="tests",
url="https://praw.readthedocs.org/",
version=VERSION,
)
| """praw setup.py"""
import re
from codecs import open
from os import path
from setuptools import find_packages, setup
PACKAGE_NAME = "praw"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="bbzbryce@gmail.com",
python_requires=">=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
description=(
"PRAW, an acronym for `Python Reddit API Wrapper`, is a "
"python package that allows for simple access to "
"reddit's API."
),
extras_require={"dev": ["pre-commit"]},
install_requires=[
"prawcore >=1.0.1, <2.0",
"update_checker >=0.16",
"websocket-client >=0.54.0",
],
keywords="reddit api wrapper",
license="Simplified BSD License",
long_description=README,
package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
packages=find_packages(exclude=["tests", "tests.*"]),
setup_requires=["pytest-runner >=2.1"],
tests_require=[
"betamax >=0.8, <0.9",
"betamax-matchers >=0.3.0, <0.5",
"betamax-serializers >=0.2, <0.3",
"mock >=0.8",
"pytest >=2.7.3",
],
test_suite="tests",
url="https://praw.readthedocs.org/",
version=VERSION,
)
| bsd-2-clause | Python |
a84d2abc0f601df0d889ea08dee8ba88593f401d | Create setup.py | fnielsen/emocapper | setup.py | setup.py | """Installation and setup configuration."""
from setuptools import setup
setup(
name='emocapper',
description="Installation and setup configuration",
py_modules=['emocapper'],
entry_points={
'console_scripts': [
'emocapper = emocapper:main',
],
},
)
| bsd-3-clause | Python | |
fe68f04fe89e50183cd413ae8833a487bc9bd0d0 | Define random walk traversal | peplin/trinity | stats/random_walk.py | stats/random_walk.py | import neo4j
import random
from logbook import Logger
log = Logger('trinity.topics')
DEFAULT_DEPTH = 5
NUM_WALKS = 100
# Passed sorted list (desc order), return top nodes
TO_RETURN = lambda x: x[:10]
random.seed()
def random_walk(graph, node, depth=DEFAULT_DEPTH):
# Pick random neighbor
neighbors = {}
i = 0
for r in node.relationships().outgoing:
#TODO replace with i + r['count']
neighbors[(i, i + 1)] = r.getOtherNode(node)
i += 1
choice = random.range(i)
for x,y in neighbors:
if x <= i and i < y:
return [node].extend(random_walk(graph, neighbors[(x,y)], depth-1))
def run(graph, index, node):
nodes = {}
for i in range(NUM_WALKS):
with graph.transaction:
walked_nodes = random_walk(graph, node)
# Loop through nodes (that aren't the start node), count
for n in filter(lambda m: m.id != node.id, walked_nodes):
if nodes.has_key(n):
nodes[n]++
else
nodes[n] = 1
return TO_RETURN(sorted(nodes, key=nodes.__getitem__))
| mit | Python | |
24db3e122ffa68cf64738ffd6fd2f6ef1a51142b | add setup.py | googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool | setup.py | setup.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
name = 'synthtool'
description = ''
version = '0.1.0'
release_status = 'Development Status :: 3 - Alpha'
dependencies = [
"googleapis-artman >= 0.11.0",
"colorlog < 3.0.0",
"jinja2"
]
packages = ['synthtool']
setuptools.setup(
name=name,
version=version,
description=description,
author='Google LLC',
author_email='googleapis-packages@oogle.com',
license='Apache 2.0',
url='',
classifiers=[
release_status,
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Topic :: Internet',
],
platforms='Posix; MacOS X; Windows',
packages=packages,
install_requires=dependencies,
include_package_data=True,
zip_safe=False,
)
| apache-2.0 | Python | |
e1a38a3085b5381b5f61a3aeb5d0fdf6fff80502 | add setup.py visual.py | what-studio/gauge | setup.py | setup.py | # -*- coding: utf-8 -*-
"""
"""
from __future__ import with_statement
import re
from setuptools import setup
from setuptools.command.test import test
# detect the current version
with open('gauge.py') as f:
version = re.search(r'__version__\s*=\s*\'(.+?)\'', f.read()).group(1)
assert version
# use pytest instead
def run_tests(self):
raise SystemExit(__import__('pytest').main([]))
test.run_tests = run_tests
setup(
name='gauge',
version=version,
license='BSD',
author='Heungsub Lee',
author_email=re.sub('((sub).)(.*)', r'\2@\1.\3', 'sublee'),
url='https://github.com/sublee/gauge',
description='Deterministic linear gauge library',
long_description=__doc__,
platforms='any',
py_modules=['gauge'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Games/Entertainment'],
install_requires=['blist'],
tests_require=['pytest'],
)
| bsd-3-clause | Python | |
93504c41694501ef815dc0caf894e381a15140ae | Add an example REPL | asmeurer/iterm2-tools | example_repl.py | example_repl.py | #!/usr/bin/env python
from __future__ import print_function
import sys
if sys.version_info < (3,):
input = raw_input
from iterm2_tools.shell_integration import Prompt, Output
def run_command(text):
if text:
print("I got the text", text)
return 1 if ' ' in text else 0
if __name__ == '__main__':
print("""
Welcome to an example REPL
If you are using a new enough version of iTerm2, you should see a blue arrow
next to "input>" below. Enter some text. Some things to try:
- Use Cmd-Shift-Up and Cmd-Shift-Down to cycle through inputs.
- If the text has a space in it its error code will 1. The arrow next to
that input should turn red.
- Right click on an "input>" and choose "Command Info" to see information
on that "command".
- Use Cmd-Shift-A to select the output from the previous "command".
Type Ctrl-D to exit.
""")
while True:
with Prompt():
print("input> ", end='')
try:
text = input()
except EOFError:
break
with Output() as o:
return_val = run_command(text)
o.set_command_status(return_val)
| mit | Python | |
be37775945e92deea028d0dbc6b7e28fe96de400 | add setup.py | mrocklin/multipolyfit | setup.py | setup.py | from os.path import exists
from setuptools import setup
setup(name='multipolyfit',
version='0.0.1',
description='Multivariate Polynomial fitting with NumPy',
url='http://github.com/mrocklin/multipolyfit',
author='Matthew Rocklin',
author_email='mrocklin@gmail.com',
license='BSD',
packages=['multipolyfit'],
long_description=open('README.md').read() if exists("README.md") else "",
zip_safe=False)
| bsd-3-clause | Python | |
36f6a591333d6e9a6d216abff7db3b5353129ee4 | Add a install script. | supertask/KSUFucker,supertask/KSUFucker,supertask/KSUFucker,supertask/KSUHack,supertask/KSUHack,supertask/KSUFucker,supertask/KSUHack,supertask/KSUFucker | install_pages.py | install_pages.py | from datetime import date
import subprocess
"""
g0947064
http://www.cse.kyoto-su.ac.jp/~g0947343/webcom/
"""
#student_type_dict = {"g": "B", "i": "M"} # B=Bachelor, M=Master
def get_year(grade, date_today):
"""Estimates a year from grade using a date.
example:
today -> 2016
1,2,3,4 -> 2016,2015,2014,2013
"""
if date_today.month < 4:
freshman_year = date_today.year - 1
else:
freshman_year = date_today.year
return freshman_year - grade + 1
def get_student_IDs(student_type, department, grades=[1,2,3,4]):
student_IDs = []
date_today = date.today()
department = str(department)
for a_grade in grades:
a_year = str(get_year(a_grade, date_today))
student_number_head = a_year[-1] + department
student_ID_head = student_type + a_year[-2:] + department
for index in range(0000, 10000): #0000~9999
student_number = student_number_head + str(index).zfill(4)
student_ID = student_ID_head + str(index).zfill(4)
combined_number = sum([int(c) for c in student_number])
if combined_number % 10 == 0:
student_IDs.append([a_grade, student_ID])
return student_IDs
#year = datetime.datetime.now().year
# 2016=1, 2015=2, 2014=3, 2013=4, 2012=5
domains = ["http://www.cse.kyoto-su.ac.jp", "http://www.cc.kyoto-su.ac.jp"]
each_folders = [
[
"index.html",
"index-j.html"
],
None,
[
"SouriDaijin/",
"PL/",
"PL/SouriDaijin/",
"webcom/index.html",
"webcom/report03.html",
"webcom/report04.html",
"webcom/report05.html",
"webcom/1-3.html",
"webcom/2-1.html",
"webcom/2-2.html",
"webcom/2-4.html",
"webcom/2-6.html",
],
None
]
student_IDs = []
student_IDs += get_student_IDs(student_type="g", department=4, grades=[1,2,3,4])
#student_IDs += get_student_IDs(student_type="i", department=5, grades=[1,2]) #gradstudent
page_cnt = 0
for domain in domains:
for grade, student_ID in student_IDs:
student_url = "%s/~%s/" % (domain, student_ID)
folders = []
for f in each_folders[:grade]:
if f: folders+=f
for relative_path in folders:
url = student_url + relative_path
#subprocess.call(["wget","-r", url])
page_cnt+=1
print url
print page_cnt
| mit | Python | |
2b7536f3919d77a7c7a815f895a10221f547ddc0 | Add setup.py | jhlegarreta/ITKCuberille,thewtex/ITKCuberille,thewtex/ITKCuberille,InsightSoftwareConsortium/ITKCuberille,jhlegarreta/ITKCuberille,jhlegarreta/ITKCuberille,thewtex/ITKCuberille,InsightSoftwareConsortium/ITKCuberille,InsightSoftwareConsortium/ITKCuberille | setup.py | setup.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-cuberille',
version='0.1.0',
author='Dan Mueller',
author_email='itk+community@discourse.itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKCuberille',
description=r'This module implements cuberille implicit surface polygonization for ITK.',
long_description=r'This method operates by diving the surface into a number of small cubes called cuberilles. Each cuberille is centered at a pixel lying on the iso-surface and then quadrilaterals are generated for each face. The original approach is improved by projecting the vertices of each cuberille onto the implicit surface, smoothing the typical block-like resultant mesh.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
| apache-2.0 | Python | |
3930398eab788a045bdd3f960ef0b1515b6bcf3e | Add views to display list of current projects accepted in the program. | SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange | app/soc/modules/gsoc/views/projects_list.py | app/soc/modules/gsoc/views/projects_list.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for listing all the projects accepted
into a GSoC program.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from django.conf.urls.defaults import url
from soc.logic.exceptions import AccessViolation
from soc.views.template import Template
from soc.modules.gsoc.logic.models.student_project import logic as sp_logic
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import lists
from soc.modules.gsoc.views.helper import url_patterns
class ProjectList(Template):
"""Template for listing the student projects accepted in the program.
"""
def __init__(self, request, data):
self.request = request
self.data = data
list_config = lists.ListConfiguration()
list_config.addColumn('student', 'Student',
lambda entity, *args: entity.student.user.name)
list_config.addSimpleColumn('title', 'Title')
list_config.addColumn('org', 'Organization',
lambda entity, *args: entity.scope.name)
list_config.addColumn('mentor', 'Mentor',
lambda entity, *args: entity.mentor.user.name)
self._list_config = list_config
def context(self):
list = lists.ListConfigurationResponse(
self._list_config, idx=0,
description='List of projects accepted into %s' % (
self.data.program.name))
return {
'lists': [list],
}
def getListData(self):
"""Returns the list data as requested by the current request.
If the lists as requested is not supported by this component None is
returned.
"""
idx = lists.getListIndex(self.request)
if idx == 0:
fields = {'program': self.data.program,
'status': 'accepted'}
response_builder = lists.QueryContentResponseBuilder(
self.request, self._list_config, sp_logic,
fields)
return response_builder.build()
else:
return None
def templatePath(self):
return "v2/modules/gsoc/projects_list/_project_list.html"
class ListProjects(RequestHandler):
"""View methods for listing all the projects accepted into a program.
"""
def templatePath(self):
return 'v2/modules/gsoc/projects_list/base.html'
def djangoURLPatterns(self):
"""Returns the list of tuples for containing URL to view method mapping.
"""
return [
url(r'^gsoc/list_projects/%s$' % url_patterns.PROGRAM, self,
name='gsoc_accepted_projects')
]
def checkAccess(self):
"""Access checks for the view.
"""
pass
def jsonContext(self):
"""Handler for JSON requests.
"""
list_content = ProjectList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
"""Handler for GSoC Accepted Projects List page HTTP get request.
"""
program = self.data.program
return {
'page_name': '%s - Accepted Projects' % program.short_name,
'program_name': program.name,
'project_list': ProjectList(self.request, self.data),
}
| apache-2.0 | Python | |
458a98b3c70d5990f8e8c5b4e412342a58ac31bf | Create setup.py | alunmorgan/testing_integration | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name="integration_testing",
author = "Alun Morgan",
version = "0.1",
packages = find_packages(),
install_requires = ['numpy'],
)
#
| apache-2.0 | Python | |
e8c8fde695803320096d58a6b8103210d566f2b0 | Add whitelist for the threading module (#150) | jendrikseipp/vulture,jendrikseipp/vulture | vulture/whitelists/threading_whitelist.py | vulture/whitelists/threading_whitelist.py | import threading
threading.Thread.daemon
threading.Thread.name
threading.Thread.run
| mit | Python | |
4f95d4d73fc9823aa59737fbebc81373ff3694dc | Create setup.py | mthbernardes/tempMail | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='tempMail',
version='1.0',
url='https://github.com/mthbernardes/tempMail',
license='MIT License',
author='Matheus Bernardes',
author_email='mthbernardes@gmail.com',
keywords='email temporary',
description=u'Module to generate an temporary e-mail',
packages=['tempMail'],
install_requires=['requests','lxml'],
)
| mit | Python | |
34ff26caef5c67eb603179a631db69ab527b494b | Complete bencoding encode/decode methods | dionyziz/downpour | bencoding.py | bencoding.py | def decode( data ):
def decode_one( data ):
if data[ 0 ] == 'i':
# data is an integer
pos = data.index( 'e' )
return ( int( data[ 1:pos ] ), data[ ( pos + 1 ): ] )
if data[ 0 ] == 'l':
# data is a list
data = data[ 1: ]
l = []
while data[ 0 ] != 'e':
( item, data ) = decode_one( data )
l.append( item )
return ( l, data[ 1: ] )
if data[ 0 ] == 'd':
# data is a dictionary
data = data[ 1: ]
d = {}
while data[ 0 ] != 'e':
( key, data ) = decode_one( data )
( value, data ) = decode_one( data )
d[ key ] = value
return ( d, data[ 1: ] )
# default case: data is a string
pos = data.index( ':' )
length = int( data[ 0:pos ] )
data = data[ ( pos + 1 ): ]
return ( data[ 0:length ], data[ length: ] )
return decode_one( data )[ 0 ]
def encode( data ):
if type( data ) is str:
return str( len( data ) ) + ':' + data
if type( data ) is int:
return 'i' + str( data ) + 'e'
if type( data ) is list:
return 'l' + ''.join( map( encode, data ) ) + 'e'
if type( data ) is dict:
flattened = [ item for sublist in data.items() for item in sublist ]
encoded = map( encode, flattened )
joined = ''.join( encoded )
return 'd' + joined + 'e'
| mit | Python | |
6fc950932e4af5428b323cacb82eb5c6b89b1a68 | Add a gallery example showing individual basic geometric symbols (#1211) | GenericMappingTools/gmt-python,GenericMappingTools/gmt-python | examples/gallery/symbols/basic_symbols.py | examples/gallery/symbols/basic_symbols.py | """
Basic geometric symbols
-----------------------
The :meth:`pygmt.Figure.plot` method can plot individual geometric symbols
by passing the corresponding shortcuts to the ``style`` parameter. The 14 basic
geometric symbols are shown underneath their corresponding shortcut codes.
Four symbols (**-**, **+**, **x** and **y**) are line-symbols only for which we
can adjust the linewidth via the ``pen`` parameter. The point symbol (**p**)
only takes a color fill which we can define via the ``color`` parameter. For the
remaining symbols we may define a linewidth as well as a color fill.
"""
import pygmt
fig = pygmt.Figure()
fig.basemap(region=[0, 8, 0, 3], projection="X12c/4c", frame=True)
# define fontstlye for annotations
font = "15p,Helvetica-Bold"
# upper row
y = 2
# use a dash in x direction (-) with a size of 0.9 cm,
# linewidth is set to 2p and the linecolor to "gray40"
fig.plot(x=1, y=y, style="-0.9c", pen="2p,gray40")
fig.text(x=1, y=y + 0.6, text="-", font=font)
# use a plus (+) with a size of 0.9 cm,
# linewidth is set to 2p and the linecolor to "gray40"
fig.plot(x=2, y=y, style="+0.9c", pen="2p,gray40")
fig.text(x=2, y=y + 0.6, text="+", font=font)
# use a star (a) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" (default) and the
# color fill to "darkorange"
fig.plot(x=3, y=y, style="a0.9c", pen="1p,black", color="darkorange")
fig.text(x=3, y=y + 0.6, text="a", font=font)
# use a circle (c) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "darkred"
fig.plot(x=4, y=y, style="c0.9c", pen="1p,black", color="darkred")
fig.text(x=4, y=y + 0.6, text="c", font=font)
# use a diamond (d) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "seagreen"
fig.plot(x=5, y=y, style="d0.9c", pen="1p,black", color="seagreen")
fig.text(x=5, y=y + 0.6, text="d", font=font)
# use a octagon (g) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "dodgerblue4"
fig.plot(x=6, y=y, style="g0.9c", pen="1p,black", color="dodgerblue4")
fig.text(x=6, y=y + 0.6, text="g", font=font)
# use a hexagon (h) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "lightgray"
fig.plot(x=7, y=y, style="h0.9c", pen="1p,black", color="lightgray")
fig.text(x=7, y=y + 0.6, text="h", font=font)
# lower row
y = 0.5
# use an inverted triangle (i) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "tomato"
fig.plot(x=1, y=y, style="i0.9c", pen="1p,black", color="tomato")
fig.text(x=1, y=y + 0.6, text="i", font=font)
# use pentagon (n) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "lightseagreen"
fig.plot(x=2, y=y, style="n0.9c", pen="1p,black", color="lightseagreen")
fig.text(x=2, y=y + 0.6, text="n", font=font)
# use a point (p) with a size of 0.9 cm,
# color fill is set to "lightseagreen"
fig.plot(x=3, y=y, style="p0.9c", color="slateblue")
fig.text(x=3, y=y + 0.6, text="p", font=font)
# use square (s) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "gold2"
fig.plot(x=4, y=y, style="s0.9c", pen="1p,black", color="gold2")
fig.text(x=4, y=y + 0.6, text="s", font=font)
# use triangle (t) with a size of 0.9 cm,
# linewidth is set to 1p, the linecolor to "black" and the
# color fill to "magenta4"
fig.plot(x=5, y=y, style="t0.9c", pen="1p,black", color="magenta4")
fig.text(x=5, y=y + 0.6, text="t", font=font)
# use cross (x) with a size of 0.9 cm,
# linewidth is set to 2p and the linecolor to "gray40"
fig.plot(x=6, y=y, style="x0.9c", pen="2p,gray40")
fig.text(x=6, y=y + 0.6, text="x", font=font)
# use a dash in y direction (y) with a size of 0.9 cm,
# linewidth is set to 2p and the linecolor to "gray40"
fig.plot(x=7, y=y, style="y0.9c", pen="2p,gray40")
fig.text(x=7, y=y + 0.6, text="y", font=font)
fig.show()
| bsd-3-clause | Python | |
4989c443b9a247c784ad7b0e9a76df59358825a2 | Add iaas manager and also some logic to choose the provider | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/integrations/iaas/manager.py | dbaas/integrations/iaas/manager.py | from cloudstack.cloudstack_client import CloudStackProvider
from cloudstack.models import PlanAttr
from pre_provisioned.pre_provisioned_client import PreProvisionedProvider
import logging
LOG = logging.getLogger(__name__)
class IaaSManager():
def __init__(self, plan, environment):
LOG.info("IaaS manager initialized...")
self.plan = plan
self.environment = plan
if PlanAttr.objects.filter(plan=self.plan):
self.create_cloud_stack_instance()
else:
self.create_pre_provisioned_instance()
def create_cloud_stack_instance(self):
LOG.info("Creating cloud stack instance...")
self.instance = CloudStackProvider().create_instance(self.plan, self.environment)
def create_pre_provisioned_instance(self):
LOG.info("Creating pre provisioned instance...")
self.instance = PreProvisionedProvider().create_instance(self.plan, self.environment)
| bsd-3-clause | Python | |
596535209e4d48e18ed9e471b22c97d74d24155f | Create base_script.py | diesendruck/utils,diesendruck/utils | base_script.py | base_script.py | import argparse
import os
from datetime import datetime
from pathlib import Path
def argparse():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='')
parser.add_argument('--output_dir', type=str, default='output')
args = parser.parse_args()
return args
def main(args):
pass
if __name__ == '__main__':
args = argparse()
time = datetime.now().strftime('%Y%m%d%H%M%S')
args.out_dir = os.path.join(args.out_dir, time)
Path(args.out_dir).mkdir(parents=True, exist_ok=True)
main(args)
| mit | Python | |
5fba93a26b6f09c20391ec18b281def2bd851650 | Add test for semantics of for-loop that optimisation can break. | SungEun-Steve-Kim/test-mp,Vogtinator/micropython,turbinenreiter/micropython,HenrikSolver/micropython,drrk/micropython,alex-march/micropython,pozetroninc/micropython,blmorris/micropython,dmazzella/micropython,kerneltask/micropython,Timmenem/micropython,adamkh/micropython,adamkh/micropython,heisewangluo/micropython,infinnovation/micropython,SHA2017-badge/micropython-esp32,rubencabrera/micropython,dxxb/micropython,dinau/micropython,praemdonck/micropython,slzatz/micropython,firstval/micropython,toolmacher/micropython,slzatz/micropython,martinribelotta/micropython,ericsnowcurrently/micropython,galenhz/micropython,xyb/micropython,danicampora/micropython,adafruit/micropython,lbattraw/micropython,swegener/micropython,blmorris/micropython,praemdonck/micropython,heisewangluo/micropython,ernesto-g/micropython,SHA2017-badge/micropython-esp32,blazewicz/micropython,torwag/micropython,ahotam/micropython,ChuckM/micropython,jimkmc/micropython,bvernoux/micropython,mianos/micropython,ruffy91/micropython,alex-robbins/micropython,skybird6672/micropython,alex-march/micropython,warner83/micropython,ahotam/micropython,mgyenik/micropython,tuc-osg/micropython,jlillest/micropython,swegener/micropython,orionrobots/micropython,xyb/micropython,neilh10/micropython,redbear/micropython,ganshun666/micropython,xuxiaoxin/micropython,chrisdearman/micropython,tobbad/micropython,hosaka/micropython,misterdanb/micropython,adafruit/circuitpython,ChuckM/micropython,toolmacher/micropython,tdautc19841202/micropython,PappaPeppar/micropython,pozetroninc/micropython,vitiral/micropython,kostyll/micropython,suda/micropython,lowRISC/micropython,selste/micropython,henriknelson/micropython,adafruit/micropython,noahwilliamsson/micropython,praemdonck/micropython,neilh10/micropython,vriera/micropython,SungEun-Steve-Kim/test-mp,SHA2017-badge/micropython-esp32,adamkh/micropython,jimkmc/micropython,micropython/microp
ython-esp32,oopy/micropython,ganshun666/micropython,Vogtinator/micropython,kerneltask/micropython,vitiral/micropython,adamkh/micropython,kostyll/micropython,jmarcelino/pycom-micropython,orionrobots/micropython,orionrobots/micropython,aethaniel/micropython,hiway/micropython,suda/micropython,puuu/micropython,vriera/micropython,noahchense/micropython,cwyark/micropython,ryannathans/micropython,xuxiaoxin/micropython,MrSurly/micropython-esp32,jmarcelino/pycom-micropython,blazewicz/micropython,rubencabrera/micropython,pfalcon/micropython,cwyark/micropython,suda/micropython,ericsnowcurrently/micropython,praemdonck/micropython,mpalomer/micropython,emfcamp/micropython,tralamazza/micropython,hosaka/micropython,ganshun666/micropython,chrisdearman/micropython,pramasoul/micropython,EcmaXp/micropython,EcmaXp/micropython,redbear/micropython,jmarcelino/pycom-micropython,paul-xxx/micropython,cloudformdesign/micropython,dinau/micropython,lowRISC/micropython,Vogtinator/micropython,HenrikSolver/micropython,KISSMonX/micropython,puuu/micropython,torwag/micropython,toolmacher/micropython,blmorris/micropython,dinau/micropython,dhylands/micropython,alex-robbins/micropython,jmarcelino/pycom-micropython,PappaPeppar/micropython,KISSMonX/micropython,mhoffma/micropython,cnoviello/micropython,supergis/micropython,xyb/micropython,kerneltask/micropython,hiway/micropython,Timmenem/micropython,MrSurly/micropython,warner83/micropython,ericsnowcurrently/micropython,neilh10/micropython,emfcamp/micropython,MrSurly/micropython,ericsnowcurrently/micropython,blazewicz/micropython,utopiaprince/micropython,matthewelse/micropython,heisewangluo/micropython,xuxiaoxin/micropython,misterdanb/micropython,ryannathans/micropython,deshipu/micropython,Timmenem/micropython,blazewicz/micropython,pramasoul/micropython,ernesto-g/micropython,utopiaprince/micropython,mpalomer/micropython,selste/micropython,cnoviello/micropython,ryannathans/micropython,ceramos/micropython,stonegithubs/micropython,swegener/micropython,PappaPepp
ar/micropython,turbinenreiter/micropython,tdautc19841202/micropython,selste/micropython,tuc-osg/micropython,pfalcon/micropython,danicampora/micropython,trezor/micropython,trezor/micropython,oopy/micropython,misterdanb/micropython,noahchense/micropython,lbattraw/micropython,mpalomer/micropython,danicampora/micropython,warner83/micropython,stonegithubs/micropython,AriZuu/micropython,mianos/micropython,mgyenik/micropython,dinau/micropython,selste/micropython,suda/micropython,jlillest/micropython,neilh10/micropython,supergis/micropython,xuxiaoxin/micropython,TDAbboud/micropython,adafruit/circuitpython,lowRISC/micropython,dxxb/micropython,alex-robbins/micropython,feilongfl/micropython,kerneltask/micropython,noahwilliamsson/micropython,jimkmc/micropython,jlillest/micropython,trezor/micropython,turbinenreiter/micropython,redbear/micropython,xyb/micropython,ceramos/micropython,trezor/micropython,dhylands/micropython,vriera/micropython,noahchense/micropython,MrSurly/micropython-esp32,dhylands/micropython,orionrobots/micropython,alex-robbins/micropython,paul-xxx/micropython,galenhz/micropython,MrSurly/micropython,turbinenreiter/micropython,vriera/micropython,aethaniel/micropython,Timmenem/micropython,ryannathans/micropython,adafruit/circuitpython,ceramos/micropython,TDAbboud/micropython,jimkmc/micropython,cwyark/micropython,hiway/micropython,paul-xxx/micropython,mhoffma/micropython,dinau/micropython,pfalcon/micropython,ChuckM/micropython,hosaka/micropython,henriknelson/micropython,paul-xxx/micropython,martinribelotta/micropython,redbear/micropython,heisewangluo/micropython,cwyark/micropython,cloudformdesign/micropython,noahwilliamsson/micropython,EcmaXp/micropython,TDAbboud/micropython,xhat/micropython,neilh10/micropython,supergis/micropython,jmarcelino/pycom-micropython,matthewelse/micropython,hiway/micropython,cnoviello/micropython,alex-robbins/micropython,ceramos/micropython,ruffy91/micropython,torwag/micropython,micropython/micropython-esp32,adafruit/micropython,xyb/micro
python,deshipu/micropython,ChuckM/micropython,PappaPeppar/micropython,cloudformdesign/micropython,supergis/micropython,adafruit/circuitpython,stonegithubs/micropython,tuc-osg/micropython,micropython/micropython-esp32,omtinez/micropython,jimkmc/micropython,AriZuu/micropython,EcmaXp/micropython,bvernoux/micropython,tobbad/micropython,pramasoul/micropython,martinribelotta/micropython,tuc-osg/micropython,ericsnowcurrently/micropython,firstval/micropython,hiway/micropython,omtinez/micropython,tobbad/micropython,jlillest/micropython,infinnovation/micropython,ahotam/micropython,firstval/micropython,KISSMonX/micropython,pozetroninc/micropython,adafruit/micropython,oopy/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,tdautc19841202/micropython,omtinez/micropython,dxxb/micropython,Peetz0r/micropython-esp32,Peetz0r/micropython-esp32,mgyenik/micropython,SHA2017-badge/micropython-esp32,puuu/micropython,drrk/micropython,praemdonck/micropython,vitiral/micropython,EcmaXp/micropython,redbear/micropython,kostyll/micropython,blazewicz/micropython,matthewelse/micropython,ruffy91/micropython,PappaPeppar/micropython,xuxiaoxin/micropython,stonegithubs/micropython,pfalcon/micropython,dxxb/micropython,henriknelson/micropython,kostyll/micropython,chrisdearman/micropython,mhoffma/micropython,kostyll/micropython,suda/micropython,utopiaprince/micropython,micropython/micropython-esp32,AriZuu/micropython,xhat/micropython,emfcamp/micropython,ahotam/micropython,KISSMonX/micropython,chrisdearman/micropython,HenrikSolver/micropython,pramasoul/micropython,TDAbboud/micropython,puuu/micropython,warner83/micropython,blmorris/micropython,oopy/micropython,HenrikSolver/micropython,martinribelotta/micropython,tobbad/micropython,mgyenik/micropython,mianos/micropython,lowRISC/micropython,pozetroninc/micropython,tdautc19841202/micropython,misterdanb/micropython,paul-xxx/micropython,bvernoux/micropython,tralamazza/micropython,Vogtinator/micropython,dmazzella/micropython,MrSurly/micropython-esp32,s
lzatz/micropython,mhoffma/micropython,MrSurly/micropython,selste/micropython,hosaka/micropython,lbattraw/micropython,emfcamp/micropython,bvernoux/micropython,lowRISC/micropython,cnoviello/micropython,ruffy91/micropython,SHA2017-badge/micropython-esp32,warner83/micropython,slzatz/micropython,galenhz/micropython,slzatz/micropython,feilongfl/micropython,Vogtinator/micropython,cloudformdesign/micropython,blmorris/micropython,martinribelotta/micropython,rubencabrera/micropython,mianos/micropython,aethaniel/micropython,henriknelson/micropython,puuu/micropython,SungEun-Steve-Kim/test-mp,skybird6672/micropython,stonegithubs/micropython,drrk/micropython,SungEun-Steve-Kim/test-mp,omtinez/micropython,HenrikSolver/micropython,tralamazza/micropython,dhylands/micropython,ChuckM/micropython,pramasoul/micropython,rubencabrera/micropython,Peetz0r/micropython-esp32,adafruit/micropython,ernesto-g/micropython,dxxb/micropython,pfalcon/micropython,adafruit/circuitpython,toolmacher/micropython,swegener/micropython,KISSMonX/micropython,ahotam/micropython,danicampora/micropython,adafruit/circuitpython,dmazzella/micropython,TDAbboud/micropython,drrk/micropython,torwag/micropython,cnoviello/micropython,feilongfl/micropython,mhoffma/micropython,cwyark/micropython,aethaniel/micropython,matthewelse/micropython,supergis/micropython,xhat/micropython,danicampora/micropython,torwag/micropython,feilongfl/micropython,infinnovation/micropython,AriZuu/micropython,MrSurly/micropython-esp32,toolmacher/micropython,tralamazza/micropython,infinnovation/micropython,galenhz/micropython,feilongfl/micropython,matthewelse/micropython,alex-march/micropython,misterdanb/micropython,mpalomer/micropython,dhylands/micropython,oopy/micropython,ruffy91/micropython,deshipu/micropython,hosaka/micropython,kerneltask/micropython,firstval/micropython,chrisdearman/micropython,vitiral/micropython,dmazzella/micropython,turbinenreiter/micropython,AriZuu/micropython,deshipu/micropython,xhat/micropython,mpalomer/micropython,Peetz0r
/micropython-esp32,skybird6672/micropython,rubencabrera/micropython,ceramos/micropython,drrk/micropython,adamkh/micropython,mianos/micropython,tuc-osg/micropython,jlillest/micropython,xhat/micropython,skybird6672/micropython,MrSurly/micropython-esp32,SungEun-Steve-Kim/test-mp,matthewelse/micropython,vriera/micropython,cloudformdesign/micropython,Timmenem/micropython,galenhz/micropython,noahchense/micropython,utopiaprince/micropython,omtinez/micropython,firstval/micropython,ernesto-g/micropython,MrSurly/micropython,alex-march/micropython,mgyenik/micropython,deshipu/micropython,tdautc19841202/micropython,noahchense/micropython,micropython/micropython-esp32,aethaniel/micropython,tobbad/micropython,noahwilliamsson/micropython,infinnovation/micropython,alex-march/micropython,swegener/micropython,utopiaprince/micropython,skybird6672/micropython,heisewangluo/micropython,bvernoux/micropython,pozetroninc/micropython,vitiral/micropython,lbattraw/micropython,ryannathans/micropython,noahwilliamsson/micropython,orionrobots/micropython,lbattraw/micropython,trezor/micropython,ganshun666/micropython,ernesto-g/micropython,ganshun666/micropython,emfcamp/micropython | tests/basics/for3.py | tests/basics/for3.py | # test assigning to iterator within the loop
for i in range(2):
    print(i)
    # Rebinding the loop variable must not disturb iteration; range(2)
    # should still print 0 then 1 even though i is overwritten each pass.
    i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
| mit | Python | |
10bc476eee20270d0cda93cded5c808d38c90b16 | Add ConfigData class module | corerd/PyDomo,corerd/PyDomo,corerd/PyDomo,corerd/PyDomo | utils/configdataload.py | utils/configdataload.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Get configuration parameters
"""
import json
class ConfigData:
    """Holds configuration parameters loaded from a JSON file.

    The parsed configuration is exposed as the ``data`` attribute, which is
    ``None`` until :meth:`load` has been called successfully.
    """

    def __init__(self):
        # No configuration loaded yet.
        self.data = None

    def load(self, loadFile):
        """Parse the JSON file at *loadFile* into ``self.data``.

        Raises ``IOError``/``OSError`` if the file cannot be opened and
        ``ValueError`` (``json.JSONDecodeError``) if it is not valid JSON.
        """
        # A context manager guarantees the file is closed even when
        # json.load() raises; the previous explicit close() leaked the
        # handle on parse errors.
        with open(loadFile) as json_data:
            self.data = json.load(json_data)
# Import-only module for now; nothing happens when executed directly.
if __name__ == "__main__":
    pass
| mit | Python | |
86312f5f7ee0f26e5b94c37b7d12ff9826c92de6 | Create intNotLikeOthers.py | NendoTaka/CodeForReference,NendoTaka/CodeForReference,NendoTaka/CodeForReference | Codingame/Python/Clash/intNotLikeOthers.py | Codingame/Python/Clash/intNotLikeOthers.py | import sys
import math

# Read one line of whitespace-separated integers from standard input.
values = [int(token) for token in input().split()]

# Count the non-negative entries (zero counts as non-negative here).
non_negative = sum(1 for v in values if v >= 0)

# Exactly one non-negative value means it is the odd one out; otherwise
# the negatives are the minority and each of them is printed.
if non_negative == 1:
    for v in values:
        if v >= 0:
            print(v)
else:
    for v in values:
        if v < 0:
            print(v)
| mit | Python | |
7888ec20f67a8c708608b16e2c86b23993e3648a | add bulls-and-cows | zeyuanxy/leet-code,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode | vol6/bulls-and-cows/bulls-and-cows.py | vol6/bulls-and-cows/bulls-and-cows.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-10-30 20:10:42
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-10-30 20:10:47
class Solution(object):
    def getHint(self, secret, guess):
        """Return the Bulls and Cows hint "xAyB" for a guess.

        Bulls (A) are digits matching in both value and position; cows (B)
        are digits present in the secret but in the wrong position.

        :type secret: str
        :type guess: str
        :rtype: str

        The original used Python-2-only syntax (``lambda (a, b)`` tuple
        unpacking and ``dict.iteritems``); this version runs on both
        Python 2 and Python 3 with identical results.
        """
        # Exact positional matches.
        A = sum(1 for a, b in zip(secret, guess) if a == b)
        # Per-character counts of each string.
        m1, m2 = {}, {}
        for c in secret:
            m1[c] = m1.get(c, 0) + 1
        for c in guess:
            m2[c] = m2.get(c, 0) + 1
        # Total value matches (bulls included), minus bulls, gives cows.
        B = sum(min(v, m2.get(c, 0)) for c, v in m1.items()) - A
        return '{}A{}B'.format(A, B)
08a3317d577e0ee5dfa07f8a81b7a4a018297b4a | Create a script to parse .mat and write .csv file | ToniRV/Learning-to-navigate-without-a-map,ToniRV/Learning-to-navigate-without-a-map | dstar-lite/scripts/python_pipe.py | dstar-lite/scripts/python_pipe.py |
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
| mit | Python | |
83e036f3d89c4b3956bde006085becb496a1fb6e | Add python / sklearn comparison script | jlas/ml.q | dbscan/test.py | dbscan/test.py | '''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
    '''Compare result of our DBSCAN to scikit-learn.

    Generates 1000 blob points, runs the q implementation through a shell
    pipeline, and checks its labels element-wise against sklearn's DBSCAN.
    '''
    # Make temp directory for dumping intermediate files
    os.mkdir('tmp')
    # Generate sample data (fixed random_state keeps the test deterministic)
    centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
    X, _ = make_blobs(
        n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
    X = StandardScaler().fit_transform(X)
    # Write sample data
    pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
    # Compute our DBSCAN
    # Run in a python subprocess which sends a few lines of q into the stdin
    # of a q interpreter. Assumed to run in same directory as dbscan.q module.
    # NOTE(review): requires a q binary at $QHOME/m32/q; its stdout is
    # discarded, so a q-side failure only surfaces when tmp/q.csv is
    # missing or mismatched below.
    subprocess.run(f'''echo ' \
        system "l dbscan.q"; \
        t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
        d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
        (`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
        $QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
    qlabels = pd.read_csv('tmp/q.csv')['labels']
    # Compute scikit-learn's DBSCAN
    db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
    pylabels = db.labels_
    # Compare: cluster labels must agree element-wise
    assert (qlabels == pylabels).all()
    # Cleanup temp directory
    shutil.rmtree('tmp')
| mit | Python | |
bc39c5fe7537bebcee478903e6890f4b9bab8b77 | delete unit tests | miguelgrinberg/slam | tests/test_delete.py | tests/test_delete.py | import mock
import unittest
import botocore
from slam import cli
from .test_deploy import config, describe_stacks_response
class DeleteTests(unittest.TestCase):
    """Unit tests for the ``slam delete`` CLI command.

    boto3 clients are mocked; ``client.side_effect`` hands back the S3 mock
    first and the CloudFormation mock second -- cli.main is expected to
    create them in that order.
    """
    @mock.patch('slam.cli.boto3.client')
    @mock.patch('slam.cli._load_config', return_value=config)
    def test_delete(self, _load_config, client):
        """Happy path: stack deleted via waiter, S3 artifacts removed."""
        mock_s3 = mock.MagicMock()
        mock_cfn = mock.MagicMock()
        mock_cfn.describe_stacks.return_value = describe_stacks_response
        client.side_effect = [mock_s3, mock_cfn]
        cli.main(['delete'])
        mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
        mock_cfn.delete_stack.assert_called_once_with(StackName='foo')
        mock_cfn.get_waiter.assert_called_once_with('stack_delete_complete')
        mock_cfn.get_waiter().wait.assert_called_once_with(StackName='foo')
        mock_s3.delete_object.assert_called_once_with(Bucket='bucket',
                                                      Key='lambda-old.zip')
        # NOTE(review): this *calls* the mock instead of asserting on it;
        # likely meant mock_s3.delete_bucket.assert_called_once_with(...).
        mock_s3.delete_bucket(Bucket='bucket')
    @mock.patch('slam.cli.boto3.client')
    @mock.patch('slam.cli._load_config', return_value=config)
    def test_delete_not_deployed(self, _load_config, client):
        """A missing stack (ClientError on describe) raises RuntimeError."""
        mock_s3 = mock.MagicMock()
        mock_cfn = mock.MagicMock()
        mock_cfn.describe_stacks.side_effect = \
            botocore.exceptions.ClientError({'Error': {}}, 'operation')
        client.side_effect = [mock_s3, mock_cfn]
        self.assertRaises(RuntimeError, cli.main, ['delete'])
    @mock.patch('slam.cli.boto3.client')
    @mock.patch('slam.cli._load_config', return_value=config)
    def test_delete_fail_s3_file(self, _load_config, client):
        """An S3 object-deletion error is tolerated, not fatal."""
        mock_s3 = mock.MagicMock()
        mock_cfn = mock.MagicMock()
        mock_cfn.describe_stacks.return_value = describe_stacks_response
        mock_s3.delete_object.side_effect = \
            botocore.exceptions.ClientError({'Error': {}}, 'operation')
        client.side_effect = [mock_s3, mock_cfn]
        cli.main(['delete']) # should still work in spite of s3 error
    @mock.patch('slam.cli.boto3.client')
    @mock.patch('slam.cli._load_config', return_value=config)
    def test_delete_fail_s3_bucket(self, _load_config, client):
        """An S3 bucket-deletion error is tolerated, not fatal."""
        mock_s3 = mock.MagicMock()
        mock_cfn = mock.MagicMock()
        mock_cfn.describe_stacks.return_value = describe_stacks_response
        mock_s3.delete_bucket.side_effect = \
            botocore.exceptions.ClientError({'Error': {}}, 'operation')
        client.side_effect = [mock_s3, mock_cfn]
        cli.main(['delete']) # should still work in spite of s3 error
| mit | Python | |
311f1e73768015419564991a773137f9e3555467 | Create disorderly_escape.py | hirenvasani/foobar | disorderly_escape.py | disorderly_escape.py | '''
Challenge 5.1
Disorderly Escape
=================
Oh no! You've managed to free the bunny prisoners and escape Commander Lambdas exploding space station, but her team of elite starfighters has flanked your ship. If you dont jump to hyperspace, and fast, youll be shot out of the sky!
Problem is, to avoid detection by galactic law enforcement, Commander Lambda planted her space station in the middle of a quasar quantum flux field. In order to make the jump to hyperspace, you need to know the configuration of celestial bodies in the quadrant you plan to jump through. In order to do *that*, you need to figure out how many configurations each quadrant could possibly have, so that you can pick the optimal quadrant through which youll make your jump.
There's something important to note about quasar quantum flux fields' configurations: when drawn on a star grid, configurations are considered equivalent by grouping rather than by order. That is, for a given set of configurations, if you exchange the position of any two columns or any two rows some number of times, youll find that all of those configurations are equivalent in that way - in grouping, rather than order.
Write a function answer(w, h, s) that takes 3 integers and returns the number of unique, non-equivalent configurations that can be found on a star grid w blocks wide and h blocks tall where each celestial body has s possible states. Equivalency is defined as above: any two star grids with each celestial body in the same state where the actual order of the rows and columns do not matter (and can thus be freely swapped around). Star grid standardization means that the width and height of the grid will always be between 1 and 12, inclusive. And while there are a variety of celestial bodies in each grid, the number of states of those bodies is between 2 and 20, inclusive. The answer can be over 20 digits long, so return it as a decimal string. The intermediate values can also be large, so you will likely need to use at least 64-bit integers.
For example, consider w=2, h=2, s=2. We have a 2x2 grid where each celestial body is either in state 0 (for instance, silent) or state 1 (for instance, noisy). We can examine which grids are equivalent by swapping rows and columns.
00
00
In the above configuration, all celestial bodies are "silent" - that is, they have a state of 0 - so any swap of row or column would keep it in the same state.
00 00 01 10
01 10 00 00
1 celestial body is emitting noise - that is, has a state of 1 - so swapping rows and columns can put it in any of the 4 positions. All four of the above configurations are equivalent.
00 11
11 00
2 celestial bodies are emitting noise side-by-side. Swapping columns leaves them unchanged, and swapping rows simply moves them between the top and bottom. In both, the *groupings* are the same: one row with two bodies in state 0, one row with two bodies in state 1, and two columns with one of each state.
01 10
01 10
2 noisy celestial bodies adjacent vertically. This is symmetric to the side-by-side case, but it is different because there's no way to transpose the grid.
01 10
10 01
2 noisy celestial bodies diagonally. Both have 2 rows and 2 columns that have one of each state, so they are equivalent to each other.
01 10 11 11
11 11 01 10
3 noisy celestial bodies, similar to the case where only one of four is noisy.
11
11
4 noisy celestial bodies.
There are 7 distinct, non-equivalent grids in total, so answer(2, 2, 2) would return 7.
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(int) w = 2
(int) h = 2
(int) s = 2
Output:
(string) "7"
Inputs:
(int) w = 2
(int) h = 3
(int) s = 4
Output:
(string) "430"
'''
from math import factorial
from collections import Counter
from fractions import gcd
def cycle_count(c, n):
    """Number of permutations in S_n whose cycle type is the partition *c*.

    Uses the standard formula n! / prod(a**b * b!) where the partition
    contains b cycles of length a.
    """
    count = factorial(n)
    for length, multiplicity in Counter(c).items():
        count //= (length ** multiplicity) * factorial(multiplicity)
    return count
def cycle_partitions(n, i=1):
    """Generate every integer partition of *n* using parts no smaller than *i*.

    Each partition is yielded as a list with its smallest part first;
    the trivial partition [n] is always produced before any split.
    """
    yield [n]
    for part in range(i, n // 2 + 1):
        for tail in cycle_partitions(n - part, part):
            yield [part] + tail
def answer(w, h, s):
    """Count non-equivalent w x h grids with s states per cell.

    Burnside-style average over the group of row and column permutations:
    for each pair of cycle types (cpw, cph), a permutation pair fixes
    s**sum(gcd(i, j)) colourings (summed over all cycle-length pairs),
    weighted by how many permutations have that cycle type, then divided
    by the group size w! * h!.
    """
    grid=0
    for cpw in cycle_partitions(w):
        for cph in cycle_partitions(h):
            # number of (row-perm, column-perm) pairs with these cycle types
            m=cycle_count(cpw, w)*cycle_count(cph, h)
            grid+=m*(s**sum([sum([gcd(i, j) for i in cpw]) for j in cph]))
    # grid is an exact multiple of the group order, so // is exact here.
    return grid//(factorial(w)*factorial(h))
# Parenthesized form runs under both Python 2 and Python 3; the bare
# print statement was Python-2-only.
print(answer(3, 3, 3))
| mit | Python | |
31e5950b35545b293247d2979678b2b6c7c7864c | Create the test file | jwg4/fringe_search,jwg4/fringe_search,jwg4/fringe_search | tests/test_fringe.py | tests/test_fringe.py | import unittest
import ctypes
class TestBranch(unittest.TestCase):
def setUpClass(cls):
cls.obj = ctypes.CDLL("obj/fringe13.o")
def test_object(self):
self.assertIsNotNone(self.obj)
| mit | Python | |
fe6d4383a942eb85e3062f35f5b6d073d92b1cc2 | Add unit test for tuning estimations | SUSE/smdba,SUSE/smdba | tests/test_pgtune.py | tests/test_pgtune.py | # coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
| mit | Python | |
ec9b55a830c2d09d5836d5fb5c6c0c9b0bb67574 | Add test for polycs | Effective-Quadratures/Effective-Quadratures,psesh/Effective-Quadratures | tests/test_polycs.py | tests/test_polycs.py | from unittest import TestCase
import unittest
from equadratures import *
import numpy as np
class TestPolycs(TestCase):
def test_simple2D(self):
d = 5
param = Parameter(distribution='Uniform', lower=-1, upper=1., order=1)
myParameters = [param for _ in range(d)]
def f(x):
return x[0] * x[1]
x_train = np.array([[7.58632788e-01, 4.81746227e-01, 5.02577142e-01,
7.67376530e-01, 4.90829684e-01],
[1.98916966e-01, 8.53442257e-01, 4.65585866e-01,
2.75222052e-01, 6.77784764e-01],
[7.46828043e-01, 8.58487468e-01, 4.32075141e-01,
1.42985459e-01, 6.25679567e-01],
[7.35825598e-01, 1.65463815e-01, 9.13499589e-01,
4.86974147e-04, 1.38084505e-01],
[1.66053494e-01, 8.26502987e-01, 9.81150618e-01,
4.65587483e-01, 5.69055172e-01],
[8.41720170e-01, 4.21238174e-01, 7.42375218e-01,
8.41220207e-02, 2.07048763e-01],
[5.80581970e-01, 4.52048112e-01, 3.92967568e-01,
7.83143576e-01, 7.76403603e-01],
[9.74079876e-01, 8.72576146e-01, 2.10026353e-01,
4.08982657e-01, 1.89006589e-01],
[4.44494044e-01, 5.58853652e-01, 2.25635327e-01,
3.94315874e-01, 1.49055844e-01],
[2.67176489e-01, 7.36300543e-01, 9.07632137e-01,
5.03907567e-01, 3.31995486e-01],
[7.89158773e-01, 6.31673466e-01, 5.23065889e-01,
8.48395576e-02, 6.66838037e-01],
[8.71387227e-01, 3.02452797e-02, 3.66761253e-01,
2.98375233e-02, 8.16636350e-01],
[4.09188935e-01, 7.23745682e-01, 2.70466646e-01,
3.33145142e-01, 1.17563309e-01],
[2.86957871e-01, 9.83273435e-01, 9.50085865e-01,
4.25726126e-01, 7.05275218e-01],
[1.56317650e-01, 1.73866379e-01, 7.74967016e-01,
6.37677812e-01, 7.72158379e-01]])
polynomialOrders = np.full(d, 2)
myBasis = Basis('Total order', polynomialOrders)
poly = Polycs(myParameters, myBasis, training_inputs=x_train, fun=f)
actual_coeffs = np.zeros(myBasis.cardinality)
actual_coeffs[-2] = 1.0/3.0
np.testing.assert_almost_equal(np.linalg.norm(actual_coeffs - poly.coefficients.flatten()), 0, decimal=4,
err_msg="Difference greated than imposed tolerance for coeffs")
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| lgpl-2.1 | Python | |
d8d2ef931c5883dd1a04e563ebb67b381af6f541 | Add teuthology.config, the start of a better system | tchaikov/teuthology,ivotron/teuthology,ceph/teuthology,robbat2/teuthology,yghannam/teuthology,ktdreyer/teuthology,ivotron/teuthology,t-miyamae/teuthology,caibo2014/teuthology,dmick/teuthology,robbat2/teuthology,caibo2014/teuthology,dmick/teuthology,dreamhost/teuthology,yghannam/teuthology,t-miyamae/teuthology,tchaikov/teuthology,zhouyuan/teuthology,SUSE/teuthology,michaelsevilla/teuthology,dmick/teuthology,SUSE/teuthology,SUSE/teuthology,ktdreyer/teuthology,ceph/teuthology,zhouyuan/teuthology,michaelsevilla/teuthology,dreamhost/teuthology | teuthology/config.py | teuthology/config.py | #!/usr/bin/env python
import os
import yaml
import logging
CONF_FILE = os.path.join(os.environ['HOME'], '.teuthology.yaml')
log = logging.getLogger(__name__)
class _Config(object):
def __init__(self):
self.__conf = {}
if not os.path.exists(CONF_FILE):
log.debug("%s not found", CONF_FILE)
return
with file(CONF_FILE) as f:
conf_obj = yaml.safe_load_all(f)
for item in conf_obj:
self.__conf.update(item)
@property
def lock_server(self):
return self.__conf.get('lock_server')
@property
def queue_host(self):
return self.__conf.get('queue_host')
@property
def queue_port(self):
return self.__conf.get('queue_port')
@property
def sentry_dsn(self):
return self.__conf.get('sentry_dsn')
# Shared module-level instance, created once at import time.
config = _Config()
| mit | Python | |
89b1790a5b12a90d03d09280f92f8d068b799a1b | add metadata client | vmx/cbagent,couchbase/cbmonitor,ronniedada/litmus,ronniedada/litmus,couchbase/cbmonitor,pavel-paulau/cbagent,couchbase/cbagent,mikewied/cbagent | priority15/metadata_client.py | priority15/metadata_client.py | import requests
def post_request(request):
    """Decorator for methods that build ``(url, params)`` POST payloads.

    The wrapped function must return a ``(url, params)`` tuple; the wrapper
    issues the POST and returns the ``requests`` response object (the
    response was previously discarded, which made failures invisible).
    """
    # Local import keeps the module's dependency surface unchanged.
    from functools import wraps

    @wraps(request)  # preserve the wrapped method's name and docstring
    def wrapper(*args, **kargs):
        url, params = request(*args, **kargs)
        return requests.post(url, params)
    return wrapper
class MetadataClient(object):
    """Thin client for the cbmonitor metadata REST endpoints.

    Each public method builds a ``(url, params)`` pair; the
    ``post_request`` decorator takes care of issuing the actual POST.
    """

    def __init__(self, host="127.0.0.1"):
        self.base_url = "http://{0}:8000/cbmonitor".format(host)

    @post_request
    def add_cluster(self, name):
        """Register a cluster by name."""
        return self.base_url + "/add_cluster/", {"name": name}

    @post_request
    def add_server(self, cluster, address):
        """Register a server address under an existing cluster."""
        return self.base_url + "/add_server/", {"cluster": cluster,
                                                "address": address}

    @post_request
    def add_bucket(self, cluster, name):
        """Register a Couchbase bucket under an existing cluster."""
        return self.base_url + "/add_bucket/", {"cluster": cluster,
                                                "name": name,
                                                "type": "Couchbase"}

    @post_request
    def add_metric(self, cluster, name, bucket=None, server=None):
        """Register a metric, optionally scoped to a server and/or bucket."""
        params = {"type": "metric", "cluster": cluster, "name": name}
        if server:
            params["server"] = server
        if bucket:
            params["bucket"] = bucket
        return self.base_url + "/add_metric_or_event/", params
| apache-2.0 | Python | |
ed985791d20199af9cb34e445d0a96dc11e9129b | Add some basic tests for RRTM scheme. | brian-rose/climlab,cjcardinale/climlab,brian-rose/climlab,cjcardinale/climlab,cjcardinale/climlab | climlab/tests/test_rrtm.py | climlab/tests/test_rrtm.py | from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
    """RRTMG builds LW/SW subprocesses and exposes radiation diagnostics."""
    num_lev = 30  # was undefined anywhere in the module (NameError)
    # initial state (temperatures)
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
    # Create a RRTM radiation model
    rad = RRTMG(state=state)
    rad.step_forward()
    assert type(rad.subprocess['LW']) is RRTMG_LW
    assert type(rad.subprocess['SW']) is RRTMG_SW
    assert hasattr(rad, 'OLR')
    assert hasattr(rad, 'OLRclr')
    assert hasattr(rad, 'ASR')
    assert hasattr(rad, 'ASRclr')
def test_swap_component():
    """The LW subprocess can be swapped for the CAM3 longwave scheme."""
    num_lev = 30  # was undefined anywhere in the module (NameError)
    # initial state (temperatures)
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
    # Create a RRTM radiation model; RRTMG is the imported name
    # (the original referenced an undefined RRTM).
    rad = RRTMG(state=state)
    rad.step_forward()
    # Swap out the longwave model for CAM3
    rad.remove_subprocess('LW')
    rad.step_forward()
    rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
    rad.step_forward()
    assert hasattr(rad, 'OLR')
| mit | Python | |
0d0cbb961a5d19ce98dd7e9805a04ac9c8887687 | Add main file | DeviantTofu/hotcrp2acm | hotcrp2acm.py | hotcrp2acm.py | #! /usr/bin/python
__author__ = "Xiaofan (Fred) Jiang"
__copyright__ = "Copyright 2016, Columbia ICSL"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "jiang@ee.columbia.edu"
__status__ = "Production"
"""
Example:
hotcrp2acm.py input.json output.csv
"""
import json
import csv
import string
import sys
# Output CSV is opened in binary mode because the Python 2 csv module
# expects a "wb" file object; `with` guarantees both files are closed
# (the input file was previously never closed at all).
with open(sys.argv[2], "wb") as ofile:
    writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)

    # Read the entire HotCRP JSON export.
    with open(sys.argv[1], "r") as infile:
        data = json.loads(infile.read())

    for paper in data:
        # Columns: type, title, authors, contact email, other emails.
        row = ["Demo", paper['title'].encode("utf-8")]

        # "First Last:Affiliation" for every author, ';'-separated.
        authors = ";".join(
            author['first'].encode("utf-8") + " " +
            author['last'].encode("utf-8") + ":" +
            author['affiliation'].encode("utf-8")
            for author in paper['authors'])
        row.append(authors)

        # The first author is the contact.
        row.append(paper['authors'][0]['email'].encode("utf-8"))

        # Remaining authors' emails; an author without an email still
        # contributes an empty slot so positions stay aligned.
        # ('key in dict' replaces the Python-2-only dict.has_key().)
        emails = ";".join(
            author['email'].encode("utf-8") if 'email' in author else ""
            for author in paper['authors'][1:])
        row.append(emails)

        writer.writerow(row)

# Parenthesized print runs under both Python 2 and Python 3.
print("Finished conversion successfully!")
| mit | Python | |
9d94dcff276418edd5d1464761a976ec65df8519 | add install.py stub | dimitardimitrov/sublime.settings | install.py | install.py | # invoke python
# locate sublime
# ask the user if they want to:
# add only the missing records to the user's files
# or replace the files completely
| mit | Python | |
f35baed0d59c508110b97d9d100e36afcb57a6c0 | update alembic | vanesa/kid-o,vanesa/kid-o,vanesa/kid-o,vanesa/kid-o | alembic/versions/7665dea01c37_add_project_to_child.py | alembic/versions/7665dea01c37_add_project_to_child.py | """add project to child
Revision ID: 7665dea01c37
Revises:
Create Date: 2017-10-07 16:37:54.126634
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7665dea01c37'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('child', sa.Column('project', sa.String(length=50), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('child', 'project')
# ### end Alembic commands ###
| bsd-3-clause | Python | |
cde510b5f13e1a2624d7d458415d206ad6c6019c | Add py-httptools (#19186) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-httptools/package.py | var/spack/repos/builtin/packages/py-httptools/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHttptools(PythonPackage):
    """httptools is a Python binding for the nodejs HTTP parser."""

    homepage = "https://github.com/MagicStack/httptools"
    url = "https://pypi.io/packages/source/h/httptools/httptools-0.1.1.tar.gz"

    version('0.1.1', sha256='41b573cf33f64a8f8f3400d0a7faf48e1888582b6f6e02b82b9bd4f0bf7497ce')

    # Build-time-only dependencies used to build/package the extension.
    depends_on('py-setuptools', type='build')
    depends_on('py-wheel', type='build')
d3789d601de439201c914c531ede9b2f6ae3154d | Copy geneagraph.py to geneagraph-cgi.py file. The new file will contain the cgi version of the geneagrapher interface, and the new command-line interface will be built in the geneagraph.py file. | davidalber/Geneagrapher,davidalber/Geneagrapher | src/geneagraph-cgi.py | src/geneagraph-cgi.py | #!/usr/bin/python
import cgi
import random
import os
import time
from grab import *
from GGraph import *
#import cgitb; cgitb.enable() # for debugging, comment out for production
form = cgi.FieldStorage()
name = form.getfirst("name", "")
extra = form.getfirst("extra", "")
nodes = form.getlist("node")
output = form.getfirst("output", "png")
# Save the input to log file.
f = open("/var/log/geneagraph", "a")
f.write(time.strftime('%m/%d/%Y %H:%M:%S'))
f.write(" ")
f.write(os.environ['REMOTE_ADDR'])
f.write("\n")
if name != "":
f.write("\tName: ")
f.write(name)
f.write("\n")
if extra != "":
f.write("\tExtra: ")
f.write(extra)
f.write("\n")
if len(nodes) > 0:
f.write("\t")
f.write(str(nodes))
f.write("\n")
f.close()
try:
if len(name) > 100:
raise ValueError("Name field longer than maximum allowed length (100 characters).")
if len(extra) > 100:
raise ValueError("Extra field longer than maximum allowed length (100 characters).")
if len(nodes) > 5:
#if len(nodes) > 50:
raise ValueError("Only five node URLs may be supplied.")
# Replace special characters in name and extra with backslashed form
name = name.replace('\\', '\\\\')
name = name.replace('\"', '\\"')
extra = extra.replace('\\', '\\\\')
extra = extra.replace('"', '\\"')
record = Record(name, extra, -1, 0)
printHead = True
if name == "" and extra == "":
printHead = False
advisors = []
for index in range(len(nodes)):
if not nodes[index].isspace():
if nodes[index].find('id.php?id=') > -1:
id = nodes[index].split('id.php?id=')[1].strip()
if id.isdigit():
advisors.append(int(id))
else:
raise ValueError("Node " + str(index+1) + " was improperly formatted.")
else:
raise ValueError("Node " + str(index+1) + " was improperly formatted.")
node = Node(record, advisors)
graph = Graph(node, printHead)
for advisor in advisors:
extractNodeInformation(advisor, graph)
fnum = str(int(random.random()*1000000000000000))
filename = '/tmp/' + fnum + '.dot'
graph.writeDotFile(filename)
if output == "dot":
print "Content-Type: text/html"
print
print "<html><body><pre>"
f = open(filename, "r")
file = f.read()
f.close()
print file
print "</pre></body></html>"
elif output == "png" or output == "ps":
psfilename = '/tmp/' + fnum + '.ps'
command = '/usr/local/bin/dot -Tps ' + filename + ' -o ' + psfilename
os.system(command)
if output == "png":
pngfilename = '/tmp/' + fnum + '.png'
command = '/usr/bin/convert -density 144 -geometry 50% ' + psfilename + ' ' + pngfilename
os.system(command)
print "Content-type: image/png"
print "Content-Disposition: attachment; filename=genealogy.png"
print
f = open(pngfilename, "r")
elif output == "ps":
print "Content-Type: application/postscript"
print
f = open(psfilename, "r")
file = f.read()
f.close()
print file
else: # improper output chosen
raise ValueError("Return type was improperly formatted. Go back and check it out.")
command = '/bin/rm /tmp/' + fnum + '.*'
os.system(command)
except ValueError, e:
print "Content-type: text/html"
print
print e, "<br>Go back and check it out."
raise SystemExit
| mit | Python | |
00077dcbd9f2394d62fced7490e6797a57dc90c6 | Add new script computing relations precision-recall, given the assignment | NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,amonszpart/globOpt | evaluation/compareMappedGraphs.py | evaluation/compareMappedGraphs.py | import packages.project as project
import packages.primitive as primitive
import packages.processing
import packages.relationGraph as relgraph
import packages.io
import argparse
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import isomorphism
import numpy as np
################################################################################
## UI Generation
def setupGraphUI(graph, title):
    """Open a new matplotlib figure window titled *title* and draw *graph* into it."""
    fig, ax1 = plt.subplots()
    fig.canvas.set_window_title(title)
    graph.draw()
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Compare ground truth noise distribution (continuous generator and generated samples) and the result of the optimisation.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
cloudfile = projectdir+'/cloud.ply'
mappingfile = projectdir+'/corresp.csv'
linesfile_it1 = projectdir+'/primitives_it0.bonmin.csv'
assignfile_it1 = projectdir+'/points_primitives_it1.csv'
print 'Processing project ', projectname
################################################################################
## Reading input files
project = project.PyProject(projectfile)
cloud = packages.io.readPointCloudFromPly(cloudfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
lines_it1 = primitive.readPrimitivesFromFile(linesfile_it1)
assign_it1 = packages.io.readPointAssignementFromFiles(assignfile_it1)
# associative arrays, mapping
# - the gt primitive to the estimated primitive
# - the gt uid to the estimated uid
primitiveCorres, primitiveCorresId = packages.io.readPrimitiveCorrespondancesFromFiles(mappingfile, gtlines, lines_it1)
#gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
#lines_it1 = packages.processing.removeUnassignedPrimitives(lines_it1, assign_it1)
#gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
#assign_it1 = packages.processing.removeUnassignedPoint(lines_it1, assign_it1)
################################################################################
## Build relation graphs
print "Processing GT relations...."
gtGraph = relgraph.RelationGraph(gtlines, gtassign)
print "Processing estimated relations...."
graph_it1 = relgraph.RelationGraph(lines_it1, assign_it1)
#[e['matched']=0 for e in gtGraph.G.edges_iter()]
#[e['matched']=0 for e in graph_it1.G.edges_iter()]
#[e[-1]['matched']=0 for e in graph_it1.G.edges_iter(data=True)]
for e in graph_it1.G.edges_iter(data=True):
e[-1]['matched']=0
for e in gtGraph.G.edges_iter(data=True):
e[-1]['matched']=0
for p in gtlines:
p_node = gtGraph.G.node[p.uid]
if not primitiveCorres.has_key(p):
print "Gt Primitive not matched (",p.uid,",",p.did,")"
else:
matched_p = primitiveCorres[p]
matched_p_node = graph_it1.G.node[matched_p.uid]
# iterate over all relations and check they have a counterpart in the estimated scene
# cUid is the uid of the connected component
# cUid can be used to access the connected component using
# print cUid, primitiveCorresId[cUid]
for idx, cUid in enumerate(gtGraph.G.edge[p.uid]):
# now we are look for the connection starting from matched_p et going to primitiveCorresId[cUid]
# matched_p.uid, primitiveCorresId[cUid]
#
# if we find it, we increment the matched field of the edges, and move to the next one
#print cUid, primitiveCorresId[cUid]
for idx2, matched_cUid in enumerate(graph_it1.G.edge[matched_p.uid]):
#print " ",matched_cUid, primitiveCorresId[cUid]
if matched_cUid == primitiveCorresId[cUid]:
#print " match found !"
gtGraph.G.edge[p.uid][cUid]['matched'] += 1
graph_it1.G.edge[matched_p.uid][matched_cUid]['matched'] += 1
break
def checkEdges(graph, doPrint):
    """Tally edges whose 'matched' attribute equals 2 (matched in both directions).

    Returns (correct, error): *error* becomes True when an edge has an
    unexpected match count (neither 0 nor 2).  Edges with count 0 are only
    reported (when *doPrint* is set), not flagged as errors.
    """
    correct=0
    error = False
    for e in graph.G.edges_iter(data=True):
        count = e[-1]['matched']
        if (count == 2):correct+=1
        elif count == 0:
            # Unmatched edge: report only; not treated as a hard error.
            if doPrint: print "Missed edge detected..."
        else: error = True;
    return correct, error
gtcorrect, gtError = checkEdges(gtGraph, True)
correct_it1, error_it1 = checkEdges(graph_it1, False)
if gtError or error_it1:
print "Error occurred, invalid number of matches. ABORT"
quit()
if gtcorrect != correct_it1:
print "Error: non-symmetric detection"
quit()
print "precision=",float(gtcorrect)/float(graph_it1.G.number_of_edges())
print "recall =",float(gtcorrect)/float(gtGraph.G.number_of_edges())
| apache-2.0 | Python | |
f84a804cb0e69bc3758958cd6c9f5b195aecd5d8 | Add AWS Batch Support (#796) | cloudtools/troposphere,johnctitus/troposphere,ikben/troposphere,johnctitus/troposphere,pas256/troposphere,ikben/troposphere,cloudtools/troposphere,pas256/troposphere | troposphere/batch.py | troposphere/batch.py | from . import AWSObject, AWSProperty
from .validators import positive_integer
class ComputeResources(AWSProperty):
    """`ComputeResources` property of an `AWS::Batch::ComputeEnvironment`."""
    props = {
        "SpotIamFleetRole": (basestring, False),
        "MaxvCpus": (positive_integer, True),
        "SecurityGroupIds": ([basestring], True),
        "BidPercentage": (positive_integer, False),
        "Type": (basestring, True),
        "Subnets": ([basestring], True),
        "MinvCpus": (positive_integer, True),
        "ImageId": (basestring, False),
        "InstanceRole": (basestring, True),
        "InstanceTypes": ([basestring], True),
        "Ec2KeyPair": (basestring, False),
        "Tags": ([basestring], False),
        "DesiredvCpus": (positive_integer, False)
    }
class MountPoints(AWSProperty):
    """Container mount point entry used by `ContainerProperties.MountPoints`."""
    props = {
        "ReadOnly": (bool, False),
        "SourceVolume": (basestring, False),
        "ContainerPath": (basestring, False)
    }
class VolumesHost(AWSProperty):
    """Host path description used by `Volumes.Host`."""
    props = {
        "SourcePath": (basestring, False)
    }
class Volumes(AWSProperty):
    """Volume definition used by `ContainerProperties.Volumes`."""
    props = {
        "Host": (VolumesHost, False),
        "Name": (basestring, False)
    }
class Environment(AWSProperty):
    """Name/value environment variable pair for `ContainerProperties.Environment`."""
    props = {
        "Value": (basestring, False),
        "Name": (basestring, False)
    }
class Ulimit(AWSProperty):
    """Resource limit entry used by `ContainerProperties.Ulimits`."""
    props = {
        "SoftLimit": (positive_integer, True),
        "HardLimit": (positive_integer, True),
        "Name": (basestring, True)
    }
class ContainerProperties(AWSProperty):
    """`ContainerProperties` property of an `AWS::Batch::JobDefinition`."""
    props = {
        "MountPoints": ([MountPoints], False),
        "User": (basestring, False),
        "Volumes": ([Volumes], False),
        "Command": ([basestring], False),
        "Memory": (positive_integer, True),
        "Privileged": (bool, False),
        "Environment": ([Environment], False),
        "JobRoleArn": (basestring, False),
        "ReadonlyRootFilesystem": (bool, False),
        "Ulimits": ([Ulimit], False),
        "Vcpus": (positive_integer, True),
        "Image": (basestring, True)
    }
class RetryStrategy(AWSProperty):
    """Retry strategy property used by `JobDefinition.RetryStrategy`."""
    props = {
        "Attempts": (positive_integer, False)
    }
class JobDefinition(AWSObject):
    """CloudFormation resource `AWS::Batch::JobDefinition`."""
    resource_type = "AWS::Batch::JobDefinition"

    props = {
        "Type": (basestring, True),
        "Parameters": (dict, True),
        "ContainerProperties": (ContainerProperties, False),
        "JobDefinitionName": (basestring, True),
        "RetryStrategy": (RetryStrategy, False)
    }
def validate_environment_state(environment_state):
    """Check that *environment_state* is a legal compute environment state.

    :param environment_state: State of the environment
    :return: The provided value if valid
    :raises ValueError: when the state is neither "ENABLED" nor "DISABLED"
    """
    if environment_state in ("ENABLED", "DISABLED"):
        return environment_state
    raise ValueError(
        "{} is not a valid environment state".format(environment_state)
    )
class ComputeEnvironment(AWSObject):
    """CloudFormation resource `AWS::Batch::ComputeEnvironment`."""
    resource_type = "AWS::Batch::ComputeEnvironment"

    props = {
        "Type": (basestring, True),
        "ServiceRole": (basestring, True),
        "ComputeEnvironmentName": (basestring, False),
        "ComputeResources": (ComputeResources, True),
        "State": (validate_environment_state, False)
    }
class ComputeEnvironmentOrder(AWSProperty):
    """Ordered compute environment entry used by `JobQueue.ComputeEnvironmentOrder`."""
    props = {
        "ComputeEnvironment": (basestring, True),
        "Order": (positive_integer, True)
    }
def validate_queue_state(queue_state):
    """Check that *queue_state* is a legal job queue state.

    :param queue_state: State of the queue
    :return: The provided value if valid
    :raises ValueError: when the state is neither "ENABLED" nor "DISABLED"
    """
    if queue_state in ("ENABLED", "DISABLED"):
        return queue_state
    raise ValueError(
        "{} is not a valid queue state".format(queue_state)
    )
class JobQueue(AWSObject):
    """CloudFormation resource `AWS::Batch::JobQueue`."""
    resource_type = "AWS::Batch::JobQueue"

    props = {
        "ComputeEnvironmentOrder": ([ComputeEnvironmentOrder], True),
        "Priority": (positive_integer, True),
        "State": (validate_queue_state, False),
        "JobQueueName": (basestring, True)
    }
| bsd-2-clause | Python | |
7fbdd5dbadc8bc8186173545257763471bd69665 | test the use_twisted etc commands | meejah/txaio,oberstet/txaio,oberstet/txaio,crossbario/txaio,tavendo/txaio,crossbario/txaio | test/test_imports.py | test/test_imports.py | import pytest
def test_use_twisted():
    """txaio.use_twisted() selects the Twisted backend (skipped when Twisted is absent)."""
    pytest.importorskip('twisted')

    import txaio
    txaio.use_twisted()
    assert txaio.using_twisted
    assert not txaio.using_asyncio
def test_use_twisted_no_twisted():
    """use_twisted() must raise ImportError when Twisted is not installed."""
    # make sure we DO NOT have Twisted installed
    try:
        import twisted  # noqa
        return
    except ImportError:
        pass  # no Twisted

    import txaio
    try:
        txaio.use_twisted()
    except ImportError:
        pass
    else:
        # BUG FIX: the original did `assert "Should have gotten ImportError"`,
        # which asserts a non-empty string and therefore can never fail.
        raise AssertionError("Should have gotten ImportError")

    assert not txaio.using_twisted
    assert txaio.using_asyncio
def test_use_asyncio():
    """txaio.use_asyncio() selects the asyncio backend (skipped when asyncio is absent)."""
    pytest.importorskip('asyncio')

    import txaio
    txaio.use_asyncio()
    assert txaio.using_asyncio
    assert not txaio.using_twisted
| mit | Python | |
7b773cc61e152c6bf6996d09ab9adb08153b6415 | add Z3-based solver | mtrberzi/sudoku2smt | z3sudoku.py | z3sudoku.py | #!/usr/bin/python
## This program uses the Z3 SMT solver to find solutions to Soduko puzzles.
## Input is from stdin and consists of 9 lines, each of which has 9 entries.
## Each line of input corresponds to a single row of the Sudoku board.
## Each entry on a given line represents the value in the corresponding
## column of that row, given as an integer between 1 and 9,
## or a marker denoting a blank cell, given as a '.' (period).
## The program writes to standard output a set of SMT formulas
## that together encode the initial state of the puzzle
## and the constraints on a valid solution.
## For example, the following input (without comment signs):
## 5 3 . . 7 . . . .
## 6 . . 1 9 5 . . .
## . 9 8 . . . . 6 .
## 8 . . . 6 . . . 3
## 4 . . 8 . 3 . . 1
## 7 . . . 2 . . . 6
## . 6 . . . . 2 8 .
## . . . 4 1 9 . . 5
## . . . . 8 . . 7 9
##
import sys
import re
import z3
regex_one_entry = "\\s*([1-9\\.])\\s*"
regex_one_line = ""
for i in range(9):
regex_one_line += regex_one_entry
re_line = re.compile(regex_one_line)
def parse_sudoku_line(line):
    """Parse one board row into a list of nine entries ('1'-'9' or '.').

    Matches against the module-level compiled pattern `re_line`; raises a
    plain Exception when the line does not contain nine valid entries.
    """
    match_line = re_line.search(line)
    if match_line:
        entries = []
        for i in range(9):
            entries.append(match_line.group(i + 1))
        return entries
    else:
        raise Exception("Invalid input line '{0}'.".format(line))
initial_grid = [] # initial_grid[row][column] = entry
variables_grid = []
s = z3.Solver()
# Helper functions to generate SMT2 expressions
def entry(row, col):
    """Return the SMT variable name for the cell at (row, col), e.g. "x42"."""
    return "x{0}{1}".format(row, col)
def declare_variable(row, col):
    """Create the Z3 integer variable representing cell (row, col)."""
    return z3.Int(entry(row,col))
def declare_entry(row,col):
    """Constrain cell (row, col) in the global solver `s`.

    A '.' in the initial grid leaves the cell free in 1..9; a digit pins
    the variable to that exact value.
    """
    v = variables_grid[row][col]
    initial_entry = initial_grid[row][col]
    if initial_entry == ".":
        s.add(v >= 1)
        s.add(v <= 9)
    else:
        e = int(initial_entry)
        s.add(v == e)
def constrain_row(row):
    """Add pairwise-distinct constraints over the nine cells of *row*."""
    for i in range(9):
        for j in range(i+1, 9):
            e1 = variables_grid[row][i]
            e2 = variables_grid[row][j]
            s.add(e1 != e2)
def constrain_column(col):
    """Add pairwise-distinct constraints over the nine cells of *col*."""
    for i in range(9):
        for j in range(i+1, 9):
            e1 = variables_grid[i][col]
            e2 = variables_grid[j][col]
            s.add(e1 != e2)
def constrain_subgrid(x, y):
    """Add distinctness constraints inside the 3x3 subgrid at block (x, y).

    NOTE(review): this iterates over *ordered* pairs, so each `e1 != e2`
    constraint is added twice; redundant but harmless for the solver.
    """
    xmin = 3*x
    xmax = 3*x + 2
    ymin = 3*y
    ymax = 3*y + 2
    for i1 in range(xmin, xmax+1):
        for j1 in range(ymin, ymax+1):
            for i2 in range(xmin, xmax+1):
                for j2 in range(ymin, ymax+1):
                    if i1 != i2 or j1 != j2:
                        e1 = variables_grid[i1][j1]
                        e2 = variables_grid[i2][j2]
                        s.add(e1 != e2)
## Entry point.
# Read 9 lines of input
for i in range(9):
line = sys.stdin.readline()
grid_line = parse_sudoku_line(line)
initial_grid.append(grid_line)
for row in range(9):
line = []
for col in range(9):
line.append(declare_variable(row, col))
variables_grid.append(line)
# Declare all constants for grid entries
for row in range(9):
for col in range(9):
declare_entry(row, col)
# Constrain all rows
for row in range(9):
constrain_row(row)
# Constrain all columns
for col in range(9):
constrain_column(col)
# Constrain all 3x3 subgrids
for x in range(3):
for y in range(3):
constrain_subgrid(x, y)
# finally
# NOTE(review): this relies on the truthiness of the CheckSatResult returned
# by s.check(); comparing explicitly with z3.sat would be clearer -- confirm.
if s.check():
    m = s.model()
    # Print the solved grid row by row from the model assignment.
    for row in range(9):
        for col in range(9):
            v = variables_grid[row][col]
            sys.stdout.write(str(m[v]) + " ")
        sys.stdout.write("\n")
else:
    print("no solution")
| mit | Python | |
a84d8005193328b63eaf98f0852dc72c3e58aed9 | Add example script for evaluating setiment | dankolbman/MarketCents | twitter_feed.py | twitter_feed.py | # authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
| mit | Python | |
4463ee56c210b5dd2c7e117b2b4f3adb3d8a4b67 | Create icreacream.py | Cantal0p3/nyu-devops-homework-1,ilanasufrin/nyu-devops-homework-1,Cantal0p3/nyu-devops-homework-1,ilanasufrin/nyu-devops-homework-1,ilanasufrin/nyu-devops-homework-1,Cantal0p3/nyu-devops-homework-1 | icreacream.py | icreacream.py | # Copyright 2016 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, Response, jsonify, request, json
# Status Codes
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_204_NO_CONTENT = 204
HTTP_400_BAD_REQUEST = 400
HTTP_404_NOT_FOUND = 404
HTTP_409_CONFLICT = 409
# Create Flask application
app = Flask(__name__)
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
    """Root endpoint: report the service name, version and resource URL."""
    return jsonify(name='My REST API Service', version='1.0', url='/resources'), HTTP_200_OK
######################################################################
# LIST ALL resourceS
######################################################################
@app.route('/flavors', methods=['GET'])
def list_all_flavors():
    """GET /flavors -- list all flavors (stub, not yet implemented)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# RETRIEVE A resource
######################################################################
@app.route('/flavors/flavor/<serialno>', methods=['GET'])
def get_a_flavor(serialno):
    """GET /flavors/flavor/<serialno> -- retrieve one flavor (stub)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# ADD A NEW resource
######################################################################
@app.route('/flavors/flavor', methods=['POST'])
def create_new_flavor():
    """POST /flavors/flavor -- create a new flavor (stub)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# UPDATE AN EXISTING resource
######################################################################
@app.route('/flavors/flavor/<serialno>', methods=['PUT'])
def update_flavor(serialno):
    """PUT /flavors/flavor/<serialno> -- update an existing flavor (stub)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# DELETE A resource
######################################################################
@app.route('/flavors/flavor/<serialno>', methods=['DELETE'])
def delete_flavor(serialno):
    """DELETE /flavors/flavor/<serialno> -- delete a flavor (stub)."""
    # YOUR CODE here (remove pass)
    pass
############################################################################
# QUERY Resources by some attribute of the Resource - Type: Vegan/Non-Vegan
############################################################################
@app.route('/flavors/<attributeValue>', methods=['GET'])
def list_resources_by_type(attributeValue):
    """GET /flavors/<attributeValue> -- query flavors by attribute, e.g. vegan/non-vegan (stub)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# PERFORM some Action on the Resource - UPDATE a resource status
######################################################################
@app.route('/flavors/flavor/<serialno>/<statusvalue>', methods=['PUT'])
def update_flavor_status(serialno,statusvalue):
    """PUT /flavors/flavor/<serialno>/<statusvalue> -- set a flavor's status (stub)."""
    # YOUR CODE here (remove pass)
    pass
######################################################################
# M A I N
######################################################################
if __name__ == "__main__":
# Get bindings from the environment
port = os.getenv('PORT', '5000')
app.run(host='0.0.0.0', port=int(port), debug=True)
| apache-2.0 | Python | |
3c1971d1175f0257e5297c273a780b337b72b319 | Add tests from AutoParadigmMixin | thiderman/network-kitten | test/test_util.py | test/test_util.py | from kitten.util import AutoParadigmMixin
class TestAutoParadigmMixin(object):
    """Tests for kitten.util.AutoParadigmMixin's lazy `paradigms` property."""

    def setup_method(self, method):
        # Fresh mixin instance for every test.
        self.apm = AutoParadigmMixin()

    def test_first_load(self):
        # First access populates the internal cache; 'node' is loaded.
        ret = self.apm.paradigms
        assert 'node' in ret
        assert 'node' in self.apm._paradigms

    def test_second_load(self):
        # A pre-seeded cache is returned as-is instead of being rebuilt.
        self.apm._paradigms = {'hehe': True}
        ret = self.apm.paradigms
        assert 'hehe' in ret
        assert 'hehe' in self.apm._paradigms
| mit | Python | |
0c6be9c453f7393bfeb621cc9e2f6142fdb11cc8 | Add example about using custom density distributions. | MCGallaspy/pymc3,kyleam/pymc3,jameshensman/pymc3,jameshensman/pymc3,arunlodhi/pymc3,superbobry/pymc3,MCGallaspy/pymc3,Anjum48/pymc3,LoLab-VU/pymc,hothHowler/pymc3,CVML/pymc3,JesseLivezey/pymc3,JesseLivezey/pymc3,kmather73/pymc3,CVML/pymc3,MichielCottaar/pymc3,tyarkoni/pymc3,wanderer2/pymc3,dhiapet/PyMC3,superbobry/pymc3,kmather73/pymc3,dhiapet/PyMC3,clk8908/pymc3,kyleam/pymc3,MichielCottaar/pymc3,evidation-health/pymc3,evidation-health/pymc3,hothHowler/pymc3,clk8908/pymc3,arunlodhi/pymc3,wanderer2/pymc3,tyarkoni/pymc3,LoLab-VU/pymc,Anjum48/pymc3 | pymc/examples/custom_dists.py | pymc/examples/custom_dists.py | # This model was presented by Jake Vanderplas in his blog post about
# comparing different MCMC packages
# http://jakevdp.github.io/blog/2014/06/14/frequentism-and-bayesianism-4-bayesian-in-python/
#
# While at the core it's just a linear regression, it's a nice
# illustration of using Jeffrey priors and custom density
# distributions in PyMC3.
#
# Adapted to PyMC3 by Thomas Wiecki
import matplotlib.pyplot as plt
import numpy as np
import pymc
import theano.tensor as T
np.random.seed(42)
theta_true = (25, 0.5)
xdata = 100 * np.random.random(20)
ydata = theta_true[0] + theta_true[1] * xdata
# add scatter to points
xdata = np.random.normal(xdata, 10)
ydata = np.random.normal(ydata, 10)
data = {'x': xdata, 'y': ydata}
with pymc.Model() as model:
alpha = pymc.Uniform('intercept', -100, 100)
# Create custom densities
beta = pymc.DensityDist('slope', lambda value: -1.5 * T.log(1 + value**2), testval=0)
sigma = pymc.DensityDist('sigma', lambda value: -T.log(T.abs_(value)), testval=1)
# Create likelihood
like = pymc.Normal('y_est', mu=alpha + beta * xdata, sd=sigma, observed=ydata)
start = pymc.find_MAP()
step = pymc.NUTS(scaling=start) # Instantiate sampler
trace = pymc.sample(10000, step, start=start)
#################################################
# Create some convenience routines for plotting
# All functions below written by Jake Vanderplas
def compute_sigma_level(trace1, trace2, nbins=20):
    """From a set of traces, bin by number of standard deviations.

    Returns (xbin_centers, ybin_centers, sigma) where ``sigma[i, j]`` is the
    cumulative probability mass contained in bins at least as dense as bin
    (i, j) -- suitable for contouring at the 0.683 / 0.955 levels.
    """
    L, xbins, ybins = np.histogram2d(trace1, trace2, nbins)
    # Give empty bins a tiny positive weight so they rank last.
    L[L == 0] = 1E-16
    # BUG FIX: removed dead `logL = np.log(L)` -- it was never used.

    shape = L.shape
    L = L.ravel()

    # obtain the indices to sort and unsort the flattened array
    i_sort = np.argsort(L)[::-1]
    i_unsort = np.argsort(i_sort)

    # Cumulative mass from densest bin downward, normalised to 1.
    L_cumsum = L[i_sort].cumsum()
    L_cumsum /= L_cumsum[-1]

    # Convert bin edges to bin centres.
    xbins = 0.5 * (xbins[1:] + xbins[:-1])
    ybins = 0.5 * (ybins[1:] + ybins[:-1])

    return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
def plot_MCMC_trace(ax, xdata, ydata, trace, scatter=False, **kwargs):
    """Plot traces and contours.

    *trace* is indexed as trace[0]/trace[1] (alpha/beta samples); contours
    are drawn at the 68.3% and 95.5% levels from compute_sigma_level.
    """
    xbins, ybins, sigma = compute_sigma_level(trace[0], trace[1])
    ax.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955], **kwargs)
    if scatter:
        # Raw samples as a faint point cloud behind the contours.
        ax.plot(trace[0], trace[1], ',k', alpha=0.1)
    ax.set_xlabel(r'$\alpha$')
    ax.set_ylabel(r'$\beta$')
def plot_MCMC_model(ax, xdata, ydata, trace):
    """Plot the linear model and 2sigma contours."""
    ax.plot(xdata, ydata, 'ok')

    alpha, beta = trace[:2]
    xfit = np.linspace(-20, 120, 10)
    # One fitted line per posterior sample, broadcast over xfit.
    yfit = alpha[:, None] + beta[:, None] * xfit
    mu = yfit.mean(0)
    sig = 2 * yfit.std(0)  # 2-sigma band across posterior samples

    ax.plot(xfit, mu, '-k')
    ax.fill_between(xfit, mu - sig, mu + sig, color='lightgray')

    ax.set_xlabel('x')
    ax.set_ylabel('y')
def plot_MCMC_results(xdata, ydata, trace, colors='k'):
    """Plot both the trace and the model together."""
    fig, ax = plt.subplots(1, 2, figsize=(10, 4))
    plot_MCMC_trace(ax[0], xdata, ydata, trace, True, colors=colors)
    plot_MCMC_model(ax[1], xdata, ydata, trace)
pymc_trace = [trace['intercept'],
trace['slope'],
trace['sigma']]
plot_MCMC_results(xdata, ydata, pymc_trace)
plt.show() | apache-2.0 | Python | |
0d7b9e23889b2908e874bda58a119af6b763f04e | Test Case for adding groups | labizon/Python_training | test_add_group.py | test_add_group.py | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
    """Return True if a JavaScript alert is currently open in *wd*.

    Probes the alert by reading its text; any error from the driver is
    taken to mean "no alert".
    """
    try:
        wd.switch_to_alert().text
        return True
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so those still propagate.
    except Exception:
        return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python | |
cda831f6fc95922ac81e51fd93dc4705f1bdb688 | add keyhac config | sakatam/dotfiles,sakatam/dotfiles,sakatam/dotfiles,sakatam/dotfiles | keyhac/config.py | keyhac/config.py | from keyhac import *
def configure(keymap):
    """Keyhac entry point: install vim-style HJKL navigation bindings globally."""
    keymap_global = keymap.defineWindowKeymap()

    # Ctrl + HJKL -> arrow keys.
    keymap_global[ "Ctrl-H" ] = "Left"
    keymap_global[ "Ctrl-J" ] = "Down"
    keymap_global[ "Ctrl-K" ] = "Up"
    keymap_global[ "Ctrl-L" ] = "Right"

    # With Shift -> extend selection while moving.
    keymap_global[ "Ctrl-Shift-H" ] = "Shift-Left"
    keymap_global[ "Ctrl-Shift-J" ] = "Shift-Down"
    keymap_global[ "Ctrl-Shift-K" ] = "Shift-Up"
    keymap_global[ "Ctrl-Shift-L" ] = "Shift-Right"

    # Cmd variants map to Cmd+arrow combinations.
    keymap_global[ "Cmd-Ctrl-H" ] = "Cmd-Left"
    keymap_global[ "Cmd-Ctrl-J" ] = "Cmd-Down"
    keymap_global[ "Cmd-Ctrl-K" ] = "Cmd-Up"
    keymap_global[ "Cmd-Ctrl-L" ] = "Cmd-Right"

    keymap_global[ "Cmd-Ctrl-Shift-H" ] = "Cmd-Shift-Left"
    keymap_global[ "Cmd-Ctrl-Shift-J" ] = "Cmd-Shift-Down"
    keymap_global[ "Cmd-Ctrl-Shift-K" ] = "Cmd-Shift-Up"
    keymap_global[ "Cmd-Ctrl-Shift-L" ] = "Cmd-Shift-Right"

    # Ctrl-F / Ctrl-B -> page down / page up.
    keymap_global[ "Ctrl-F" ] = "PageDown"
    keymap_global[ "Ctrl-B" ] = "PageUp"
c68ae63ac4856a6417afb10f53a7609c3958ae32 | Solve Knowit 01 | matslindh/codingchallenges,matslindh/codingchallenges | knowit2017/01.py | knowit2017/01.py | from collections import Counter
def lookify(w):
    """Return the word's distinct characters, sorted, as a lookup key."""
    return ''.join(sorted(set(w)))
def ngram(n, w):
    """Concatenate every length-``n`` sliding window of ``w`` (step 1).

    E.g. ngram(2, 'snowflake') -> 'snnoowwffllaakke'.  Words shorter than
    ``n`` still yield one (short) chunk, matching the original do-while
    behaviour.

    BUG FIX: removed the leftover debug ``print`` that wrote every chunk
    to stdout.
    """
    # max(1, ...) preserves the "emit at least one chunk" behaviour.
    return ''.join(w[i:i + n] for i in range(max(1, len(w) - n + 1)))
dictionary = open("input/wordlist.txt").readlines()
#dictionary = ['snowflake', 'mistletoe']

# Group dictionary words by their sorted distinct-letter key (lookify),
# so candidate anagram sources can be found in one dict lookup.
lookup = {}
for word in dictionary:
    l = lookify(word.strip())
    if l not in lookup:
        lookup[l] = []
    lookup[l].append(word.strip())
def find_solution(question):
    """Find 'n-word' such that the word's n-gram expansion is an anagram of *question*.

    Candidates are the words sharing the question's distinct-letter key;
    n-gram sizes 2..9 are tried.  Falls through (returns None) when no
    candidate matches.
    """
    k = lookify(question)

    for w in lookup[k]:
        c = sorted(question)
        for n in range(2, 10):
            a = sorted(ngram(n, w))
            if a == c:
                return str(n) + '-' + w
def test_ngram():
    # Known n-gram expansions for the two sample words.
    assert 'misiststltleletetotoe' == ngram(3, 'mistletoe')
    assert 'snnoowwffllaakke' == ngram(2, 'snowflake')
def test_solution():
    # End-to-end: scrambled n-gram strings resolve to 'n-word' answers.
    assert '2-snowflake' == find_solution('fnaewkfonklsawlo')
    assert '3-mistletoe' == find_solution('itseotltmlelteoitetss')
print(find_solution('aeteesasrsssstaesersrrsse')) | mit | Python | |
98a5c93881c6bafa3f5d67af157c56c187d4b3bd | Add a legacy app runner application based on Xephyr | samdroid-apps/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,i5o/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,puneetgkaur/backup_sugar_sugartoolkit,Daksh/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,godiard/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,manuq/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,puneetgkaur/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,manuq/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3 | legacy/legacy.py | legacy/legacy.py | #!/usr/bin/python -t
# -*- tab-width: 4; indent-tabs-mode: t -*-
import dbus
import dbus.service
import dbus.glib
import pygtk
pygtk.require('2.0')
import gtk, gobject
import sys
import os
import pwd
import gc
import socket
import types
import select
sys.path.append(os.getcwd())
sys.path.append('../shell/example-activity/')
import activity
XEPHYR_PATH = "/usr/bin/Xephyr"
def getfd(filespec, readOnly = 0):
    """Return (fd, opened_here) for *filespec*.

    An int filespec is passed through unopened (opened_here False); None
    maps to /dev/null.  Paths are opened O_RDWR|O_CREAT (mode 0644) unless
    *readOnly* is set, in which case O_RDONLY is used.
    """
    if type(filespec) == types.IntType:
        return (filespec, False)
    if filespec == None:
        filespec = "/dev/null"

    flags = os.O_RDWR | os.O_CREAT
    if (readOnly):
        flags = os.O_RDONLY
    fd = os.open(filespec, flags, 0644)
    return (fd, True)
def exec_with_redirect(cmd, argv, display, stdin=0, stdout=1, stderr=2, setpgrp=True):
cmd = os.path.abspath(cmd)
if not os.access (cmd, os.X_OK):
raise RuntimeError(cmd + " can not be run")
stdout_opened = False
stderr_opened = False
(stdin, stdin_opened) = getfd(stdin)
if stdout == stderr:
(stdout, stdout_opened) = getfd(stdout)
stderr = stdout
else:
(stdout, stdout_opened) = getfd(stdout)
(stderr, stderr_opened) = getfd(stderr)
childpid = os.fork()
if (not childpid):
# Become leader of a new process group if requested
if setpgrp:
os.setpgrp()
if stdin != 0:
os.dup2(stdin, 0)
os.close(stdin)
if stdout != 1:
os.dup2(stdout, 1)
if stdout != stderr:
os.close(stdout)
if stderr != 2:
os.dup2(stderr, 2)
os.close(stderr)
try:
if display:
os.environ['DISPLAY'] = "0:%d" % display
os.execv(cmd, argv)
except OSError, e:
print "Could not execute command '%s'. Reason: %s" % (cmd, e)
sys.exit(1)
# Close any files we may have opened
if stdin_opened:
os.close(stdin)
if stdout_opened:
os.close(stdout)
if stderr != stdout and stderr_opened:
os.close(stderr)
return childpid
class LegacyActivity(activity.Activity):
def __init__(self, args):
activity.Activity.__init__(self)
self._act_name = os.path.basename(args[1])
self._display = 5
self._args = args[1:]
def _xephyr_function(self, pid, condition, data=None):
print "Xephyr: PID: %d, condition: %s" % (pid, condition)
def _act_function(self, pid, condition, data=None):
print "ACT: PID: %d, condition: %s" % (pid, condition)
def _start(self):
cmd = XEPHYR_PATH
args = []
args.append(XEPHYR_PATH)
args.append(":%d" % self._display)
args.append("-ac")
args.append("-parent")
args.append("%d" % self._plug.get_id())
args.append("-host-cursor")
self._xephyr_pid = exec_with_redirect(cmd, args, None, None)
self._xephyr_watch = gobject.child_watch_add(self._xephyr_pid, self._xephyr_function)
cmd = os.path.abspath(self._args[0])
args = [cmd]
for arg in self._args[1:]:
args.append(arg)
self._act_pid = exec_with_redirect(cmd, args, self._display, None)
self._act_watch = gobject.child_watch_add(self._act_pid, self._act_function)
def activity_on_connected_to_shell(self):
print "act %d: in activity_on_connected_to_shell" % self.activity_get_id()
self.activity_set_tab_text(self._act_name)
self._plug = self.activity_get_gtk_plug()
self._plug.show()
self._start()
def activity_on_disconnected_from_shell(self):
print "act %d: in activity_on_disconnected_from_shell"%self.activity_get_id()
print "act %d: Shell disappeared..."%self.activity_get_id()
gc.collect()
def activity_on_close_from_user(self):
print "act %d: in activity_on_close_from_user"%self.activity_get_id()
self.activity_shutdown()
def activity_on_lost_focus(self):
print "act %d: in activity_on_lost_focus"%self.activity_get_id()
def activity_on_got_focus(self):
print "act %d: in activity_on_got_focus"%self.activity_get_id()
def cleanup(self):
os.kill(self._xephyr_pid, 9)
os.kill(self._act_pid, 9)
def run(self):
try:
gtk.main()
except KeyboardInterrupt:
pass
def main(args):
app = LegacyActivity(args)
app.activity_connect_to_shell()
app.run()
app.cleanup()
if __name__ == "__main__":
main(sys.argv)
| lgpl-2.1 | Python | |
55908f44cb683063cba4a57ee262c3d0074b68e2 | Create GiftWrapping.py | MaximeKjaer/dailyprogrammer-challenges | Challenge-174/03-Hard/GiftWrapping.py | Challenge-174/03-Hard/GiftWrapping.py | from PIL import Image, ImageDraw, ImageOps
from random import randint
n = 26
#Create image
img = Image.new( 'RGB', (101, 101), 'white')
draw = ImageDraw.Draw(img)
points = [(randint(0, 100), randint(0, 100)) for _ in range(n)]
def left(point, line):
"""Determines if a point is to the left of a line"""
x, y = point[0], point[1]
#DETERMINE SLOPE
if line[1][0] - line[0][0] != 0: #If it isn't vertical
slope = (line[1][1] - line[0][1]) / (line[1][0] - line[0][0])
y_intercept = line[0][1] - slope*line[0][0]
else:
slope = 'vertical'
#DETERMINE IF IT IS TO THE LEFT
if line[0][0] > line[1][0]: #If the line goes from left to right, then check if the point is above
return y > slope*x + y_intercept
elif line[0][0] < line[1][0]: #If it goes from right to left, then check if the point is below
return y < slope*x + y_intercept
elif slope == 'vertical' and line[0][1] > line[1][1]: #If it goes from up to down then check if the point is to the right
return x > line[0][1]
elif slope == 'vertical' and line[0][1] < line[1][1]: #If it goes from down to up, then check if the point is to the left
return x < line[0][1]
def jarvis(S):
pointOnHull = min(S)
i = 0
endpoint = ''
P = [0]
while endpoint != P[0]:
if P == [0]:
P[0] = pointOnHull
else:
P.append(pointOnHull)
endpoint = S[0]
for j in range(1, len(S)):
line = [P[i], endpoint]
if (endpoint == pointOnHull) or left(S[j], line):
endpoint = S[j]
i += 1
pointOnHull = endpoint
return P
P = jarvis(points)
draw.polygon(P, outline='red')
draw.point(points, fill="black")
img = img.resize((500, 500))
img = ImageOps.flip(img)
img.save('hull.png', 'PNG')
| mit | Python | |
71d0a5a283a3320926fb017e355aa812301997e5 | Create Malta holidays tests | dr-prodigy/python-holidays | test/countries/test_malta.py | test/countries/test_malta.py | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
import warnings
from datetime import date
import holidays
class TestMT(unittest.TestCase):
def setUp(self):
self.holidays = holidays.MT()
def test_2022(self):
# https://www.gov.mt/en/About%20Malta/Pages/Public%20Holidays.aspx
self.assertIn(date(2022, 1, 1), self.holidays)
self.assertIn(date(2022, 2, 10), self.holidays)
self.assertIn(date(2022, 3, 19), self.holidays)
self.assertIn(date(2022, 3, 31), self.holidays)
self.assertIn(date(2022, 4, 15), self.holidays)
self.assertIn(date(2022, 5, 1), self.holidays)
self.assertIn(date(2022, 6, 7), self.holidays)
self.assertIn(date(2022, 6, 29), self.holidays)
self.assertIn(date(2022, 8, 15), self.holidays)
self.assertIn(date(2022, 9, 8), self.holidays)
self.assertIn(date(2022, 9, 21), self.holidays)
self.assertIn(date(2022, 12, 8), self.holidays)
self.assertIn(date(2022, 12, 13), self.holidays)
self.assertIn(date(2022, 12, 25), self.holidays)
self.assertNotIn(date(2022, 11, 12), self.holidays)
| mit | Python | |
469c2932575daaf42d8cec5578c087f4e5c340af | Add Django REST Framework authentication helpers for JWT | City-of-Helsinki/django-helusers,City-of-Helsinki/django-helusers | helusers/jwt.py | helusers/jwt.py | from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
| bsd-2-clause | Python | |
2132d8eb661f7f3ede5f0e436160d59f0a22c413 | Create xlsqlite.py | 6234456/xlsqlite | usr/xlsqlite.py | usr/xlsqlite.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #####
# @param db: name of the sqlite database
# @param tbl: name of the table in the given db
# @param wb: name of the workbook from which to fetch the data, default to be ".xls" format. no extension needed
# #####
def xl2sql(wb = None, sht = None, db = "src.db", tbl = "src"):
# xlrd is needed to handle the excel io
from xlrd import open_workbook
import os
import sqlite3
os.chdir("\\".join(str(__file__).split("\\")[:-1]))
if not wb:
wb = "src"
book = open_workbook(wb + ".xls", encoding_override="utf-8")
if not sht:
sheet = book.sheet_by_index(0)
else:
sheet = book.sheet_by_name(sht)
conn = sqlite3.connect(db)
cursor = conn.cursor();
# check the data type of the cells, store in the dict col => (callback, name)
types = {}
callbacks = {}
sql_create_table = "CREATE TABLE [" + tbl + "] ( "
for c in range(sheet.ncols):
callbacks[c], types[c] = type_mapping(sheet.cell(1,c).ctype)
if types[c]:
sql_create_table = sql_create_table + " [" + sheet.cell(0,c).value +"] " + types[c] + ","
sql_create_table = sql_create_table[:-1] + " );"
cursor.execute(sql_create_table)
# insert the records
sql_insert_value = "INSERT INTO [" + sheet.name + "] VALUES ( "
for r in range(1, sheet.nrows):
for c in range(sheet.ncols):
if types[c]:
if types[c] == "TEXT" or types[c] == "DATETEXT":
sql_insert_value = sql_insert_value + callbacks[c](sheet.cell(r,c).value).decode(encoding='UTF-8',errors='strict') + ","
else:
sql_insert_value = sql_insert_value + str(callbacks[c](sheet.cell(r,c).value)) + ","
sql_insert_value = sql_insert_value[:-1] + ");"
try:
cursor.execute(sql_insert_value)
except sqlite3.OperationalError:
print sql_insert_value
return
sql_insert_value = "INSERT INTO [" + sheet.name + "] VALUES ( "
conn.commit()
conn.close()
def xldate2str(d):
from xlrd import xldate_as_tuple
from datetime import date
a = xldate_as_tuple(d,0)
return date(a[0],a[1],a[2]).strftime("'%Y-%m-%d'")
def sqlstr(s):
try:
res = "'" + str(s).replace("'","''") + "'"
except UnicodeEncodeError:
res = "'" + s.encode(encoding='UTF-8',errors='strict').replace("'","''") + "'"
return res
def type_mapping(t):
if t == 1:
return (sqlstr, "TEXT")
elif t == 2:
return (float, "REAL")
elif t == 3:
return (xldate2str,"DATETEXT")
elif t == 4:
return (int,"INTEGER")
else:
return (None,None)
# #####
# @param db: name of the sqlite database
# @param tbl: name of the table in the given db
# @param query: SQL to be executed
# @param wb: name of the workbook to store the query result, default to be ".xls" format
# #####
def sql2xl(db = "src.db", tbl = "src", query = None, wb = None):
import os
import sqlite3
import time
from xlwt import Workbook
os.chdir("\\".join(str(__file__).split("\\")[:-1]))
if not wb:
wb = time.strftime("%d%m%Y", time.localtime())
sql = sqlite3.connect(db)
cursor = sql.cursor()
w = Workbook(encoding = "utf-8")
sht = w.add_sheet(tbl)
r = 1
c = 0
cnt = 1
if not query:
query = "SELECT * FROM ["+ tbl + "];"
for i in cursor.execute(query):
for j in i:
sht.write(r,c,j)
c = c + 1
r = r + 1
c = 0
for i in cursor.description:
sht.write(0, c, i[0])
c = c + 1
sql.close()
w.save(wb + ".xls")
print "DONE!"
if __name__ == "__main__":
# xl2sql()
sql2xl(query = "SELECT * FROM src WHERE ")
| mit | Python | |
672d76c4c904c3e9cf572310a218be533288e01e | Add API_class.py | franckbrignoli/twitter-bot-detection | API_class.py | API_class.py | class API_config():
def __init__(self, app_config,tweepy):
self.app_config = app_config
self.tweepy = app_config
def API_launch(self):
# Twitter API configuration
consumer_key = app_config.twitter["consumer_key"]
consumer_secret = app_config.twitter["consumer_secret"]
access_token = app_config.twitter["access_token"]
access_token_secret = app_config.twitter["access_token_secret"]
# Start
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
API_test = API_config(app_config,tweepy)
API_start = API_test.API_launch()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.