commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
8d1ef1f33cc6f10a58cdeacc0fd840dea245e7a6 | Create typecheck.py with all the code | typecheck.py | typecheck.py | #!/usr/bin/env python3
from typing import (Union,
Tuple,
Any,
TypeVar,
Type,
List)
def check_type(obj, candidate_type, reltype='invariant') -> bool:
if reltype not in ['invariant', 'covariant', 'contravariant']:
raise ValueError(f' Variadic type {reltype} is unknown')
# builtin type like str, or a class
if type(candidate_type) == type and reltype in ['invariant']:
return isinstance(obj, candidate_type)
if type(candidate_type) == type and reltype in ['covariant']:
return issubclass(obj.__class__, candidate_type)
if type(candidate_type) == type and reltype in ['contravariant']:
return issubclass(candidate_type, obj.__class__)
# Any accepts everything
if type(candidate_type) == type(Any):
return True
# Union, at least one match in __args__
if type(candidate_type) == type(Union):
return any(check_type(obj, t, reltype) for t in candidate_type.__args__)
# Tuple, each element matches the corresponding type in __args__
if type(candidate_type) == type(Tuple):
if not hasattr(obj, '__len__'):
return False
if len(candidate_type.__args__) != len(obj):
return False
return all(check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__))
# List, each element matches the type in __args__
if type(candidate_type) == type(List):
if not hasattr(obj, '__len__'):
return False
return all(check_type(o, candidate_type.__args__[0], reltype) for o in obj)
# TypeVar, this is tricky
if type(candidate_type) == type(TypeVar):
# TODO consider contravariant, variant and bound
# invariant with a list of constraints, acts like a Tuple
if not (candidate_type.__covariant__ or candidate_type.__contracovariant__) and len(candidate_type.__constraints__) > 0:
return any(check_type(obj, t) for t in candidate_type.__constraints__)
if type(candidate_type) == type(Type):
return check_type(obj, candidate_type.__args__[0], reltype='covariant')
raise ValueError(f'Cannot check against {reltype} type {candidate_type}')
assert check_type(3, Any)
assert check_type([5, "hi"], Any)
assert check_type(3, int)
assert not check_type(3, float)
assert check_type(3, Union[int, str])
assert check_type("hello", Union[int, str])
assert not check_type(4.78, Union[int, str])
assert check_type((1, 67), Tuple[int, int])
assert not check_type((1, "new york"), Tuple[int, int])
# NOTE not a tuple, but the whole object is immutable being a JSON received from HTTP
assert check_type([1, "new york"], Tuple[int, str])
assert check_type((1, 67, "Amsterdam"), Tuple[int, int, str])
assert not check_type(("Amsterdam", 1, 67), Tuple[int, int, str])
assert check_type([1, 27, 33, 1956], List[int])
assert not check_type([1.11, 27, 33, 1956], List[int])
assert not check_type([1, 27, 33, 1956, "h", 42], List[int])
assert check_type([1, 27, 33, 1956], List[Union[str, int]])
assert check_type([(12, "Texas"), (-5, "Particle")], List[Tuple[int, str]])
assert not check_type([(1.9, "Texas"), (-5, "Particle")], List[Tuple[int, str]])
assert not check_type([1.11, 27, 33, 1956], List[Tuple[int, str]])
| Python | 0.000001 | |
1a9302d984e8fd0e467a04c87428b64d874e5f04 | refactor customerWallet | usermanage/views/customerWallet.py | usermanage/views/customerWallet.py | from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required, user_passes_test, permission_required
from django.contrib.auth.forms import UserCreationForm
from customermanage.models import Coupon, Wallet
from storemanage.models import Ticket
# Create your views here.
from usermanage import models
@login_required()
@permission_required('usermanage.customer_rights',raise_exception=True)
def customerWallet(request):
user = request.user
wallets = [{'name':w.currency.name,'amount':w.amount} for w in Wallet.objects.filter(user=user)]
print(wallets)
return render(request, 'index/wallet.html',{'wallets':wallets})
| Python | 0.999998 | |
b9f28570ba619db5adacb05a7eadab77f140e876 | Create __init__.py | fake_data_crud_service/rest/__init__.py | fake_data_crud_service/rest/__init__.py | __package__ = 'rest'
__author__ = 'Barbaglia, Guido'
__email__ = 'guido.barbaglia@gmail.com;'
__license__ = 'MIT'
| Python | 0.000429 | |
192e60955051f8ffb34f6cc1f1e3f226acb1b5fb | add missing primary key constraints (#7129) | warehouse/migrations/versions/b5bb5d08543d_create_missing_primary_key_constraints.py | warehouse/migrations/versions/b5bb5d08543d_create_missing_primary_key_constraints.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
create missing primary key constraints
Revision ID: b5bb5d08543d
Revises: 08aedc089eaf
Create Date: 2019-12-19 14:27:47.230249
"""
from alembic import op
revision = "b5bb5d08543d"
down_revision = "08aedc089eaf"
def upgrade():
op.create_primary_key(None, "release_files", ["id"])
op.create_primary_key(None, "release_dependencies", ["id"])
op.create_primary_key(None, "roles", ["id"])
def downgrade():
raise RuntimeError("Order No. 227 - Ни шагу назад!")
| Python | 0 | |
a3d3040f16a604b534406d2f59a841d7ef6cebfa | Test HTTPMediaWikiAPI.get_content() | tests/test_api.py | tests/test_api.py | import requests
from unittest import TestCase
from mfnf.api import HTTPMediaWikiAPI
class TestHTTPMediaWikiAPI(TestCase):
def setUp(self):
self.api = HTTPMediaWikiAPI(requests.Session())
def test_get_content(self):
content = self.api.get_content("Mathe für Nicht-Freaks: Epsilon-Delta-Kriterium der Stetigkeit")
self.assertTrue(content.startswith("{{#invoke:Mathe für Nicht-Freaks"))
| Python | 0.000001 | |
8139dc9e04025da001323122521951f5ed2c391b | Fix mysql encoding for users.profile.reason | users/migrations/0010_users-profile-encoding.py | users/migrations/0010_users-profile-encoding.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
| Python | 0.001314 | |
328901c74d1ee103a1ee5b2f26aa391ddeda465b | Add unit test for webpage creation and description | tests/test_web.py | tests/test_web.py | """Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
e6b086f3baef34cf1e5278e930a034a92f4eee76 | Add test for DirectionalGridCRF | tests/test_directional_crf.py | tests/test_directional_crf.py | import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
#from nose.tools import assert_almost_equal
import pystruct.toy_datasets as toy
from pystruct.lp_new import lp_general_graph
from pystruct.inference_methods import _make_grid_edges
from pystruct.crf import DirectionalGridCRF
def test_inference():
# Test inference with different weights in different directions
X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edges = _make_grid_edges(x, neighborhood=4)
edge_list = _make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
pw_horz = -1 * np.eye(n_states)
xx, yy = np.indices(pw_horz.shape)
# linear ordering constraint horizontally
pw_horz[xx > yy] = 1
# high cost for unequal labels vertically
pw_vert = -1 * np.eye(n_states)
pw_vert[xx != yy] = 1
pw_vert *= 10
# generate edge weights
edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
edge_list[0].shape[0], axis=0)
edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
edge_list[1].shape[0], axis=0)
edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])
# do inference
res = lp_general_graph(-x.reshape(-1, n_states), edges, edge_weights,
exact=False)
# sam inference through CRF inferface
crf = DirectionalGridCRF(n_states=3, inference_method='lp')
w = np.hstack([np.ones(3), -pw_horz.ravel(), -pw_vert.ravel()])
y_pred = crf.inference(x, w, relaxed=True)
assert_array_almost_equal(res[0], y_pred[0].reshape(-1, n_states))
assert_array_almost_equal(res[1], y_pred[1])
assert_array_equal(y, np.argmax(y_pred[0], axis=-1))
| Python | 0 | |
439e4b740f6903341e81e158e6591c9cbd242a4c | Check in a tool that dumps graphviz output. | tools/graphviz.py | tools/graphviz.py | #!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their depedendents."""
file = open('dump.json')
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
if __name__ == '__main__':
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
sys.exit(1)
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
| Python | 0 | |
80d9a407d76f11573af5ccb6783f837b939b5466 | Add Python benchmark | lib/node_modules/@stdlib/math/base/special/erfinv/benchmark/python/benchmark.scipy.py | lib/node_modules/@stdlib/math/base/special/erfinv/benchmark/python/benchmark.scipy.py | #!/usr/bin/env python
"""Benchmark scipy.special.erfinv."""
import timeit
name = "erfinv"
repeats = 3
iterations = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = iterations / elapsed
print(" ---")
print(" iterations: " + str(iterations))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.special import erfinv; from random import random;"
stmt = "y = erfinv(2.0*random() - 1.0)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(3):
print("# python::" + name)
elapsed = t.timeit(number=iterations)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(repeats, repeats)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| Python | 0.000138 | |
3133bbfcb5ee56c88ea20be21778519bffe77299 | Add another different type of book | literotica.py | literotica.py | from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
| Python | 0.999663 | |
f31d6730a0cfbc50c55e9260391f399e77c3d631 | access the repository from console | utils/__init__.py | utils/__init__.py | __version__="0.1"
| Python | 0.000001 | |
893679baff0367538bdf3b52b04f8bae72732be8 | Add migration to remove system avatar source. | zerver/migrations/0031_remove_system_avatar_source.py | zerver/migrations/0031_remove_system_avatar_source.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
| Python | 0 | |
ff89cda5f77bec569c7451c9ee72ef7c028f7552 | Add sample extraction script | extract_samples.py | extract_samples.py | import sys, os
import numpy as np
import pandas as pd
import datetime
if __name__ == '__main__':
infile = sys.argv[1]
csv_content = pd.read_csv(infile, [0])
| Python | 0 | |
73f47cc6a8a98b2026ee27985f8c3042352c941b | Add lc066_plus_one.py | lc066_plus_one.py | lc066_plus_one.py | """Leetcode 66. Plus One
Easy
URL: https://leetcode.com/problems/plus-one/
Given a non-empty array of digits representing a non-negative integer,
plus one to the integer.
The digits are stored such that the most significant digit is at the
head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero,
except the number 0 itself.
Example 1:
Input: [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
"""
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000849 | |
a620cc46d97f80ef658c46130f0448c36844d847 | Add alembic revision | alembic/versions/63b625cf7b06_add_white_rabbit_status.py | alembic/versions/63b625cf7b06_add_white_rabbit_status.py | """add white rabbit status
Revision ID: 63b625cf7b06
Revises: e83aa47e530b
Create Date: 2019-12-06 02:45:01.418693+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '63b625cf7b06'
down_revision = 'e83aa47e530b'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('node_white_rabbit_status',
sa.Column('node_time', sa.BigInteger(), nullable=False),
sa.Column('node', sa.Integer(), nullable=False),
sa.Column('board_info_str', sa.String(), nullable=True),
sa.Column('aliases', sa.String(), nullable=True),
sa.Column('ip', sa.String(), nullable=True),
sa.Column('mode', sa.String(), nullable=True),
sa.Column('serial', sa.String(), nullable=True),
sa.Column('temperature', sa.Float(), nullable=True),
sa.Column('build_date', sa.BigInteger(), nullable=True),
sa.Column('gw_date', sa.BigInteger(), nullable=True),
sa.Column('gw_version', sa.String(), nullable=True),
sa.Column('gw_id', sa.String(), nullable=True),
sa.Column('build_hash', sa.String(), nullable=True),
sa.Column('manufacture_tag', sa.String(), nullable=True),
sa.Column('manufacture_device', sa.String(), nullable=True),
sa.Column('manufacture_date', sa.BigInteger(), nullable=True),
sa.Column('manufacture_partnum', sa.String(), nullable=True),
sa.Column('manufacture_serial', sa.String(), nullable=True),
sa.Column('manufacture_vendor', sa.String(), nullable=True),
sa.Column('port0_ad', sa.Integer(), nullable=True),
sa.Column('port0_link_asymmetry_ps', sa.Integer(), nullable=True),
sa.Column('port0_manual_phase_ps', sa.Integer(), nullable=True),
sa.Column('port0_clock_offset_ps', sa.Integer(), nullable=True),
sa.Column('port0_cable_rt_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_master_slave_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_master_rx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_slave_rx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_master_tx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_slave_tx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port0_hd', sa.Integer(), nullable=True),
sa.Column('port0_link', sa.Boolean(), nullable=True),
sa.Column('port0_lock', sa.Boolean(), nullable=True),
sa.Column('port0_md', sa.Integer(), nullable=True),
sa.Column('port0_rt_time_ps', sa.Integer(), nullable=True),
sa.Column('port0_nsec', sa.Integer(), nullable=True),
sa.Column('port0_packets_received', sa.Integer(), nullable=True),
sa.Column('port0_phase_setpoint_ps', sa.Integer(), nullable=True),
sa.Column('port0_servo_state', sa.String(), nullable=True),
sa.Column('port0_sv', sa.Integer(), nullable=True),
sa.Column('port0_sync_source', sa.String(), nullable=True),
sa.Column('port0_packets_sent', sa.Integer(), nullable=True),
sa.Column('port0_update_counter', sa.Integer(), nullable=True),
sa.Column('port0_time', sa.BigInteger(), nullable=True),
sa.Column('port1_ad', sa.Integer(), nullable=True),
sa.Column('port1_link_asymmetry_ps', sa.Integer(), nullable=True),
sa.Column('port1_manual_phase_ps', sa.Integer(), nullable=True),
sa.Column('port1_clock_offset_ps', sa.Integer(), nullable=True),
sa.Column('port1_cable_rt_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_master_slave_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_master_rx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_slave_rx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_master_tx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_slave_tx_phy_delay_ps', sa.Integer(), nullable=True),
sa.Column('port1_hd', sa.Integer(), nullable=True),
sa.Column('port1_link', sa.Boolean(), nullable=True),
sa.Column('port1_lock', sa.Boolean(), nullable=True),
sa.Column('port1_md', sa.Integer(), nullable=True),
sa.Column('port1_rt_time_ps', sa.Integer(), nullable=True),
sa.Column('port1_nsec', sa.Integer(), nullable=True),
sa.Column('port1_packets_received', sa.Integer(), nullable=True),
sa.Column('port1_phase_setpoint_ps', sa.Integer(), nullable=True),
sa.Column('port1_servo_state', sa.String(), nullable=True),
sa.Column('port1_sv', sa.Integer(), nullable=True),
sa.Column('port1_sync_source', sa.String(), nullable=True),
sa.Column('port1_packets_sent', sa.Integer(), nullable=True),
sa.Column('port1_update_counter', sa.Integer(), nullable=True),
sa.Column('port1_time', sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint('node_time', 'node')
)
def downgrade():
op.drop_table('node_white_rabbit_status')
| Python | 0.000001 | |
b51398d602a157ce55fd7e08eedd953051f716a1 | Add script to update uploaded files. | backend/scripts/updatedf.py | backend/scripts/updatedf.py | #!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
| Python | 0 | |
ba3582d1e4521c040ef9f43c3a4760eb4fd694da | add lib/config_loader.py | hokusai/lib/config_loader.py | hokusai/lib/config_loader.py | import os
import tempfile
import shutil
from urlparse import urlparse
import boto3
import yaml
from hokusai.lib.common import get_region_name
from hokusai.lib.exceptions import HokusaiError
class ConfigLoader
def __init__(self, uri):
self.uri = uri
def load(self):
uri = urlparse(self.uri)
if not uri.path.endswith('yaml') or not uri.path.endswith('yml'):
raise HokusaiError('Uri must be of Yaml file type')
tmpdir = tempfile.mkdtemp()
switch(uri.scheme):
case 's3':
client = boto3.client('s3', region_name=get_region_name())
tmp_configfile = os.path.join(tmpdir, 'config')
client.download_file(uri.netloc, uri.path.lstrip('/'), tmp_configfile)
default:
tmp_configfile = uri.path
with open(tmp_configfile, 'r') as f:
struct = yaml.safe_load(f.read())
if type(struct) is not obj:
raise HokusaiError('Yaml is invalid')
return struct
| Python | 0.000002 | |
21e766688e3cc4d08339f81c35dba43d26010a6d | edit vehicle form | vehicles/forms.py | vehicles/forms.py | from django import forms
class EditVehicleForm(forms.Form):
fleet_number = forms.CharField(label='Fleet number', required=False)
reg = forms.CharField(label='Registration', required=False)
vehicle_type = forms.CharField(label='Type', required=False)
colours = forms.CharField(label='Colours', required=False)
notes = forms.CharField(label='Notes', required=False)
| Python | 0 | |
fbf36a2fb52b5ed1aceaec4c1d1075448584a97d | Test that modules can be imported in any order | tests/test_imports.py | tests/test_imports.py | """Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
| Python | 0 | |
e3bdccc8c7ef23b449a53043f4a048fe71cd642c | Use an explicit list due to the filter-object type of python3 | accounting/apps/connect/views.py | accounting/apps/connect/views.py | from django.views import generic
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from accounting.apps.books.models import Organization
from .steps import (
CreateOrganizationStep,
ConfigureTaxRatesStep,
ConfigureBusinessSettingsStep,
ConfigureFinancialSettingsStep,
AddEmployeesStep,
ConfigurePayRunSettingsStep,
AddFirstClientStep,
AddFirstInvoiceStep)
class RootRedirectionView(generic.View):
"""
Redirect to the books if an organization is already configured
Otherwise we begin the step by step creation process to help the user
begin and configure his books
"""
def get(self, *args, **kwargs):
if Organization.objects.all().count():
return HttpResponseRedirect(reverse('books:dashboard'))
class GettingStartedView(generic.TemplateView):
template_name = "connect/getting_started.html"
def get_steps(self, request):
user = request.user
steps = steps = [
CreateOrganizationStep(user),
ConfigureTaxRatesStep(user),
ConfigureBusinessSettingsStep(user),
ConfigureFinancialSettingsStep(user),
AddEmployeesStep(user),
ConfigurePayRunSettingsStep(user),
AddFirstClientStep(user),
AddFirstInvoiceStep(user),
]
return steps
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
request = self.request
steps = self.get_steps(self.request)
uncomplete_filter = lambda s: not s.completed(request)
uncompleted_steps = list(filter(uncomplete_filter, steps))
try:
next_step = next(s for s in uncompleted_steps)
except StopIteration:
next_step = None
ctx['steps'] = steps
ctx['next_step'] = next_step
ctx['all_steps_completed'] = bool(next_step is None)
return ctx
def post(self, request, *args, **kwargs):
steps = self.get_steps(request)
uncompleted_steps = filter(lambda s: not s.completed(request), steps)
if not len(uncompleted_steps):
return super().post(request, *args, **kwargs)
# unmark the session as getting started
request.sessions['getting_started_done'] = True
return HttpResponseRedirect(reverse('books:dashboard'))
| from django.views import generic
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from accounting.apps.books.models import Organization
from .steps import (
CreateOrganizationStep,
ConfigureTaxRatesStep,
ConfigureBusinessSettingsStep,
ConfigureFinancialSettingsStep,
AddEmployeesStep,
ConfigurePayRunSettingsStep,
AddFirstClientStep,
AddFirstInvoiceStep)
class RootRedirectionView(generic.View):
"""
Redirect to the books if an organization is already configured
Otherwise we begin the step by step creation process to help the user
begin and configure his books
"""
def get(self, *args, **kwargs):
if Organization.objects.all().count():
return HttpResponseRedirect(reverse('books:dashboard'))
class GettingStartedView(generic.TemplateView):
template_name = "connect/getting_started.html"
def get_steps(self, request):
user = request.user
steps = steps = [
CreateOrganizationStep(user),
ConfigureTaxRatesStep(user),
ConfigureBusinessSettingsStep(user),
ConfigureFinancialSettingsStep(user),
AddEmployeesStep(user),
ConfigurePayRunSettingsStep(user),
AddFirstClientStep(user),
AddFirstInvoiceStep(user),
]
return steps
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
request = self.request
steps = self.get_steps(self.request)
uncompleted_steps = filter(lambda s: not s.completed(request), steps)
try:
next_step = next(uncompleted_steps)
except StopIteration:
next_step = None
ctx['steps'] = steps
ctx['next_step'] = next_step
ctx['all_steps_completed'] = bool(next_step is None)
return ctx
def post(self, request, *args, **kwargs):
steps = self.get_steps(request)
uncompleted_steps = filter(lambda s: not s.completed(request), steps)
if not len(uncompleted_steps):
return super().post(request, *args, **kwargs)
# unmark the session as getting started
request.sessions['getting_started_done'] = True
return HttpResponseRedirect(reverse('books:dashboard'))
| Python | 0.000005 |
b7fff47b228fbe8774c9f465c383ae1015c598fe | use cvmfs.py for openRootCatalog.py | add-ons/tools/openRootCatalog.py | add-ons/tools/openRootCatalog.py | #!/usr/bin/python
import cvmfs
import sys
def usage():
print sys.argv[0] + " <repository path | repository url>"
print "This script decompresses the root catalog file to a temporary storage"
print "and opens this directly with sqlite3."
print "WARNING: changes to this database will not persist, as it is only a temp"
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
repo = cvmfs.OpenRepository(sys.argv[1])
root_clg = repo.RetrieveRootCatalog()
root_clg.OpenInteractive()
main()
| #!/usr/bin/python
import sys
import zlib
import tempfile
import subprocess
def getRootCatalogName(cvmfspublished):
try:
cvmfspubdata = open(cvmfspublished, 'rb').read()
except:
print "cannot open .cvmfspublished"
sys.exit(1)
lines = cvmfspubdata.split('\n')
if len(lines) < 1:
print ".cvmfspublished is malformed"
sys.exit(1)
return lines[0][1:]
def decompressCatalog(filename, destination):
str_object1 = open(filename, 'rb').read()
str_object2 = zlib.decompress(str_object1)
f = open(destination, 'wb')
f.write(str_object2)
f.close()
def openCatalog(filename):
subprocess.call(['sqlite3', filename])
def usage():
print sys.argv[0] + " <repository name>"
print "This script decompresses the root catalog file to a temporary storage"
print "and opens this directly with sqlite3."
print "WARNING: changes to this database will not persist, as it is only a temp"
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
repoDir = "/srv/cvmfs/" + sys.argv[1] + "/";
rootCatalog = getRootCatalogName(repoDir + ".cvmfspublished")
myTmpFile = tempfile.NamedTemporaryFile('wb')
decompressCatalog(repoDir + "data/" + rootCatalog[:2] + "/" + rootCatalog[2:] + "C", myTmpFile.name)
openCatalog(myTmpFile.name)
myTmpFile.close()
main() | Python | 0 |
a8ca46a8d964907038f6c096a316175543bc2518 | add mask_iou test | tests/utils_tests/mask_tests/test_mask_iou.py | tests/utils_tests/mask_tests/test_mask_iou.py | from __future__ import division
import unittest
import numpy as np
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.utils import mask_iou
@testing.parameterize(
{'mask_a': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]]],
dtype=np.bool),
'mask_b': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]],
[[True, False], [True, True]],
[[True, True], [False, True]]],
dtype=np.bool),
'expected': np.array(
[[1., 0., 2 / 3, 1 / 4],
[0., 1., 1 / 4, 2 / 3]],
dtype=np.float32)
},
{'mask_a': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]],
[[True, True], [True, False]],
[[False, True], [True, True]]],
dtype=np.bool),
'mask_b': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]]],
dtype=np.bool),
'expected': np.array(
[[1., 0.], [0., 1.], [1 / 4, 2 / 3], [2 / 3, 1 / 4]],
dtype=np.float32)
},
{'mask_a': np.zeros((0, 2, 2), dtype=np.bool),
'mask_b': np.array([[[False, False], [False, False]]], dtype=np.bool),
'expected': np.zeros((0, 1), dtype=np.float32)
},
)
class TestMaskIou(unittest.TestCase):
def check(self, mask_a, mask_b, expected):
iou = mask_iou(mask_a, mask_b)
self.assertIsInstance(iou, type(expected))
np.testing.assert_equal(
cuda.to_cpu(iou),
cuda.to_cpu(expected))
def test_mask_iou_cpu(self):
self.check(self.mask_a, self.mask_b, self.expected)
@attr.gpu
def test_mask_iou_gpu(self):
self.check(
cuda.to_gpu(self.mask_a),
cuda.to_gpu(self.mask_b),
cuda.to_gpu(self.expected))
@testing.parameterize(
{'mask_a': np.array([[[False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False, True], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False, True], [True, True]]], dtype=np.bool)
},
)
class TestMaskIouInvalidShape(unittest.TestCase):
def test_mask_iou_invalid(self):
with self.assertRaises(IndexError):
mask_iou(self.mask_a, self.mask_b)
testing.run_module(__name__, __file__)
| Python | 0.000069 | |
a377195fa95b819924ddfbd3fb564cffbe08f9ae | Add an example for solvent model to customize solvent cavity | examples/solvent/30-custom_solvent_cavity.py | examples/solvent/30-custom_solvent_cavity.py | #!/usr/bin/env python
'''
Custom solvent cavity
'''
import numpy
from pyscf import gto, qmmm, solvent
#
# Case 1. Cavity for dummy atoms with basis on the dummy atoms
#
mol = gto.M(atom='''
C 0.000000 0.000000 -0.542500
O 0.000000 0.000000 0.677500
H 0.000000 0.9353074360871938 -1.082500
H 0.000000 -0.9353074360871938 -1.082500
X-C 0.000000 0.000000 -1.5
X-O 0.000000 0.000000 1.6
''',
verbose = 4)
sol = solvent.ddCOSMO(mol)
cavity_radii = sol.get_atomic_radii()
cavity_radii[4] = 3.0 # Bohr, for X-C
cavity_radii[5] = 2.5 # Bohr, for X-O
# Overwrite the get_atom_radii method to feed the custom cavity into the solvent model
sol.get_atomic_radii = lambda: cavity_radii
mf = mol.RHF().ddCOSMO(sol)
mf.run()
#
# Case 2. Cavity for dummy atoms (without basis)
#
mol = gto.M(atom='''
C 0.000000 0.000000 -0.542500
O 0.000000 0.000000 0.677500
H 0.000000 0.9353074360871938 -1.082500
H 0.000000 -0.9353074360871938 -1.082500
''',
verbose = 4)
# Use a MM molecule to define cavity from dummy atoms.
# See also the example 22-with_qmmm.py
coords = numpy.array([
[0, 0, -1.5],
[0, 0, 1.6],
])
charges = numpy.array([0, 0])
mm_atoms = [('X', c) for c in coords]
mm_mol = qmmm.create_mm_mol(mm_atoms, charges)
# Make a giant system include both QM and MM particles
qmmm_mol = mol + mm_mol
# The solvent model is based on the giant system
sol = solvent.ddCOSMO(qmmm_mol)
cavity_radii = sol.get_atomic_radii()
# Custom cavity
cavity_radii[4] = 3.0 # Bohr
cavity_radii[5] = 2.5 # Bohr
# Overwrite the get_atom_radii method to feed the custom cavity into the solvent model
sol.get_atomic_radii = lambda: cavity_radii
mf = mol.RHF().QMMM(coords, charges)
mf = mf.ddCOSMO(sol)
mf.run()
| Python | 0 | |
97ecb8f7dbcb36cfa9e2d180f29d29002eea127e | add elasticsearch import | examples/ElasticsearchIntegrationWithSpark/import_from_elasticsearch.py | examples/ElasticsearchIntegrationWithSpark/import_from_elasticsearch.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext
if __name__ == "__main__":
if len(sys.argv) != 7:
print("Usage: export_to_elasticsearch.py <host> <port> <user> <pass> <tmpdir> <tmpHdfsDir>", file=sys.stderr)
exit(-1)
host = sys.argv[1]
port = sys.argv[2]
user = sys.argv[3]
password = sys.argv[4]
tmpDir = sys.argv[5]
tmpHdfsDir = sys.argv[6]
conf = SparkConf().setAppName("Elasticsearch example")
# see https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html
conf.set("es.nodes",host)
conf.set("es.port",str(port))
conf.set("es.net.http.auth.user",user)
conf.set("es.net.http.auth.pass",password)
conf.set("es.net.ssl","true")
conf.set("es.net.ssl.truststore.location","truststore.jks")
conf.set("es.net.ssl.truststore.pass","mypassword")
conf.set("es.nodes.wan.only","true")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# read the data from elasticsearch
esdata = sqlContext.read.format("es").load("spark/{0}".format(tmpDir))
# save to hdfs
esdata.rdd.saveAsTextFile(tmpHdfsDir)
sc.stop()
| Python | 0 | |
d5585ff269868ed5407bc573a851860fdb35a5ec | Create vnrpc.py | vnpy/rpc/vnrpc.py | vnpy/rpc/vnrpc.py | import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
import cPickle
p_dumps = cPickle.dumps
p_loads = cPickle.loads
# Achieve Ctrl-c interrupt recv
signal.signal(signal.SIGINT, signal.SIG_DFL)
class RpcObject(object):
"""
Referred to serialization of packing and unpacking, we offer 3 tools:
1) maspack: higher performance, but usually requires the installation of msgpack related tools;
2) jason: Slightly lower performance but versatility is better, most programming languages have built-in libraries;
3) cPickle: Lower performance and only can be used in Python, but it is very convenient to transfer Python objects directly.
Therefore, it is recommended to use msgpack.
Use json, if you want to communicate with some languages without providing msgpack.
Use cPickle, when the data being transferred contains many custom Python objects.
"""
def __init__(self):
"""
Constructor
Use msgpack as default serialization tool
"""
self.use_msgpack()
def pack(self, data):
""""""
pass
def unpack(self, data):
""""""
pass
def __json_pack(self, data):
"""
Pack with json
"""
return dumps(data)
def __json_unpack(self, data):
"""
Unpack with json
"""
return loads(data)
def __msgpack_pack(self, data):
"""
Pack with msgpack
"""
return packb(data)
def __msgpack_unpack(self, data):
"""
Unpack with msgpack
"""
return unpackb(data)
def __pickle_pack(self, data):
"""
Pack with cPickle
"""
return p_dumps(data)
def __pickle_unpack(self, data):
"""
Unpack with cPickle
"""
return p_loads(data)
def use_json(self):
"""
Use json as serialization tool
"""
self.pack = self.__json_pack
self.unpack = self.__json_unpack
def use_msgpack(self):
"""
Use msgpack as serialization tool
"""
self.pack = self.__msgpack_pack
self.unpack = self.__msgpack_unpack
def use_pickle(self):
"""
Use cPickle as serialization tool
"""
self.pack = self.__pickle_pack
self.unpack = self.__pickle_unpack
class RpcServer(RpcObject):
""""""
def __init__(self, rep_address, pub_address):
"""
Constructor
"""
super(RpcServer, self).__init__()
# Save functions dict: key is fuction name, value is fuction object
self.__functions = {}
# Zmq port related
self.__context = zmq.Context()
self.__socket_rep = self.__context.socket(zmq.REP) # Reply socket (Request–reply pattern)
self.__socket_rep.bind(rep_address)
self.__socket_pub = self.__context.socket(zmq.PUB) # Publish socket (Publish–subscribe pattern)
self.__socket_pub.bind(pub_address)
# Woker thread related
self.__active = False # RpcServer status
self.__thread = threading.Thread(target=self.run) # RpcServer thread
def start(self):
"""
Start RpcServer
"""
# Start RpcServer status
self.__active = True
# Start RpcServer thread
if not self.__thread.isAlive():
self.__thread.start()
def stop(self, join=False):
"""
Stop RpcServer
"""
# Stop RpcServer status
self.__active = False
# Wait for RpcServer thread to exit
if join and self.__thread.isAlive():
self.__thread.join()
def run(self):
"""
Run RpcServer functions
"""
while self.__active:
# Use poll to wait event arrival, waiting time is 1 second (1000 milliseconds)
if not self.__socket_rep.poll(1000):
continue
# Receive request data from Reply socket
reqb = self.__socket_rep.recv()
# Unpack request by deserialization
req = self.unpack(reqb)
# Get function name and parameters
name, args, kwargs = req
# Try to get and execute callable function object; capture exception information if it fails
try:
func = self.__functions[name]
r = func(*args, **kwargs)
rep = [True, r]
except Exception as e:
rep = [False, traceback.format_exc()]
# Pack response by serialization
repb = self.pack(rep)
# send callable response by Reply socket
self.__socket_rep.send(repb)
def publish(self, topic, data):
"""
Publish data
"""
# Serialized data
datab = self.pack(data)
# Send data by Publish socket
self.__socket_pub.send_multipart([topic, datab]) # topci must be ascii encoding
def register(self, func):
"""
Register function
"""
self.__functions[func.__name__] = func
class RpcClient(RpcObject):
""""""
def __init__(self, req_address, sub_address):
"""Constructor"""
super(RpcClient, self).__init__()
# zmq port related
self.__req_address = req_address
self.__sub_address = sub_address
self.__context = zmq.Context()
self.__socket_req = self.__context.socket(zmq.REQ) # Request socket (Request–reply pattern)
self.__socket_sub = self.__context.socket(zmq.SUB) # Subscribe socket (Publish–subscribe pattern)
# Woker thread relate, used to process data pushed from server
self.__active = False # RpcClient status
self.__thread = threading.Thread(target=self.run) # RpcClient thread
def __getattr__(self, name):
"""
Realize remote call function
"""
# Perform remote call task
def dorpc(*args, **kwargs):
# Generate request
req = [name, args, kwargs]
# Pack request by serialization
reqb = self.pack(req)
# Send request and wait for response
self.__socket_req.send(reqb)
repb = self.__socket_req.recv()
# Unpack response by deserialization
rep = self.unpack(repb)
# Return response if successed; Trigger exception if failed
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
def start(self):
"""
Start RpcClient
"""
# Connect zmq port
self.__socket_req.connect(self.__req_address)
self.__socket_sub.connect(self.__sub_address)
# Start RpcClient status
self.__active = True
# Start RpcClient thread
if not self.__thread.isAlive():
self.__thread.start()
def stop(self):
"""
Stop RpcClient
"""
# Stop RpcClient status
self.__active = False
# Wait for RpcClient thread to exit
if self.__thread.isAlive():
self.__thread.join()
def run(self):
"""
Run RpcClient function
"""
while self.__active:
# Use poll to wait event arrival, waiting time is 1 second (1000 milliseconds)
if not self.__socket_sub.poll(1000):
continue
# Receive data from subscribe socket
topic, datab = self.__socket_sub.recv_multipart()
# Unpack data by deserialization
data = self.unpack(datab)
# Process data by callable function
self.callback(topic, data)
def callback(self, topic, data):
"""
Callable function
"""
raise NotImplementedError
def subscribeTopic(self, topic):
"""
Subscribe data
"""
self.__socket_sub.setsockopt(zmq.SUBSCRIBE, topic)
class RemoteException(Exception):
"""
RPC remote exception
"""
def __init__(self, value):
"""
Constructor
"""
self.__value = value
def __str__(self):
"""
Output error message
"""
return self.__value
| Python | 0.000001 | |
8b419fefc93f9084b8d504b7382fd51087e4645f | add migration script that removes table 'regressions' | benchbuild/db/versions/001_Remove_RegressionTest_table.py | benchbuild/db/versions/001_Remove_RegressionTest_table.py | """
Remove unneeded Regressions table.
This table can and should be reintroduced by an experiment that requires it.
"""
from sqlalchemy import Table, Column, ForeignKey, Integer, String
from benchbuild.utils.schema import metadata
META = metadata()
REGRESSION = Table('regressions', META,
Column(
'run_id',
Integer,
ForeignKey(
'run.id', onupdate="CASCADE", ondelete="CASCADE"),
index=True,
primary_key=True), Column('name', String),
Column('module', String), Column('project_name', String))
def upgrade(migrate_engine):
META.bind = migrate_engine
REGRESSION.drop()
def downgrade(migrate_engine):
META.bind = migrate_engine
REGRESSION.create()
| Python | 0.000138 | |
847232f2890a4700e4983cd971ef2cd1a76a4b1d | rebuild cases | corehq/apps/cleanup/management/commands/rebuild_cases.py | corehq/apps/cleanup/management/commands/rebuild_cases.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from corehq.form_processor.backends.sql.processor import FormProcessorSQL
from corehq.form_processor.models import RebuildWithReason
logger = logging.getLogger('rebuild_cases')
logger.setLevel('DEBUG')
class Command(BaseCommand):
help = ('Rebuild given cases')
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('cases_csv_file')
def handle(self, domain, cases_csv_file, **options):
cases = []
with open(cases_csv_file, 'r') as f:
lines = f.readlines()
cases = [l.strip() for l in lines]
rebuild_cases(domain, cases, logger)
def rebuild_cases(domain, cases, logger):
detail = RebuildWithReason(reason='undo UUID clash')
for case_id in cases:
try:
FormProcessorSQL.hard_rebuild_case(domain, case_id, detail)
logger.info('Case %s rebuilt' % case_id)
except Exception as e:
logger.error("Exception rebuilding case %s".format(case_id))
logger.exception("message")
| Python | 0.000002 | |
0919661333c8099a85e7c12c6ce9393ced8c985b | create the lib directory to hold vendored libraries | ceph_deploy/lib/__init__.py | ceph_deploy/lib/__init__.py | """
This module is meant for vendorizing Python libraries. Most libraries will need
to have some ``sys.path`` alterations done unless they are doing relative
imports.
Do **not** add anything to this module that does not represent a vendorized
library.
"""
import remoto
| Python | 0 | |
6303ffeee0118a2fef1cb0a9abfe931a04ee6974 | Fix web app. #79 | channelworm/web_app/wsgi.py | channelworm/web_app/wsgi.py | """
WSGI config for myproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| Python | 0.000048 | |
e5ed4497fd8aee709dd441cfcddc9a1a91c538d4 | add theilsen | chart-02-theilsen-median-of-root-median-squared-errors.py | chart-02-theilsen-median-of-root-median-squared-errors.py | # create files for chart-02-theilsen-median-of-root-mdian-squared-errors
# with these choices
# metric in median-root-median-squared-errors
# model in theilsen
# ndays in 30 60 ... 360
# predictors in act actlog ct ctlog
# responses in price logprice
# usetax in yes no
# year in 2008
# invocations and files created
# python chart-02X.py makefile -> src/chart-02X.makefile
# python chart-02X.py data -> data/working/chart-02X.data
# python chart-02X.py txt -> data/working/chart-02X.txt
# python chart-02X.py txtY -> data/working/chart-02X-Y.txt
import sys
from Bunch import Bunch
from chart_02_template import chart
def main():
specs = Bunch(metric='median-of-root-median-squared-errors',
title='Median of Root Median Squared Errors',
model='theilsen',
training_periods=['30', '60', '90', '120', '150', '180',
'210', '240', '270', '300', '330', '360'],
feature_sets=['act', 'actlog', 'ct', 'ctlog'],
responses=['price', 'logprice'],
year='2008')
chart(specs=specs,
argv=sys.argv)
if __name__ == '__main__':
main()
| Python | 0.999936 | |
2df737f2690925e2752ae7633f1db05f952209bc | Create led_record.py | led_record.py | led_record.py | #!/usr/bin/env python
import RPi.GPIO as GPIO
from time import sleep
import os
import subprocess
# Setup getting an image
def get_video(state):
folderName = "/home/pi/HumphreyData/"
if os.path.isdir(folderName)== False:
os.makedirs(folderName)
fileNumber = 1
filePath = folderName + str(fileNumber) + ".h264"
while os.path.isfile(filePath):
fileNumber += 1
filePath = folderName + str(fileNumber) + ".h264"
fileName = str(fileNumber)
cmdStr = "sudo raspivid -n -w 1024 -h 768 -t 0 -fps 2 -o %s/%s.h264" %(folderName, fileName)
if state:
capture = subprocess.Popen(cmdStr, shell=True)
else:
pid = "sudo pkill -15 -f raspivid"
os.system(pid)
# Setup LED control
def switch_LED(state):
for item in LEDpins:
GPIO.output(item, state)
# Setup GPIO config
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
# Setup GPIO pins
LEDpins = [19, 21]
switchState = 23
# If true, LEDS are off -> GPIO pins are current sinks
lOn = False
lOff = True
# Configure LED GPIO pins
for item in LEDpins:
GPIO.setup(item, GPIO.OUT)
GPIO.output(item, lOff)
# Configure switch GPIO pins
GPIO.setup(switchState, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Scipt ready flash
flashes = 1
while flashes < 4:
switch_LED(lOn)
sleep(0.5)
switch_LED(lOff)
sleep(0.5)
flashes += 1
# Pin check loop
while True:
if GPIO.input(switchState):
captureState = False
switch_LED(lOff)
else:
captureState = True
switch_LED(lOn)
get_video(captureState)
GPIO.wait_for_edge(switchState, GPIO.BOTH)
sleep(0.2)
# Script cleanup
GPIO.cleanup()
| Python | 0.000001 | |
f68689e3b6caaad2d143d92af5395f7c12316525 | add simple test file | test.py | test.py | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from pybasicbayes.distributions import Gaussian, Regression
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange
from models import LDS
np.random.seed(0)
#########################
# set some parameters #
#########################
mu_init = np.array([0.,1.])
sigma_init = 0.01*np.eye(2)
A = 0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
[np.sin(np.pi/24), np.cos(np.pi/24)]])
# A = 0.99*np.eye(2)
sigma_states = 0.01*np.eye(2)
C = np.array([[10.,0.]])
sigma_obs = 0.01*np.eye(1)
###################
# generate data #
###################
truemodel = LDS(
init_dynamics_distn=Gaussian(mu=mu_init,sigma=sigma_init),
dynamics_distn=AutoRegression(A=A,sigma=sigma_states),
emission_distn=Regression(A=C,sigma=sigma_obs)
)
data, stateseq = truemodel.generate(2000)
###############
# fit model #
###############
model = LDS(
init_dynamics_distn=Gaussian(mu_0=np.zeros(2),nu_0=3.,sigma_0=3*np.eye(2),kappa_0=1.),
dynamics_distn=AutoRegression(nu_0=3.,S_0=np.eye(2),M_0=np.zeros((2,2)),K_0=5*np.eye(2)),
emission_distn=Regression(nu_0=2.,S_0=np.eye(1),M_0=np.zeros((1,2)),K_0=5*np.eye(2)),
)
model.add_data(data,stateseq=stateseq)
# model.add_data(data)
model.resample_parameters()
for _ in progprint_xrange(100):
model.resample_model()
print np.linalg.eigvals(A)
print np.linalg.eigvals(model.dynamics_distn.A)
| Python | 0 | |
2e4bb9ca00c992dab0967b3238d8aebd8710d79d | Create controller.py | src/controller.py | src/controller.py | #!/usr/bin/env python
import rospy
if __name__ == '__main__':
pass
| Python | 0.000001 | |
c3748579854ae06c995cb12ea45a1be4de8f827d | Add gallery migration | features/galleries/migrations/0003_auto_20170421_1109.py | features/galleries/migrations/0003_auto_20170421_1109.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 09:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('galleries', '0002_auto_20170421_0934'),
]
operations = [
migrations.AlterField(
model_name='galleryimage',
name='gallery',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='content2.Content'),
),
]
| Python | 0 | |
a6381765ad8e15624a5dabb848283e92b0e90d8c | Create rpkm_bin.py | code_collection/rpkm_bin.py | code_collection/rpkm_bin.py | import sys
peak=[]
with open(sys.argv[1],'r') as f:
for line in f:
line=line.strip('\n').split('\t')
peak.append(line)
bed=[]
with open(sys.argv[2],'r') as f:
for line in f:
line=line.strip('\n').split('\t')
bed.append(line)
SIZE=int(sys.argv[3])
index=0
n=len(peak)
num=[0]*n
for read in bed:
mid=(int(read[1])+int(read[2]))/2
while (index<n-1 and mid>int(peak[index][2])) or (index<n-1 and read[0]!=peak[index][0]):
index+=1
num[index]+=1
if (index<n-1) and (mid==int(peak[index+1][1])):
num[index+1]+=1
output=[]
for i in range(n):
if num[i]!=0:
y=1.0*num[i]*10**9/SIZE/(int(peak[i][2])-int(peak[i][1]))
y='%.4f'%y
output.append(peak[i][0]+'\t'+peak[i][1]+'\t'+peak[i][2]+'\t'+peak[i][3]+'\t'+str(num[i])+'\t'+y+'\n')
else:
output.append(peak[i][0]+'\t'+peak[i][1]+'\t'+peak[i][2]+'\t'+peak[i][3]+'\t'+str(num[i])+'\t'+str(0)+'\n')
with open('reads.txt','w') as f:
f.writelines(output)
f.close()
| Python | 0.000003 | |
85202173cf120caad603315cd57fa66857a88b0b | Add missing migrations for institutions | feder/institutions/migrations/0013_auto_20170810_2118.py | feder/institutions/migrations/0013_auto_20170810_2118.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
| Python | 0.000006 | |
4bb5653f5f7f95bf28b2ee596c441cbc4c7fbf3a | Create whitefilterstr.py | whitefilterstr.py | whitefilterstr.py | def whiteListCharFilter(inStr, whiteListStr):
""" Sanatize a string with a list of allowed (white) characters
Input: inStr {string} String to be sanatized.
Input: whiteListStr {string} String with allowed characters.
Output: outStr {string} Sanatized string
"""
outStr = ""
if (isinstance(inStr, str) or isinstance(whiteListStrinStr, str)) == False:
return outStr
for characterStr in inStr:
if characterStr in whiteListStr:
outStr = outStr + characterStr
return outStr
| Python | 0.000016 | |
4bfb560dc9f28d850a89c98590df032849cfc035 | Create zoql.py | zoql.py | zoql.py | #!/usr/local/bin/python3
import sys
import cmd
import csv
import pdb
import config
from zuora import Zuora
zuora = Zuora(config.zuoraConfig)
def zuoraObjectKeys(zouraObject):
if zouraObject:
return zouraObject.keys()
def dumpRecords(records):
if records:
firstRecord = records[0]
keys = [key for key in zuoraObjectKeys(firstRecord) if firstRecord[key]]
print(','.join(keys))
for record in records:
print(','.join(str(record[key]) for key in keys))
print(len(records), 'records')
class Interpeter(cmd.Cmd):
def do_select(self, line):
try:
if '.' in line:
csvData = zuora.queryExport('select ' + line).split('\n')
records = [record for record in csv.DictReader(csvData)]
else:
records = zuora.queryAll('select ' + line)
dumpRecords(records)
except Exception as e:
print('Error: q', repr(e))
def do_q(self, line):
return self.do_EOF(line)
def do_EOF(self, line):
return True
if __name__ == '__main__':
Interpeter().cmdloop()
| Python | 0.000164 | |
eb250318cf6933b4a037bd9ea238ce0fc7be58c2 | add first script | gitthemall.py | gitthemall.py | #! /usr/bin/env python2
import argparse
import os.path
import logging
import sys
logging.basicConfig(format='%(levelname)s: %(message)s')
def fail(msg):
'Fail program with printed message'
logging.error(msg)
sys.exit(1)
def update(repo, actions):
'Update repo according to allowed actions.'
repo = os.path.expanduser(repo)
logging.debug('going to %s' % repo)
if not os.path.isdir(repo):
fail('No directory at %s!' % repo)
if not os.path.isdir(os.path.join(repo, '.git')):
fail('No git repo at %s!' % repo)
def parse(config):
'Parse config and yield repos with actions'
with open(config) as f:
for line in f:
items = line.strip().split(',')
yield items[0], items[1:]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Keep git repos up-to-date.')
parser.add_argument('config', type=str, help='config file that lists repos')
parser.add_argument('-v', '--verbose', default=False, action='store_true')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
for repo, actions in parse(args.config):
update(repo, actions)
| Python | 0.000018 | |
f9ea992353f2caa835ca2007eb07b470d1b782a3 | Fix migration colorfield | geotrek/trekking/migrations/0006_practice_mobile_color.py | geotrek/trekking/migrations/0006_practice_mobile_color.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-03-04 12:43
from __future__ import unicode_literals
import colorfield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('trekking', '0005_auto_20181219_1524'),
]
operations = [
migrations.AddField(
model_name='practice',
name='mobile_color',
field=colorfield.fields.ColorField(db_column=b'couleur_mobile', default=b'#444444', help_text="Color's practice in mobile", max_length=18, verbose_name='Mobile color'),
),
]
| Python | 0.000002 | |
3959ad4a4ddc4655c1acd8362de4284ba1e8d3e7 | Apply the hack that renames local_settings.py only when running setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
from setuptest import test
import os, sys
if sys.argv[1] == 'install':
'''
Rename local_settings.py in order to
be excluded from setup.py install command
'''
ORIG_NAME = 'cronos/local_settings.py'
TEMP_NAME = 'cronos/local_settings.py1'
try:
os.rename(ORIG_NAME, TEMP_NAME)
except:
pass
setup(
name='cronos',
version='0.3-dev',
description='Django application that collects announcements and other \
personal data for students of TEI of Larissa',
author='cronos development team',
author_email='cronos@teilar.gr',
url='http://cronos.teilar.gr',
license='AGPLv3',
packages=find_packages(),
include_package_data=True,
data_files=[
('', ['LICENSE', 'manage.py']),
('bin', [
'bin/update_cronos.sh',
'bin/logs_create_fix_perms.sh',
'bin/get_full_production_db.sh'
]),
('configs', [
'configs/apache.conf',
'configs/cron.d_cronos',
'configs/logrotate.d_cronos',
'configs/logrotate.d_cronos-dev',
'configs/syslog-ng.conf'
]),
],
cmdclass={'test': test},
)
if sys.argv[1] == 'install':
'''
Restore local_settings.py
'''
try:
os.rename(TEMP_NAME, ORIG_NAME)
except:
pass
| #!/usr/bin/env python
from setuptools import setup, find_packages
from setuptest import test
import os
'''
Rename local_settings.py in order to
be excluded from setup.py install command
'''
ORIG_NAME = 'cronos/local_settings.py'
TEMP_NAME = 'cronos/local_settings.py1'
try:
os.rename(ORIG_NAME, TEMP_NAME)
except:
pass
setup(
name='cronos',
version='0.3-dev',
description='Django application that collects announcements and other \
personal data for students of TEI of Larissa',
author='cronos development team',
author_email='cronos@teilar.gr',
url='http://cronos.teilar.gr',
license='AGPLv3',
packages=find_packages(),
include_package_data=True,
data_files=[
('', ['LICENSE', 'manage.py']),
('bin', [
'bin/update_cronos.sh',
'bin/logs_create_fix_perms.sh',
'bin/get_full_production_db.sh'
]),
('configs', [
'configs/apache.conf',
'configs/cron.d_cronos',
'configs/logrotate.d_cronos',
'configs/logrotate.d_cronos-dev',
'configs/syslog-ng.conf'
]),
],
cmdclass={'test': test},
)
'''
Restore local_settings.py
'''
try:
os.rename(TEMP_NAME, ORIG_NAME)
except:
pass
| Python | 0.000001 |
1c181eb7f9987d2147df48a762d34895593f031a | Add version for torch dependency | setup.py | setup.py | #!/usr/bin/env python
import io
import os
import shutil
import subprocess
from pathlib import Path
import distutils.command.clean
from setuptools import setup, find_packages
from build_tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def read(*names, **kwargs):
with io.open(ROOT_DIR.joinpath(*names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def _get_version():
version = '0.9.0a0'
sha = None
try:
cmd = ['git', 'rev-parse', 'HEAD']
sha = subprocess.check_output(cmd, cwd=str(ROOT_DIR)).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha is not None:
version += '+' + sha[:7]
if sha is None:
sha = 'Unknown'
return version, sha
def _export_version(version, sha):
version_path = ROOT_DIR / 'torchtext' / 'version.py'
with open(version_path, 'w') as fileobj:
fileobj.write("__version__ = '{}'\n".format(version))
fileobj.write("git_version = {}\n".format(repr(sha)))
VERSION, SHA = _get_version()
_export_version(VERSION, SHA)
print('-- Building version ' + VERSION)
pytorch_package_version = os.getenv('PYTORCH_VERSION')
pytorch_package_dep = 'torch'
if pytorch_package_version is not None:
pytorch_package_dep += "==" + pytorch_package_version
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchtext extension
for path in (ROOT_DIR / 'torchtext').glob('**/*.so'):
print(f'removing \'{path}\'')
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / 'build',
ROOT_DIR / 'third_party' / 'build',
]
for path in build_dirs:
if path.exists():
print(f'removing \'{path}\' (and everything under it)')
shutil.rmtree(str(path), ignore_errors=True)
setup_info = dict(
# Metadata
name='torchtext',
version=VERSION,
author='PyTorch core devs and James Bradbury',
author_email='jekbradbury@gmail.com',
url='https://github.com/pytorch/text',
description='Text utilities and datasets for PyTorch',
long_description=read('README.rst'),
license='BSD',
install_requires=[
'tqdm', 'requests', pytorch_package_dep, 'numpy'
],
python_requires='>=3.5',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# Package info
packages=find_packages(exclude=('test*', 'build_tools*')),
zip_safe=False,
# Extension info
# If you are trying to use torchtext.so and see no registered op.
# See here: https://github.com/pytorch/vision/issues/2134"
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
'build_ext': setup_helpers.BuildExtension.with_options(no_python_abi_suffix=True),
'clean': clean,
},
)
setup(**setup_info)
| #!/usr/bin/env python
import io
import os
import shutil
import subprocess
from pathlib import Path
import distutils.command.clean
from setuptools import setup, find_packages
from build_tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def read(*names, **kwargs):
with io.open(ROOT_DIR.joinpath(*names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def _get_version():
version = '0.9.0a0'
sha = None
try:
cmd = ['git', 'rev-parse', 'HEAD']
sha = subprocess.check_output(cmd, cwd=str(ROOT_DIR)).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha is not None:
version += '+' + sha[:7]
if sha is None:
sha = 'Unknown'
return version, sha
def _export_version(version, sha):
version_path = ROOT_DIR / 'torchtext' / 'version.py'
with open(version_path, 'w') as fileobj:
fileobj.write("__version__ = '{}'\n".format(version))
fileobj.write("git_version = {}\n".format(repr(sha)))
VERSION, SHA = _get_version()
_export_version(VERSION, SHA)
print('-- Building version ' + VERSION)
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchtext extension
for path in (ROOT_DIR / 'torchtext').glob('**/*.so'):
print(f'removing \'{path}\'')
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / 'build',
ROOT_DIR / 'third_party' / 'build',
]
for path in build_dirs:
if path.exists():
print(f'removing \'{path}\' (and everything under it)')
shutil.rmtree(str(path), ignore_errors=True)
setup_info = dict(
# Metadata
name='torchtext',
version=VERSION,
author='PyTorch core devs and James Bradbury',
author_email='jekbradbury@gmail.com',
url='https://github.com/pytorch/text',
description='Text utilities and datasets for PyTorch',
long_description=read('README.rst'),
license='BSD',
install_requires=[
'tqdm', 'requests', 'torch', 'numpy'
],
python_requires='>=3.5',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# Package info
packages=find_packages(exclude=('test*', 'build_tools*')),
zip_safe=False,
# Extension info
# If you are trying to use torchtext.so and see no registered op.
# See here: https://github.com/pytorch/vision/issues/2134"
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
'build_ext': setup_helpers.BuildExtension.with_options(no_python_abi_suffix=True),
'clean': clean,
},
)
setup(**setup_info)
| Python | 0 |
b187e844d667b14dcc7874b351ee3f82383be348 | Fix dependency reference error | setup.py | setup.py | import ast
import re
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('puckdb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='puckdb',
author='Aaron Toth',
version=version,
url='https://github.com/aaront/puckdb',
description='An async-first hockey data extractor and API',
long_description=open('README.rst').read(),
test_suite="tests",
include_package_data=True,
packages=find_packages(),
package_data={'': ['LICENSE']},
package_dir={'puckdb': 'puckdb'},
license='Apache 2.0',
install_requires=[
'aiodns',
'cchardet',
'aiohttp',
'aiodns',
'asyncpg',
'asyncpgsa',
'click',
'click-datetime',
'python-dateutil',
'pytz',
'pg8000',
'sqlalchemy',
'ujson',
'python-dotenv',
'dataclasses',
'alembic',
'pint'
],
entry_points='''
[console_scripts]
puckdb=puckdb.console:main
''',
classifiers=(
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries'
)
)
| import ast
import re
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('puckdb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='puckdb',
author='Aaron Toth',
version=version,
url='https://github.com/aaront/puckdb',
description='An async-first hockey data extractor and API',
long_description=open('README.rst').read(),
test_suite="tests",
include_package_data=True,
packages=find_packages(),
package_data={'': ['LICENSE']},
package_dir={'puckdb': 'puckdb'},
license='Apache 2.0',
install_requires=[
'aiodns',
'cchardet',
'aiohttp',
'aiodns',
'dotenv',
'asyncpg',
'asyncpgsa',
'click',
'click-datetime',
'python-dateutil',
'pytz',
'pg8000',
'sqlalchemy',
'ujson',
'python-dotenv',
'dataclasses',
'alembic',
'pint'
],
entry_points='''
[console_scripts]
puckdb=puckdb.console:main
''',
classifiers=(
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries'
)
)
| Python | 0.000007 |
40c6a07808be26de0534a5b6f47ef28f591a500c | bump again | setup.py | setup.py | from setuptools import setup, find_packages
requires = []
dep_links = []
for dep in open('requirements.txt').read().split("\n"):
if dep.startswith('git+'):
dep_links.append(dep)
else:
requires.append(dep)
setup(
name='django-suave',
version="0.5.7",
description='Rather nice pages.',
long_description=open('README.rst').read(),
url='https://github.com/radiosilence/django-suave',
author='James Cleveland',
author_email='jamescleveland@gmail.com',
packages=find_packages(),
include_package_data=True,
license="LICENSE.txt",
install_requires=requires,
dependency_links=dep_links,
)
| from setuptools import setup, find_packages
requires = []
dep_links = []
for dep in open('requirements.txt').read().split("\n"):
if dep.startswith('git+'):
dep_links.append(dep)
else:
requires.append(dep)
setup(
name='django-suave',
version="0.5.6",
description='Rather nice pages.',
long_description=open('README.rst').read(),
url='https://github.com/radiosilence/django-suave',
author='James Cleveland',
author_email='jamescleveland@gmail.com',
packages=find_packages(),
include_package_data=True,
license="LICENSE.txt",
install_requires=requires,
dependency_links=dep_links,
)
| Python | 0 |
71fb2fc819c82e2db4075c6e5e32b2addc99c63a | Add platforms and classifiers | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='gsmsapi',
version='0.10',
description='SMS API for (german) SMS providers',
author='Torge Szczepanek',
author_email='debian@cygnusnetworks.de',
maintainer='Torge Szczepanek',
maintainer_email='debian@cygnusnetworks.de',
license='MIT',
packages=['gsmsapi'],
url = 'https://github.com/CygnusNetworks/python-gsmsapi',
download_url = 'https://github.com/CygnusNetworks/python-gsmsapi/tarball/v0.10',
keywords = ["sms", "german", "sipgate", "smstrade", "api"],
platforms='any',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'] # see: https://pypi.python.org/pypi?%3Aaction=list_classifiers
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='gsmsapi',
version='0.10',
description='SMS API for (german) SMS providers',
author='Torge Szczepanek',
author_email='debian@cygnusnetworks.de',
maintainer='Torge Szczepanek',
maintainer_email='debian@cygnusnetworks.de',
license='MIT',
packages=['gsmsapi'],
url = 'https://github.com/CygnusNetworks/python-gsmsapi',
download_url = 'https://github.com/CygnusNetworks/python-gsmsapi/tarball/v0.10',
keywords = ["sms", "german", "sipgate", "smstrade", "api"],
)
| Python | 0.000001 |
8b8383680e73496a73a3a520c3ebc85e2e01ce01 | fix version in setup.py | setup.py | setup.py | #!/usr/bin/env python
"""
Flask-REST4
-------------
Elegant RESTful API for your Flask apps.
"""
from setuptools import setup
setup(
name='flask_rest4',
version='0.1.3',
url='https://github.com/squirrelmajik/flask_rest4',
license='See License',
author='majik',
author_email='me@yamajik.com',
description='Elegant RESTful API for your Flask apps.',
long_description=__doc__,
py_modules=['flask_rest4'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| #!/usr/bin/env python
"""
Flask-REST4
-------------
Elegant RESTful API for your Flask apps.
"""
from setuptools import setup
setup(
name='flask_rest4',
version='0.1.0',
url='https://github.com/squirrelmajik/flask_rest4',
license='See License',
author='majik',
author_email='me@yamajik.com',
description='Elegant RESTful API for your Flask apps.',
long_description=__doc__,
py_modules=['flask_rest4'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| Python | 0 |
b03b6faea0470d867749c7b3bc3d6edc9c2406b9 | Remove pytest-Django | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Utility function to read file in the setup.py directory
def open_here(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
def get_dependencies(env_yml_file):
"""
Read the dependencies from a Conda environment file in YAML
and return a list of such dependencies (from conda and pip list)
Be sure to match packages specification for each of:
- Conda : http://conda.pydata.org/docs/spec.html#build-version-spec
- Pip & Setuptool :
- http://pythonhosted.org/setuptools/setuptools.html?highlight=install_require#declaring-dependencies
- https://pythonhosted.org/setuptools/pkg_resources.html#requirement-objects
"""
import yaml
with open_here(env_yml_file) as f:
environment = yaml.load(f)
conda_dependencies = []
package_map = {
'pytables': 'tables', # insert 'tables' instead of 'pytables'
'yaafe': ''
}
for dep in environment['dependencies']:
if isinstance(dep, str) and not(dep.startswith('python')):
if dep in package_map:
conda_dependencies.append(package_map[dep])
else:
conda_dependencies.append(dep)
elif isinstance(dep, dict) and 'pip' in dep:
pip_dependencies = dep['pip']
return conda_dependencies + pip_dependencies
# Pytest
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '--ignore', 'tests/sandbox', '--verbose']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
]
KEYWORDS = 'audio analysis features extraction MIR transcoding graph visualize plot HTML5 interactive metadata player'
setup(
name='TimeSide',
url='https://github.com/Parisson/TimeSide/',
description="Audio processing framework for the web",
long_description=open_here('README.rst').read(),
author="Guillaume Pellerin, Paul Brossier, Thomas Fillon, Riccardo Zaccarelli, Olivier Guilyardi",
author_email="yomguy@parisson.com, piem@piem.org, thomas@parisson.com, riccardo.zaccarelli@gmail.com, olivier@samalyse.com",
version='0.8.1',
setup_requires=['pyyaml'],
install_requires=[get_dependencies('conda-environment.yml')],
platforms=['OS Independent'],
license='Gnu Public License V2',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
packages=['timeside'],
include_package_data=True,
zip_safe=False,
scripts=['scripts/timeside-waveforms', 'scripts/timeside-launch'],
tests_require=['pytest>=3'],
cmdclass={'test': PyTest},
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Utility function to read file in the setup.py directory
def open_here(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
def get_dependencies(env_yml_file):
"""
Read the dependencies from a Conda environment file in YAML
and return a list of such dependencies (from conda and pip list)
Be sure to match packages specification for each of:
- Conda : http://conda.pydata.org/docs/spec.html#build-version-spec
- Pip & Setuptool :
- http://pythonhosted.org/setuptools/setuptools.html?highlight=install_require#declaring-dependencies
- https://pythonhosted.org/setuptools/pkg_resources.html#requirement-objects
"""
import yaml
with open_here(env_yml_file) as f:
environment = yaml.load(f)
conda_dependencies = []
package_map = {
'pytables': 'tables', # insert 'tables' instead of 'pytables'
'yaafe': ''
}
for dep in environment['dependencies']:
if isinstance(dep, str) and not(dep.startswith('python')):
if dep in package_map:
conda_dependencies.append(package_map[dep])
else:
conda_dependencies.append(dep)
elif isinstance(dep, dict) and 'pip' in dep:
pip_dependencies = dep['pip']
return conda_dependencies + pip_dependencies
# Pytest
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '--ignore', 'tests/sandbox', '--verbose', '--ds=app.test_settings']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
]
KEYWORDS = 'audio analysis features extraction MIR transcoding graph visualize plot HTML5 interactive metadata player'
setup(
name='TimeSide',
url='https://github.com/Parisson/TimeSide/',
description="Audio processing framework for the web",
long_description=open_here('README.rst').read(),
author="Guillaume Pellerin, Paul Brossier, Thomas Fillon, Riccardo Zaccarelli, Olivier Guilyardi",
author_email="yomguy@parisson.com, piem@piem.org, thomas@parisson.com, riccardo.zaccarelli@gmail.com, olivier@samalyse.com",
version='0.8.1',
setup_requires=['pyyaml'],
install_requires=[get_dependencies('conda-environment.yml')],
platforms=['OS Independent'],
license='Gnu Public License V2',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
packages=['timeside'],
include_package_data=True,
zip_safe=False,
scripts=['scripts/timeside-waveforms', 'scripts/timeside-launch'],
tests_require=['pytest>=3', 'pytest-django'],
cmdclass={'test': PyTest},
)
| Python | 0.000006 |
656d24c38c69891d8731ccf32852b66e32120eb7 | Bump dependency | setup.py | setup.py | #!/usr/bin/env python
from setuptools import find_packages, setup
project = "microcosm_pubsub"
version = "0.26.1"
setup(
name=project,
version=version,
description="PubSub with SNS/SQS",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/microcosm-pubsub",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=[
"boto3>=1.3.0",
"marshmallow>=2.12.1",
"microcosm>=0.17.2",
"microcosm-daemon>=0.10.0",
"microcosm-logging>=0.12.0",
],
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
"microcosm.factories": [
"sqs_message_context = microcosm_pubsub.context:configure_sqs_message_context",
"pubsub_message_schema_registry = microcosm_pubsub.registry:configure_schema_registry",
"sqs_consumer = microcosm_pubsub.consumer:configure_sqs_consumer",
"sqs_envelope = microcosm_pubsub.envelope:configure_sqs_envelope",
"sqs_message_dispatcher = microcosm_pubsub.dispatcher:configure",
"sqs_message_handler_registry = microcosm_pubsub.registry:configure_handler_registry",
"sns_producer = microcosm_pubsub.producer:configure_sns_producer",
"sns_topic_arns = microcosm_pubsub.producer:configure_sns_topic_arns",
]
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
| #!/usr/bin/env python
from setuptools import find_packages, setup
project = "microcosm_pubsub"
version = "0.26.1"
setup(
name=project,
version=version,
description="PubSub with SNS/SQS",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/microcosm-pubsub",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=[
"boto3>=1.3.0",
"marshmallow>=2.12.1",
"microcosm>=0.17.1",
"microcosm-daemon>=0.10.0",
"microcosm-logging>=0.12.0",
],
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
"microcosm.factories": [
"sqs_message_context = microcosm_pubsub.context:configure_sqs_message_context",
"pubsub_message_schema_registry = microcosm_pubsub.registry:configure_schema_registry",
"sqs_consumer = microcosm_pubsub.consumer:configure_sqs_consumer",
"sqs_envelope = microcosm_pubsub.envelope:configure_sqs_envelope",
"sqs_message_dispatcher = microcosm_pubsub.dispatcher:configure",
"sqs_message_handler_registry = microcosm_pubsub.registry:configure_handler_registry",
"sns_producer = microcosm_pubsub.producer:configure_sns_producer",
"sns_topic_arns = microcosm_pubsub.producer:configure_sns_topic_arns",
]
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
| Python | 0.000001 |
5e9fa7a1bb8601fb5629d7e7e92a894ab335ccf1 | update readme extension | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
"wheel>=0.23.0",
"requests>=2.7.0",
"pandas>=0.16.2",
"docopt>=0.6.2"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='labkey_multisite_query_tool',
version='0.1.0',
description="Commandline tool for querying across mutltiple LabKey instances.",
long_description=readme,
author="Stefan Novak",
author_email='novast@ohsu.edu',
url='https://github.com/OHSUCompBio/labkey_multisite_query_tool',
packages=[
'labkey_multisite_query_tool',
],
package_dir={'labkey_multisite_query_tool':
'labkey_multisite_query_tool'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='labkey_multisite_query_tool',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/labkey'],
test_suite='tests',
tests_require=test_requirements
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
"wheel>=0.23.0",
"requests>=2.7.0",
"pandas>=0.16.2",
"docopt>=0.6.2"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='labkey_multisite_query_tool',
version='0.1.0',
description="Commandline tool for querying across mutltiple LabKey instances.",
long_description=readme + '\n\n' + history,
author="Stefan Novak",
author_email='novast@ohsu.edu',
url='https://github.com/OHSUCompBio/labkey_multisite_query_tool',
packages=[
'labkey_multisite_query_tool',
],
package_dir={'labkey_multisite_query_tool':
'labkey_multisite_query_tool'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='labkey_multisite_query_tool',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/labkey'],
test_suite='tests',
tests_require=test_requirements
)
| Python | 0 |
187dbc9feab320c720c2632c4140a62e2c384328 | bump version | setup.py | setup.py | #!/usr/bin/env python
# Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import os
import sys
__version__ = '0.25.0'
if sys.argv[-1] == 'publish':
# test server
os.system('python setup.py register -r pypitest')
os.system('python setup.py sdist upload -r pypitest')
# production server
os.system('python setup.py register -r pypi')
os.system('python setup.py sdist upload -r pypi')
sys.exit()
# Convert README.md to README.rst for pypi
try:
from pypandoc import convert
def read_md(f):
return convert(f, 'rst')
# read_md = lambda f: convert(f, 'rst')
except ImportError:
print('warning: pypandoc module not found, '
'could not convert Markdown to RST')
def read_md(f):
return open(f, 'rb').read().decode(encoding='utf-8')
# read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'test']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='watson-developer-cloud',
version=__version__,
description='Client library to use the IBM Watson Services',
license='Apache 2.0',
install_requires=['requests>=2.0, <3.0', 'pysolr>= 3.3, <4.0'],
tests_require=['responses', 'pytest', 'python_dotenv'],
cmdclass={'test': PyTest},
author='Jeffrey Stylos',
author_email='jsstylos@us.ibm.com',
long_description=read_md('README.md'),
url='https://github.com/watson-developer-cloud/python-sdk',
packages=['watson_developer_cloud'],
include_package_data=True,
keywords='alchemy datanews, language, vision, question and answer' +
' tone_analyzer, natural language classifier, retrieve and '
'rank,' +
' tradeoff analytics, text to speech,' +
' language translation, language identification,' +
' concept expansion, machine translation, personality '
'insights,' +
' message resonance, watson developer cloud, wdc, watson, '
'ibm,' +
' dialog, user modeling, alchemyapi, alchemy, tone analyzer,' +
'speech to text, visual recognition',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application '
'Frameworks',
],
zip_safe=True
)
| #!/usr/bin/env python
# Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import os
import sys
__version__ = '0.24.0'
if sys.argv[-1] == 'publish':
# test server
os.system('python setup.py register -r pypitest')
os.system('python setup.py sdist upload -r pypitest')
# production server
os.system('python setup.py register -r pypi')
os.system('python setup.py sdist upload -r pypi')
sys.exit()
# Convert README.md to README.rst for pypi
try:
from pypandoc import convert
def read_md(f):
return convert(f, 'rst')
# read_md = lambda f: convert(f, 'rst')
except ImportError:
print('warning: pypandoc module not found, '
'could not convert Markdown to RST')
def read_md(f):
return open(f, 'rb').read().decode(encoding='utf-8')
# read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'test']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='watson-developer-cloud',
version=__version__,
description='Client library to use the IBM Watson Services',
license='Apache 2.0',
install_requires=['requests>=2.0, <3.0', 'pysolr>= 3.3, <4.0'],
tests_require=['responses', 'pytest', 'python_dotenv'],
cmdclass={'test': PyTest},
author='Jeffrey Stylos',
author_email='jsstylos@us.ibm.com',
long_description=read_md('README.md'),
url='https://github.com/watson-developer-cloud/python-sdk',
packages=['watson_developer_cloud'],
include_package_data=True,
keywords='alchemy datanews, language, vision, question and answer' +
' tone_analyzer, natural language classifier, retrieve and '
'rank,' +
' tradeoff analytics, text to speech,' +
' language translation, language identification,' +
' concept expansion, machine translation, personality '
'insights,' +
' message resonance, watson developer cloud, wdc, watson, '
'ibm,' +
' dialog, user modeling, alchemyapi, alchemy, tone analyzer,' +
'speech to text, visual recognition',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application '
'Frameworks',
],
zip_safe=True
)
| Python | 0 |
c95234c130435ddd116784ad1829f7bdaa9182c5 | ADD 138 solutions with A195615(OEIS) | 100_to_199/euler_138.py | 100_to_199/euler_138.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(172 − 82) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
| Python | 0 | |
2a74598f445c25f5227b19326e7bee160f285574 | Add the task class | cumulusci/tasks/setup.py | cumulusci/tasks/setup.py | import json
import re
import shutil
import os
import tempfile
from cumulusci.core.config import ServiceConfig
from cumulusci.core.exceptions import TaskOptionsError, ServiceNotConfigured
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.sfdx import SFDXBaseTask, SFDX_CLI
from cumulusci.utils import random_alphanumeric_underscore
CONNECTED_APP = """<?xml version="1.0" encoding="UTF-8"?>
<ConnectedApp xmlns="http://soap.sforce.com/2006/04/metadata">
<contactEmail>{email}</contactEmail>
<label>{label}</label>
<oauthConfig>
<callbackUrl>http://localhost:8080/callback</callbackUrl>
<consumerKey>{client_id}</consumerKey>
<consumerSecret>{client_secret}</consumerSecret>
<scopes>Web</scopes>
<scopes>Full</scopes>
<scopes>RefreshToken</scopes>
</oauthConfig>
</ConnectedApp>"""
PACKAGE_XML = """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>*</members>
<name>ConnectedApp</name>
</types>
<version>44.0</version>
</Package>"""
class CreateConnectedApp(SFDXBaseTask):
task_options = {
"label": {
"description": "The label for the connected app. Must contain only alphanumeric and underscores",
"required": True,
},
"email": {
"description": "The email address to associate with the connected app. Defaults to email address from the github service if configured.",
"required": True,
},
"username": {
"description": "Create the connected app in a different org. Defaults to the defaultdevhubusername configured in sfdx.",
"required": False,
},
"connect": {
"description": "If True, the created connected app will be stored as the CumulusCI connected_app service in the keychain.",
"required": False,
},
"overwrite": {
"description": "If True, any existing connected_app service in the CumulusCI keychain will be overwritten. Has no effect if the connect option is False.",
"required": False,
},
}
def _init_options(self, kwargs):
self.client_id = None
self.client_secret = None
kwargs["command"] = "force:mdapi:deploy --wait 5"
super(CreateConnectedApp, self)._init_options(kwargs)
# Validate label
if not re.match(r"^\w+$", self.options["label"]):
raise TaskOptionsError(
"label value must contain only alphanumeric or underscore characters"
)
# Default email to the github service's email if configured
if "email" not in self.options:
try:
github = self.project_config.keychain.get_service("github")
except ServiceNotConfigured:
raise TaskOptionsError(
"Could not determine a default for option 'email'. Either configure the github service using 'cci service connect github' or provide a value for the 'email' option"
)
self.options["email"] = github.email
# Default to sfdx defaultdevhubusername
if "username" not in self.options:
self._set_default_username()
self.options["command"] += " -u {}".format(self.options["username"])
self.options["connect"] = process_bool_arg(self.options.get("connect", False))
self.options["overwrite"] = process_bool_arg(
self.options.get("overwrite", False)
)
def _set_default_username(self):
self.logger.info("Getting username for the default devhub from sfdx")
self._run_command(
command="{} force:config:get defaultdevhubusername --json".format(SFDX_CLI),
env=self._get_env(),
output_handler=self._process_devhub_output,
)
def _process_json_output(self, line):
try:
data = json.loads(line)
return data
except Exception:
self.logger.error("Failed to parse json from line: {}".format(line))
raise
    def _process_devhub_output(self, line):
        """Extract the default devhub username from sfdx JSON output.

        Raises TaskOptionsError when sfdx has no defaultdevhubusername set.
        """
        data = self._process_json_output(line)
        # sfdx returns {"result": [{..., "value": <username>}]}; a missing
        # "value" key means the config variable is unset
        if "value" not in data["result"][0]:
            raise TaskOptionsError(
                "No sfdx config found for defaultdevhubusername. Please use the sfdx force:config:set to set the defaultdevhubusername and run again"
            )
        self.options["username"] = data["result"][0]["value"]
    def _generate_id_and_secret(self):
        """Generate fresh OAuth consumer credentials for the connected app."""
        # lengths presumably match Salesforce's consumer key/secret formats
        # -- confirm against the ConnectedApp metadata requirements
        self.client_id = random_alphanumeric_underscore(85)
        self.client_secret = random_alphanumeric_underscore(32)
    def _build_package(self, tempdir):
        """Write a deployable metadata package for the connected app.

        Creates ``connectedApps/<label>.connectedApp`` (rendered from the
        CONNECTED_APP template with freshly generated OAuth credentials)
        and a ``package.xml`` manifest inside *tempdir*.
        """
        connected_app_path = os.path.join(tempdir, "connectedApps")
        os.mkdir(connected_app_path)
        self._generate_id_and_secret()
        with open(
            os.path.join(connected_app_path, self.options["label"] + ".connectedApp"),
            "w",
        ) as f:
            f.write(
                CONNECTED_APP.format(
                    **{
                        "label": self.options["label"],
                        "email": self.options["email"],
                        "client_id": self.client_id,
                        "client_secret": self.client_secret,
                    }
                )
            )
        with open(os.path.join(tempdir, "package.xml"), "w") as f:
            f.write(PACKAGE_XML)
    def _connect_service(self):
        """Store the generated credentials as the ``connected_app`` service.

        Refuses to clobber an existing keychain service unless the
        ``overwrite`` option is True.
        """
        if not self.options["overwrite"]:
            try:
                connected_app = self.project_config.keychain.get_service(
                    "connected_app"
                )
                # get_service succeeded, so a service already exists; this
                # TaskOptionsError is not caught by the except below
                raise TaskOptionsError(
                    "The CumulusCI keychain already contains a connected_app service. Set the 'overwrite' option to True to overwrite the existing service"
                )
            except ServiceNotConfigured:
                pass
        self.project_config.keychain.set_service(
            "connected_app",
            ServiceConfig(
                {
                    "client_id": self.client_id,
                    "client_secret": self.client_secret,
                    "callback_url": "http://localhost:8080/callback",
                }
            ),
        )
def _run_task(self):
tempdir = tempfile.mkdtemp()
self._build_package(tempdir)
self.options["command"] += " -d {}".format(tempdir)
try:
super(CreateConnectedApp, self)._run_task()
except:
shutil.rmtree(tempdir)
if self.options["connect"]:
self._connect_service()
| Python | 0.999999 | |
e7b54968a67bda76546deff546baa49f836cfbaa | Add train_fcn32s | examples/voc/train_fcn32s.py | examples/voc/train_fcn32s.py | #!/usr/bin/env python
import chainer
from chainer.training import extensions
import fcn
def main():
    """Train FCN32s on PascalVOC2012, initialized from VGG16 weights."""
    gpu = 0
    resume = None  # snapshot filename to resume training from

    # 1. dataset
    dataset_train = fcn.datasets.PascalVOC2012SegmentationDataset('train')
    dataset_val = fcn.datasets.PascalVOC2012SegmentationDataset('val')

    iter_train = chainer.iterators.SerialIterator(dataset_train, batch_size=1)
    iter_val = chainer.iterators.SerialIterator(dataset_val, batch_size=1)

    # 2. model
    # BUG FIX: download_vgg16_chainermodel is a function and must be *called*
    # to get the model path; the original assigned the function object itself.
    vgg_path = fcn.data.download_vgg16_chainermodel()
    vgg = fcn.models.VGG16()
    chainer.serializers.load_hdf5(vgg_path, vgg)

    model = fcn.models.FCN32s()
    fcn.util.copy_chainermodel(vgg, model)
    if gpu >= 0:
        chainer.cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=1e-10, momentum=0.99)
    # BUG FIX: chainer optimizers are attached to a model with setup();
    # there is no Optimizer.set() method.
    optimizer.setup(model)

    # 4. trainer
    max_epoch = 10000
    updater = chainer.training.StandardUpdater(
        iter_train, optimizer, device=gpu)
    trainer = chainer.training.Trainer(
        updater, (max_epoch, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(iter_val, model, device=gpu))
    trainer.extend(extensions.snapshot(), trigger=(max_epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    if resume:
        chainer.serializers.load_hdf5(resume, trainer)

    trainer.run()


if __name__ == '__main__':
    main()
| Python | 0.000003 | |
b01bd1b21f1b12c9120845ec8a85355b038d6b20 | Add a basic Storage engine to talk to the DB | inventory_control/storage.py | inventory_control/storage.py | """
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
    """
    Instantiate a DB access object, create all the necessary hooks and
    then the accessors to a SQL database.
    """

    def __init__(self, config):
        # config: mapping with 'host', 'user', 'password' and 'db' keys
        # (presumably loaded from application settings -- confirm callers)
        self.config = config
        # NOTE(review): the connection and cursor are opened eagerly and
        # never closed here; callers are responsible for cleanup.
        self.db = MySQLdb.connect(host=self.config['host'],
                                  user=self.config['user'],
                                  passwd=self.config['password'],
                                  db=self.config['db'])
        self.cursor = self.db.cursor()
| Python | 0 | |
5397bbe4a87dba82dc9fa57abf09a4346aa63f46 | Add 168 python solution (#38) | python/168_Excel_Sheet_Column_Title.py | python/168_Excel_Sheet_Column_Title.py | class Solution:
def convertToTitle(self, n: int) -> str:
res = ""
while n > 0:
n -= 1
res = chr(65 + n % 26) + res
n //= 26
return res
| Python | 0 | |
399daa8ebec14bc4d7ee6c08135e525190e1eb6f | Add short Python script that prints as many dummy divs as needed. | collections/show-test/print-divs.py | collections/show-test/print-divs.py | # print-divs.py
def printDivs(num):
    """Print `num` dummy item divs, numbered from 1."""
    for index in range(1, num + 1):
        print('<div class="item">Item ' + str(index) + '</div>')


printDivs(20)
97883fa22dd8b1207cd533b4dd9e438c83a32a90 | Update version. | mixer/__init__.py | mixer/__init__.py | """
Description.
"""
# Module information
# ==================
__version__ = '0.2.0'
__project__ = 'mixer'
__author__ = "horneds <horneds@gmail.com>"
__license__ = "BSD"
| """
Description.
"""
# Module information
# ==================
__version__ = '0.1.0'
__project__ = 'mixer'
__author__ = "horneds <horneds@gmail.com>"
__license__ = "BSD" | Python | 0 |
2b80b358edd5bcf914d0c709369dbbcfd748772b | Add in a test for the marketing_link function in mitxmako | common/djangoapps/mitxmako/tests.py | common/djangoapps/mitxmako/tests.py | from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
    """
    Test the mitxmako shortcuts file
    """
    @override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
    @override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
    def test_marketing_link(self):
        """marketing_link() uses MKTG_URLS when the marketing site feature is
        enabled, and falls back to a reversed Django URL when it is not."""
        # test marketing site on
        with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
            expected_link = 'dummy-root/about-us'
            link = marketing_link('ABOUT')
            self.assertEquals(link, expected_link)
        # test marketing site off
        with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
            expected_link = reverse('about_edx')
            link = marketing_link('ABOUT')
            self.assertEquals(link, expected_link)
| Python | 0.000002 | |
7236d0358064968b9cbb0ab7f4ee9876dea02aaa | add python common functions | python/tcp_port_scan/tcp_port_scan.py | python/tcp_port_scan/tcp_port_scan.py | # -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2015 DennyZhang.com
## File : tcp_port_scan.py
## Author : DennyZhang.com <denny@dennyzhang.com>
## Description :
## --
## Created : <2016-01-15>
## Updated: Time-stamp: <2016-08-11 23:14:08>
##-------------------------------------------------------------------
import argparse
import subprocess
import os, sys
################################################################################
# TODO: move to common library
def strip_comments(string):
    """Return *string* with blank lines and '#' comment lines removed.

    Each surviving line is also stripped of surrounding whitespace.
    """
    kept = [
        stripped
        for stripped in (line.strip() for line in string.split("\n"))
        if stripped and not stripped.startswith("#")
    ]
    return "\n".join(kept)
def string_remove(string, opt_list):
    """Drop every line of *string* containing any substring in *opt_list*."""
    kept = []
    for line in string.split("\n"):
        if not any(item in line for item in opt_list):
            kept.append(line)
    return "\n".join(kept)
# TODO: common logging
################################################################################
nmap_command = "sudo nmap -sS -PN %s" # ("-p T:XXX,XXX 192.168.0.16")
result_dict = {}
def nmap_check(server_ip, ports):
if ports == "":
nmap_opts = server_ip
else:
nmap_opts = "-p %s %s" % (ports, server_ip)
command = nmap_command % (nmap_opts)
print "Run: %s" % (command)
nmap_output = subprocess.check_output(command, shell=True)
return cleanup_nmap_output(nmap_outputoutput, server_ip)
def cleanup_nmap_output(nmap_output, server_ip):
    # Placeholder: intended to normalize nmap's raw text output for
    # *server_ip*; currently a pass-through.
    return nmap_output
def audit_open_ports(port_list, whitelist):
    # Placeholder: intended to flag open ports not present in *whitelist*.
    # Currently unimplemented and returns None.
    return
################################################################################
if __name__=='__main__':
    # Sample:
    # python ./tcp_port_scan.py --server_list_file XXX --port_list_file XXXX --white_list_file XXX
    parser = argparse.ArgumentParser()
    parser.add_argument('--server_list_file', required=True,
                        help="ip list to scan", type=str)
    parser.add_argument('--port_list_file', required=True,
                        help="customized tcp ports to scan", type=str)
    parser.add_argument('--white_list_file', required=True,
                        help="safe ports to allow open", type=str)
    args = parser.parse_args()
    # NOTE(review): the three parsed file paths are stored but never read
    # below -- the script currently only runs a hard-coded smoke-test scan.
    server_list_file = args.server_list_file
    port_list_file = args.port_list_file
    white_list_file = args.white_list_file
    print nmap_check("104.131.129.100", "")
## File : tcp_port_scan.py ends
| Python | 0.000222 | |
c61850de298a1f40dd84d95d758d3c3faed38160 | Add safe_decode utility function | nose2/util.py | nose2/util.py | import os
import re
import sys
try:
from compiler.consts import CO_GENERATOR
except ImportError:
# IronPython doesn't have a complier module
CO_GENERATOR=0x20
try:
from inspect import isgeneratorfunction # new in 2.6
except ImportError:
import inspect
# backported from Python 2.6
def isgeneratorfunction(func):
return bool((inspect.isfunction(func) or inspect.ismethod(func)) and
func.func_code.co_flags & CO_GENERATOR)
import six
IDENT_RE = re.compile(r'^[_a-zA-Z]\w*$r', re.UNICODE)
VALID_MODULE_RE = re.compile(r'[_a-zA-Z]\w*\.py$', re.UNICODE)
def ln(label, char='-', width=70):
    """Draw a divider, with label in the middle.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'

    Width and divider char may be specified. Defaults are 70 and '-'
    respectively.
    """
    decorated_len = len(label) + 2  # label plus one space on each side
    left = (width - decorated_len) // 2
    line = '%s %s %s' % (char * left, label, char * left)
    # integer division can leave the line one short; pad on the right
    shortfall = width - len(line)
    if shortfall > 0:
        line = line + (char * shortfall)
    return line
def valid_module_name(path):
    # A loadable module filename: a python identifier ending in ``.py``.
    # Returns the regex match object (truthy on success), not a bool.
    return VALID_MODULE_RE.search(path)
def name_from_path(path):
    """Translate a filesystem path into a dotted module name.

    Walks upward from the file, prepending each enclosing package
    directory (one containing ``__init__``) until a non-package dir.
    """
    # back up to find module root
    parts = []
    path = os.path.normpath(path)
    base = os.path.splitext(path)[0]  # drop the .py/.pyc extension
    candidate, top = os.path.split(base)
    parts.append(top)
    while candidate:
        if ispackage(candidate):
            candidate, top = os.path.split(candidate)
            parts.append(top)
        else:
            break
    # parts were collected leaf-first, so reverse for the dotted name
    return '.'.join(reversed(parts))
def module_from_name(name):
    # __import__ returns the *top-level* package for a dotted name, so
    # fetch the actual (possibly nested) module from sys.modules instead.
    __import__(name)
    return sys.modules[name]
def ispackage(path):
    """
    Is this path a package directory?
    """
    if not os.path.isdir(path):
        return False
    # at least the end of the path must be a legal python identifier
    basename = os.path.basename(path)
    if not IDENT_RE.match(basename):
        return False
    # ...and an __init__ marker must exist
    for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
        if os.path.isfile(os.path.join(path, init)):
            return True
    if sys.platform.startswith('java') and \
            os.path.isfile(os.path.join(path, '__init__$py.class')):
        return True
    return False
def safe_decode(string):
    """Decode *string* to text, trying the default codec then UTF-8.

    ``None`` passes through unchanged; input that fails both decodes
    yields a placeholder marker instead of raising.
    """
    if string is None:
        return None
    for decode_args in ((), ('utf-8',)):
        try:
            return string.decode(*decode_args)
        except UnicodeDecodeError:
            continue
    return six.u('<unable to decode>')
| import os
import re
import sys
try:
from compiler.consts import CO_GENERATOR
except ImportError:
# IronPython doesn't have a complier module
CO_GENERATOR=0x20
try:
from inspect import isgeneratorfunction # new in 2.6
except ImportError:
import inspect
# backported from Python 2.6
def isgeneratorfunction(func):
return bool((inspect.isfunction(func) or inspect.ismethod(func)) and
func.func_code.co_flags & CO_GENERATOR)
IDENT_RE = re.compile(r'^[_a-zA-Z]\w*$r', re.UNICODE)
VALID_MODULE_RE = re.compile(r'[_a-zA-Z]\w*\.py$', re.UNICODE)
def ln(label, char='-', width=70):
"""Draw a divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
Width and divider char may be specified. Defaults are 70 and '-'
respectively.
"""
label_len = len(label) + 2
chunk = (width - label_len) // 2
out = '%s %s %s' % (char * chunk, label, char * chunk)
pad = width - len(out)
if pad > 0:
out = out + (char * pad)
return out
def valid_module_name(path):
return VALID_MODULE_RE.search(path)
def name_from_path(path):
# back up to find module root
parts = []
path = os.path.normpath(path)
base = os.path.splitext(path)[0]
candidate, top = os.path.split(base)
parts.append(top)
while candidate:
if ispackage(candidate):
candidate, top = os.path.split(candidate)
parts.append(top)
else:
break
return '.'.join(reversed(parts))
def module_from_name(name):
__import__(name)
return sys.modules[name]
def ispackage(path):
"""
Is this path a package directory?
"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if IDENT_RE.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
| Python | 0.000233 |
a119c9f53babd87f5e5adc1886256c59a21c19a5 | Move content_type formatting support to a different module | hug/format.py | hug/format.py | def content_type(content_type):
'''Attaches an explicit HTML content type to a Hug formatting function'''
def decorator(method):
method.content_type = content_type
return method
return decorator
| Python | 0.000001 | |
ef803b8ac95bb2440d1d312584376149573ac798 | Create bbgdailyhistory.py | BBG/bbgdailyhistory.py | BBG/bbgdailyhistory.py | # *- bbgdailyhistory.py -*
import os
import numpy as np
import pandas as pd
import blpapi
class BBGDailyHistory:
    '''
    Pull daily historical data for one security via the Bloomberg API.

    Parameters
    ----------
    sec : str
        Ticker
    fields : list of str
        Fields to request ('PX_HIGH', 'PX_LOW', etc...)
    start : str
        Start date (Bloomberg format, e.g. 'YYYYMMDD')
    end : str
        End date
    '''

    def __init__(self, sec, fields, start=None, end=None):
        self.sec = sec
        self.fields = fields
        self.start = start
        self.end = end

    def get_data(self) -> pd.DataFrame:
        '''
        Returns
        -------
        data : pd.DataFrame()
            The historical data queried returned in a dataFrame presented as
            long format (one row per date/field observation)
        '''
        # Session management
        sess = blpapi.Session()
        sess.start()
        # Open the reference-data service
        sess.openService('//blp/refdata')
        service = sess.getService('//blp/refdata')
        # Create request
        request = service.createRequest('HistoricalDataRequest')
        # Optional request setters
        request.set('startDate', self.start)
        request.set('endDate', self.end)
        request.getElement('securities').appendValue(self.sec)
        # Accumulators for the long-format frame
        date_acc = []
        ticker_acc = []
        field_acc = []
        value_acc = []
        # NOTE(review): fields are appended to the *same* request object
        # inside this loop, so each sendRequest() asks for every field seen
        # so far -- confirm whether a single request with all fields (and a
        # single send) was intended.
        for fie in self.fields:
            request.getElement('fields').appendValue(fie)
            sess.sendRequest(request)
            endReached = False
            while not endReached:
                event = sess.nextEvent(500)
                if event.eventType() in (blpapi.Event.RESPONSE,
                                         blpapi.Event.PARTIAL_RESPONSE):
                    for msg in event:
                        fieldData = msg.getElement('securityData').getElement('fieldData')
                        for data in fieldData.values():
                            for fld in self.fields:
                                date_acc.append(data.getElement('date').getValue())
                                field_acc.append(fld)
                                value_acc.append(data.getElement(fld).getValue())
                                ticker_acc.append(self.sec)
                # a final RESPONSE event terminates the reply stream
                if event.eventType() == blpapi.Event.RESPONSE:
                    endReached = True
        sess.stop()
        # BUG FIX: use the accumulated per-row field names; the original
        # passed the loop variable ``fie`` (the last field requested) for
        # every row, mislabelling the whole 'field' column.
        data = pd.DataFrame({'timestamp': date_acc,
                             'ticker': ticker_acc,
                             'field': field_acc,
                             'value': value_acc})
        return data
if __name__ == "__main__":
# Use example of BBGHistory
#from bbgdatapuller import BBGHistory # Expect folder issue
security = 'SIMA SW Equity' #'SIMA SW Equity'
fields = ['PX_OPEN', 'PX_HIGH', 'PX_LOW', 'PX_LAST']
start = '20200105'
end = '20200109'
d = BBGDailyHistory(sec=security, fields=fields, start=start, end=end).get_data()
print(d.head())
| Python | 0.002245 | |
353868bc281ade826b48d2c5a79ad14986c0d35c | Create lowercaseLists.py | Bits/lowercaseLists.py | Bits/lowercaseLists.py | #!/usr/bin/env python
# Film title lists (note the deliberate typos corrected below)
docs = ["The Corporation", "Valentino: The Last Emperor", "Kings of Patsry"]
movies = ["The Talented Mr. Ripley", "The Network", "Silence of the Lambs", "Wall Street", "Marie Antoinette", "My Mana Godfrey", "Rope", "Sleuth"]
# films holds references to the two lists, so the index fixes below are
# visible through it as well
films = [[docs], [movies]]
# fix the typos in place by index assignment
movies[5] = "My Man Godfrey"
docs[-1] = "Kings of Pastry"
# demonstration: lowercase a literal list with a comprehension
y = [x.lower() for x in ["A","B","C"]]
print(y)
# lowercase both corrected lists into a single flat list
newFilmsList = [x.lower() for x in docs] + [x.lower() for x in movies]
print(newFilmsList)
| Python | 0 | |
c36bc5664f9bfd4d1d954f4aef0402abf9533e47 | add benchmark script | resources/evaluation/SoK/benchmark.py | resources/evaluation/SoK/benchmark.py | from collections import namedtuple
import os
import lancelot
import gzip
import json
import os.path
import yaml
import tabulate
import pandas
import collections
import tqdm
Layout = namedtuple("Layout", ["functions", "basic_blocks", "instructions"])
frameworks = {
"lancelot": "lancelot",
# "ida": "IDA Pro (v7.5)",
# "viv": "vivisect (v0.0.20200804)"
}
def find_by_suffix(path, suffix):
    """Return the path of the first directory entry ending with *suffix*.

    Raises IndexError when nothing matches (callers rely on this).
    """
    matches = [name for name in os.listdir(path) if name.endswith(suffix)]
    return os.path.join(path, matches[0])
def get_gt_layout(path):
    """Load the ground-truth layout shipped next to a test binary.

    Reads the ``*.gt.json.gz`` file in *path* and collects function,
    basic-block and instruction virtual addresses into a Layout of sets.
    """
    with open(find_by_suffix(path, ".gt.json.gz"), "rb") as f:
        doc = json.loads(gzip.decompress(f.read()))

    functions = set([])
    basic_blocks = set([])
    instructions = set([])
    # schema used here: module -> "fuc" (functions) -> "bb" -> "instructions",
    # each entry carrying a "va" virtual address
    for f in doc["module"].get("fuc", []):
        functions.add(int(f["va"]))
        for bb in f.get("bb", []):
            basic_blocks.add(int(bb["va"]))
            for insn in bb.get("instructions", []):
                instructions.add(int(insn["va"]))
    return Layout(functions, basic_blocks, instructions)
def get_lancelot_workspace(path):
    """Load a lancelot workspace from the sample in *path*.

    Prefers the ``.exe`` in the directory and falls back to the ``.dll``
    (find_by_suffix raises IndexError when a suffix has no match).
    """
    try:
        sample = find_by_suffix(path, ".exe")
    except IndexError:
        sample = find_by_suffix(path, ".dll")
    # BUG FIX: the original left both file handles unclosed; use a context
    # manager so the descriptor is released promptly.
    with open(sample, "rb") as f:
        return lancelot.from_bytes(f.read())
def get_lancelot_layout(path):
    """Recover function/basic-block/instruction addresses with lancelot."""
    ws = get_lancelot_workspace(path)

    functions = set([])
    basic_blocks = set([])
    instructions = set([])
    for f in ws.get_functions():
        functions.add(f)

        try:
            cfg = ws.build_cfg(f)
        except Exception:
            # CFG recovery can fail for individual functions; skip them
            # rather than aborting the sample.  (BUG FIX: was a bare
            # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.)
            continue

        for bb in cfg.basic_blocks.values():
            basic_blocks.add(bb.address)

            # walk the basic block instruction by instruction
            va = bb.address
            while va < bb.address + bb.length:
                try:
                    insn = ws.read_insn(va)
                except ValueError:
                    break
                instructions.add(va)
                va += insn.length

    return Layout(functions, basic_blocks, instructions)
def precision(found, wanted):
    """Fraction of *found* items that are truly in *wanted* (TP / found)."""
    true_positives = wanted.intersection(found)
    return len(true_positives) / float(len(found))
def recall(found, wanted):
    """Fraction of *wanted* items that were actually found (TP / wanted)."""
    true_positives = wanted.intersection(found)
    return len(true_positives) / float(len(wanted))
def compute_stats(framework, path):
    """Compare one framework's recovered layout against the ground truth.

    Returns precision/recall for functions, basic blocks and instructions.
    NOTE(review): only the "lancelot" loader is defined in this file;
    get_viv_layout / get_ida_layout are referenced but not defined here --
    confirm they exist elsewhere before enabling those frameworks.
    """
    if framework == "lancelot":
        found = get_lancelot_layout(path)
    elif framework == "viv":
        found = get_viv_layout(path)
    elif framework == "ida":
        found = get_ida_layout(path)
    else:
        raise RuntimeError("unexpected framework: " + framework)

    wanted = get_gt_layout(path)

    return {
        "functions": {
            "precision": precision(found.functions, wanted.functions),
            "recall": recall(found.functions, wanted.functions),
        },
        "basic_blocks": {
            "precision": precision(found.basic_blocks, wanted.basic_blocks),
            "recall": recall(found.basic_blocks, wanted.basic_blocks),
        },
        "instructions": {
            "precision": precision(found.instructions, wanted.instructions),
            "recall": recall(found.instructions, wanted.instructions),
        },
    }
def render_stats(stats):
    # Pretty-print the nested stats dict as block-style YAML.
    return yaml.dump(stats, default_flow_style=False)
def collect_tests():
    """Yield the path of every test binary in the SoK test-suite tree."""
    base = "SoK-windows-testsuite/"
    for build_name in os.listdir(base):
        build_dir = os.path.join(base, build_name)
        # skip stray files at the top level; only build directories count
        if not os.path.isdir(build_dir):
            continue
        for exe_name in os.listdir(build_dir):
            yield os.path.join(build_dir, exe_name)
if __name__ == "__main__":
results = collections.defaultdict(dict)
for test in tqdm.tqdm(list(collect_tests())):
for framework in frameworks.keys():
results[framework][test] = compute_stats(framework, test)
def collect_pandas(results):
return pandas.DataFrame.from_records(
{
"functions.precision": v["functions"]["precision"],
"functions.recall": v["functions"]["recall"],
"basic_blocks.precision": v["basic_blocks"]["precision"],
"basic_blocks.recall": v["basic_blocks"]["recall"],
"instructions.precision": v["instructions"]["precision"],
"instructions.recall": v["instructions"]["recall"],
"test": k,
}
for k, v in results.items()
)
for fw in frameworks.keys():
if fw not in results:
continue
pd = collect_pandas(results[fw])
print(f"{frameworks[fw]} vs SoK test suite")
print(" functions:")
print(" precision: %0.3f" % (pd["functions.precision"].mean()))
print(" recall: %0.3f" % (pd["functions.recall"].mean()))
print(" basic blocks:")
print(" precision: %0.3f" % (pd["basic_blocks.precision"].mean()))
print(" recall: %0.3f" % (pd["basic_blocks.recall"].mean()))
print(" instructions:")
print(" precision: %0.3f" % (pd["instructions.precision"].mean()))
print(" recall: %0.3f" % (pd["instructions.recall"].mean()))
rows = []
for test in results["lancelot"].keys():
frecall = results["lancelot"][test]["functions"]["recall"]
rows.append((frecall, test))
rows = sorted(rows)
print("")
print("worst performing test cases:")
print(tabulate.tabulate(rows[:20]))
| Python | 0.000001 | |
9afd1a8d3584e45d32858c3b8fa44efd0f1a09f1 | add unit test for ofproto automatic detection | ryu/tests/unit/ofproto/test_ofproto.py | ryu/tests/unit/ofproto/test_ofproto.py | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
LOG = logging.getLogger('test_ofproto')
class TestOfprotCommon(unittest.TestCase):
    """ Test case for ofproto
    """

    def test_ofp_event(self):
        """Reloading ofproto must not break importing/reloading ofp_event."""
        import ryu.ofproto
        reload(ryu.ofproto)  # reload() is the Python 2 builtin
        import ryu.controller.ofp_event
        reload(ryu.controller.ofp_event)

    def test_ofproto(self):
        # When new version of OFP support is added,
        # this test must be updated.
        import ryu.ofproto
        reload(ryu.ofproto)
        ofp_modules = ryu.ofproto.get_ofp_modules()

        import ryu.ofproto.ofproto_v1_0
        import ryu.ofproto.ofproto_v1_2
        import ryu.ofproto.ofproto_v1_3
        # the registry is keyed by wire-protocol version number
        eq_(set(ofp_modules.keys()), set([ryu.ofproto.ofproto_v1_0.OFP_VERSION,
                                          ryu.ofproto.ofproto_v1_2.OFP_VERSION,
                                          ryu.ofproto.ofproto_v1_3.OFP_VERSION,
                                          ]))
        # each registry value is a (constants module, parser module) pair
        consts_mods = set([ofp_mod[0] for ofp_mod in ofp_modules.values()])
        eq_(consts_mods, set([ryu.ofproto.ofproto_v1_0,
                              ryu.ofproto.ofproto_v1_2,
                              ryu.ofproto.ofproto_v1_3,
                              ]))

        parser_mods = set([ofp_mod[1] for ofp_mod in ofp_modules.values()])
        import ryu.ofproto.ofproto_v1_0_parser
        import ryu.ofproto.ofproto_v1_2_parser
        import ryu.ofproto.ofproto_v1_3_parser
        eq_(parser_mods, set([ryu.ofproto.ofproto_v1_0_parser,
                              ryu.ofproto.ofproto_v1_2_parser,
                              ryu.ofproto.ofproto_v1_3_parser,
                              ]))
| Python | 0 | |
81afc4ed6d7390567dfe9949c9f332b36a6add9c | Add lang install es_ES | l10n_cr_base/l10n_cr_base.py | l10n_cr_base/l10n_cr_base.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# l10n_cr_base.py
# l10n_cr_base
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
from osv import osv,fields
from base.res.partner.partner import _lang_get
class res_partner_function(osv.osv):
'''
Inherits res.partner.function to add translation to code and name fields
'''
_inherit = 'res.partner.function'
_columns = {
'name': fields.char('Function Name', size=64, required=True, translate=True),
'code': fields.char('Code', size=8, required=True, translate=True),
}
res_partner_function()
class res_partner_title(osv.osv):
'''
Inherits res.partner.title to add translation to shortcut field
'''
_inherit = 'res.partner.title'
_columns = {
'shortcut': fields.char('Shortcut', required=True, size=16, translate=True),
}
res_partner_title()
class res_partner(osv.osv):
'''
Inherits res.partner to add id_number field
'''
_inherit = 'res.partner'
_columns = {
'id_number': fields.char('ID Number', size=30,required=False, select=1),
'lang': fields.selection(_lang_get, 'Language', size=5, required=True, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, it will be english."),
}
_defaults = {
'lang': lambda *a: 'es_ES',
}
res_partner()
def _lang_es_install(self, cr, uid, data, context):
    """Update the es_ES translations for every installed module.

    NOTE(review): ``pooler`` is referenced but never imported in this
    file -- confirm the OpenERP server injects it, or add ``import pooler``.
    """
    lang = 'es_ES'
    modobj = pooler.get_pool(cr.dbname).get('ir.module.module')
    mids = modobj.search(cr, uid, [('state', '=', 'installed')])
    modobj.update_translations(cr, uid, mids, lang)
    return {}

# BUG FIX: the module previously invoked ``_lang_es_install()`` at import
# time with no arguments, which raised TypeError (the function requires
# self/cr/uid/data/context) and broke importing this module.  The bare call
# has been removed; invoke the function from a wizard/hook with a real
# cursor instead.
| # -*- encoding: utf-8 -*-
##############################################################################
#
# l10n_cr_base.py
# l10n_cr_base
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
from osv import osv,fields
from base.res.partner.partner import _lang_get
class res_partner_function(osv.osv):
'''
Inherits res.partner.function to add translation to code and name fields
'''
_inherit = 'res.partner.function'
_columns = {
'name': fields.char('Function Name', size=64, required=True, translate=True),
'code': fields.char('Code', size=8, required=True, translate=True),
}
res_partner_function()
class res_partner_title(osv.osv):
'''
Inherits res.partner.title to add translation to shortcut field
'''
_inherit = 'res.partner.title'
_columns = {
'shortcut': fields.char('Shortcut', required=True, size=16, translate=True),
}
res_partner_title()
class res_partner(osv.osv):
'''
Inherits res.partner to add id_number field
'''
_inherit = 'res.partner'
_columns = {
'id_number': fields.char('ID Number', size=30,required=False, select=1),
'lang': fields.selection(_lang_get, 'Language', size=5, required=True, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, it will be english."),
}
_defaults = {
'lang': lambda *a: 'es_ES',
}
res_partner()
| Python | 0 |
e1810dcfd635198363838ed5c4dcd92c1cef1b07 | use wikistats lib to update languages_by_size | scripts/maintenance/wikimedia_sites.py | scripts/maintenance/wikimedia_sites.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script that updates the language lists in Wikimedia family files."""
#
# (C) xqt, 2009-2016
# (C) Pywikibot team, 2008-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import re
import pywikibot
from pywikibot.data import wikistats
from pywikibot.family import Family
# supported families by this script
families_list = [
'anarchopedia',
'wikibooks',
'wikinews',
'wikipedia',
'wikiquote',
'wikisource',
'wikiversity',
'wikivoyage',
'wiktionary',
]
exceptions = ['-']
def update_family(families):
"""Update family files."""
ws = wikistats.WikiStats()
for family in families or families_list:
pywikibot.output('\nChecking family %s:' % family)
original = Family.load(family).languages_by_size
obsolete = Family.load(family).obsolete
new = []
table = ws.languages_by_size(family)
for code in table:
if not (code in obsolete or code in exceptions):
new.append(code)
# put the missing languages to the right place
missing = original != new and set(original) - set(new)
if missing:
pywikibot.warning("['%s'] not listed at wikistats."
% "', '".join(missing))
index = {}
for code in missing:
index[original.index(code)] = code
i = len(index) - 1
for key in sorted(index.keys(), reverse=True):
new.insert(key - i, index[key])
i -= 1
if original == new:
pywikibot.output(u'The lists match!')
else:
pywikibot.output(u"The lists don't match, the new list is:")
text = u' self.languages_by_size = [\r\n'
line = ' ' * 11
for code in new:
if len(line) + len(code) <= 76:
line += u" '%s'," % code
else:
text += u'%s\r\n' % line
line = ' ' * 11
line += u" '%s'," % code
text += u'%s\r\n' % line
text += u' ]'
pywikibot.output(text)
family_file_name = 'pywikibot/families/%s_family.py' % family
family_file = codecs.open(family_file_name, 'r', 'utf8')
family_text = family_file.read()
old = re.findall(r'(?msu)^ {8}self.languages_by_size.+?\]',
family_text)[0]
family_text = family_text.replace(old, text)
family_file = codecs.open(family_file_name, 'w', 'utf8')
family_file.write(family_text)
family_file.close()
if __name__ == '__main__':
fam = set()
for arg in pywikibot.handle_args():
if arg in families_list:
fam.add(arg)
update_family(fam)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script that updates the language lists in Wikimedia family files."""
#
# (C) xqt, 2009-2014
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import re
import requests
from xml.etree import cElementTree
import pywikibot
from pywikibot.family import Family
# wikistats dump endpoint; %s is the table name from familiesDict
URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=xml'

# family name -> wikistats table name
familiesDict = {
    'anarchopedia': 'anarchopedias',
    'wikibooks': 'wikibooks',
    'wikinews': 'wikinews',
    'wikipedia': 'wikipedias',
    'wikiquote': 'wikiquotes',
    'wikisource': 'wikisources',
    'wikiversity': 'wikiversity',
    'wikivoyage': 'wikivoyage',
    'wiktionary': 'wiktionaries',
}

# language codes never written into the family files
exceptions = ['-']
def update_family(families):
    """Update family files.

    :param families: iterable of family names; when empty/falsy every
        family listed in the module-level ``familiesDict`` is processed.
    """
    for family in families or familiesDict.keys():
        pywikibot.output('\nChecking family %s:' % family)
        original = Family.load(family).languages_by_size
        obsolete = Family.load(family).obsolete
        # fetch the wikistats XML dump for this family
        # NOTE(review): cElementTree.parse() expects a file-like object,
        # but a requests Response is not one — presumably feed.raw (or
        # parsing feed.content) was intended; verify.
        feed = requests.get(URL % familiesDict[family])
        tree = cElementTree.parse(feed)
        new = []
        for field in tree.findall('row/field'):
            # only the 'prefix' field carries the language code
            if field.get('name') == 'prefix':
                code = field.text
                if not (code in obsolete or code in exceptions):
                    new.append(code)
                continue
        # put the missing languages to the right place
        # (codes present locally but absent from wikistats keep their
        # original relative positions in the regenerated list)
        missing = original != new and set(original) - set(new)
        if missing:
            pywikibot.warning("['%s'] not listed at wikistats."
                              % "', '".join(missing))
            index = {}
            for code in missing:
                index[original.index(code)] = code
            i = len(index) - 1
            # insert from the highest original index down so earlier
            # insertions do not shift the later target positions
            for key in sorted(index.keys(), reverse=True):
                new.insert(key - i, index[key])
                i -= 1
        if original == new:
            pywikibot.output(u'The lists match!')
        else:
            pywikibot.output(u"The lists don't match, the new list is:")
            # re-render the languages_by_size assignment, wrapped ~76 cols
            text = u'        self.languages_by_size = [\r\n'
            line = ' ' * 11
            for code in new:
                if len(line) + len(code) <= 76:
                    line += u" '%s'," % code
                else:
                    text += u'%s\r\n' % line
                    line = ' ' * 11
                    line += u" '%s'," % code
            text += u'%s\r\n' % line
            text += u'        ]'
            pywikibot.output(text)
            # splice the regenerated list into the family source file
            family_file_name = 'pywikibot/families/%s_family.py' % family
            family_file = codecs.open(family_file_name, 'r', 'utf8')
            family_text = family_file.read()
            old = re.findall(r'(?msu)^ {8}self.languages_by_size.+?\]',
                             family_text)[0]
            family_text = family_text.replace(old, text)
            family_file = codecs.open(family_file_name, 'w', 'utf8')
            family_file.write(family_text)
            family_file.close()
if __name__ == '__main__':
    # collect recognized family names from the command line; other
    # arguments are consumed by pywikibot.handleArgs() (global options)
    fam = []
    for arg in pywikibot.handleArgs():
        if arg in familiesDict.keys() and arg not in fam:
            fam.append(arg)
    update_family(fam)
| Python | 0 |
657591afce265521078a7cb2f84347c2319b6b33 | Add tests to help with autograding | nbgrader/tests.py | nbgrader/tests.py | import nose.tools
import numpy as np
def assert_unequal(a, b, msg=""):
    """Raise AssertionError(*msg*) when the two values compare equal."""
    equal = (a == b)
    if equal:
        raise AssertionError(msg)
def assert_same_shape(a, b):
    """Assert that *a* and *b* have identical array shapes.

    Inputs may be anything ``np.asarray`` accepts (nested lists, scalars,
    ndarrays); existing ndarrays are not copied.
    """
    # np.asarray replaces np.array(..., copy=False): same no-copy intent,
    # but it does not raise under NumPy 2.0, where copy=False became
    # "never copy" and errors out whenever a copy would be required.
    a_ = np.asarray(a)
    b_ = np.asarray(b)
    assert a_.shape == b_.shape, "{} != {}".format(a_.shape, b_.shape)
def assert_allclose(a, b):
    """Assert element-wise closeness of *a* and *b* (np.allclose defaults)."""
    close = np.allclose(a, b)
    assert close, "{} != {}".format(a, b)
# Re-export nose's assertion helpers under local names so graded notebooks
# can import everything they need from this single module.
assert_equal = nose.tools.eq_
assert_raises = nose.tools.assert_raises
| Python | 0 | |
5e7746d054f7762d93e1f70296fa3b43f882553c | Add synthtool scripts (#3765) | java-bigquerydatatransfer/google-cloud-bigquerydatatransfer/synth.py | java-bigquerydatatransfer/google-cloud-bigquerydatatransfer/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp

gapic = gcp.GAPICGenerator()
# NOTE(review): common_templates is created but never used in this script.
common_templates = gcp.CommonTemplates()

# Generate the Java client from the artman/GAPIC config for the
# BigQuery Data Transfer v1 API.
library = gapic.java_library(
    service='bigquerydatatransfer',
    version='v1',
    config_path='/google/cloud/bigquery/datatransfer/artman_bigquerydatatransfer.yaml',
    artman_output_name='google-cloud-bigquerydatatransfer-v1')

# Copy generated sources into the repo: the GAPIC layer into this artifact,
# gRPC stubs and protobuf classes into the sibling google-api-grpc artifacts.
s.copy(library / 'gapic-google-cloud-bigquerydatatransfer-v1/src', 'src')
s.copy(library / 'grpc-google-cloud-bigquerydatatransfer-v1/src', '../../google-api-grpc/grpc-google-cloud-bigquerydatatransfer-v1/src')
s.copy(library / 'proto-google-cloud-bigquerydatatransfer-v1/src', '../../google-api-grpc/proto-google-cloud-bigquerydatatransfer-v1/src')
| Python | 0.000001 | |
c13ec330194612832dfb0953d3e561a0ac151d69 | add irrigation baseline file gen scripts | scripts/RT/create_irrigation_files.py | scripts/RT/create_irrigation_files.py | """Create the generalized irrigation files, for now.
https://www.ars.usda.gov/ARSUserFiles/50201000/WEPP/usersum.pdf page 60
"""
from datetime import date
# Most recent year for which baseline irrigation records are generated.
LASTYEAR = date.today().year


def main():
    """Create files."""
    for ofecnt in range(1, 7):  # Do we have more than 6 OFEs?
        fn = f"/i/0/irrigation/ofe{ofecnt}.txt"
        with open(fn, "w", encoding="utf-8") as fh:
            fh.write("95.7\n")  # datver
            fh.write(f"{ofecnt} 2 1\n")  # furrow depletion
            fh.write("0.013 0.025\n")  # mindepth maxdepth
            # One irrigation event (days 175-185) per year per OFE.
            # NOTE(review): range(1, ofecnt) omits the last OFE and writes
            # no events at all for ofe1.txt — confirm whether
            # range(1, ofecnt + 1) was intended.
            for year in range(2007, LASTYEAR + 1):
                for ofe in range(1, ofecnt):
                    fh.write(
                        f"{ofe} 0.176E-05 1.3 0.5 1.0 175 {year} 185 {year}\n"
                    )


if __name__ == "__main__":
    main()
| Python | 0 | |
dfdbadbd83d41ccf71be74c7add6e04513a752d2 | Add Custom Field Change Report script | scripts/custom_field_change_report.py | scripts/custom_field_change_report.py | import sys
import argparse
from closeio_api import Client as CloseIO_API, APIError
import csv
reload(sys)
sys.setdefaultencoding('utf-8')
# ---- command line ---------------------------------------------------------
parser = argparse.ArgumentParser(description='Export a list of custom field changes for a specific custom field')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
parser.add_argument('--start-date', '-s',
                    help='The start of the date range you want to export call data for in yyyy-mm-dd format.')
parser.add_argument('--end-date', '-e',
                    help='The end of the date range you want to export call data for in yyyy-mm-dd format.')
parser.add_argument('--custom-field', '-f', required=True, help='The lcf id of the custom field you\'re searching for')
parser.add_argument('--lead-id', '-l', help='Use this field if you want to narrow your search to a specific lead_id')
parser.add_argument('--user-id', '-u', help='Use this field if you want to narrow your search to changes done by a specific user')
args = parser.parse_args()

# ---- organization / custom-field lookup -----------------------------------
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org = api.get('organization/' + org_id, params={ '_fields': 'id,name,memberships,inactive_memberships,lead_custom_fields'})
org_name = org['name'].replace('/', "")
org_memberships = org['memberships'] + org['inactive_memberships']

# resolve the human-readable name of the requested custom field, or abort
try:
    custom_field_name = [i for i in org['lead_custom_fields'] if i['id'] == args.custom_field][0]['name']
except IndexError as e:
    print "ERROR: Could not find custom field %s in %s" % (args.custom_field, org_name)
    sys.exit()

# user_id -> full name, covering both active and inactive members
users = {}
for member in org_memberships:
    users[member['user_id']] = member['user_full_name']

# ---- event query ----------------------------------------------------------
params = { 'object_type': 'lead', 'action': 'updated' }
events = []
# event payloads key the field as "custom.<lcf id>"
custom_lcf = "custom." + str(args.custom_field)

if args.start_date:
    params['date_updated__gte'] = args.start_date
if args.end_date:
    params['date_updated__lte'] = args.end_date
if args.lead_id:
    params['lead_id'] = args.lead_id
if args.user_id:
    params['user_id'] = args.user_id

# paginate through the event log via cursors
has_more = True
cursor = ''
count = 0

while has_more:
    params['_cursor'] = cursor
    try:
        resp = api.get('event', params=params)
        for event in resp['data']:
            # keep only updates that touched our field and carry both
            # before and after snapshots
            if custom_lcf in event['changed_fields'] and event.get('previous_data') and event.get('data'):
                events.append({
                    'Date': event['date_created'],
                    'Lead ID': event['lead_id'],
                    'Lead Name': event['data']['display_name'],
                    'User that Made the Change': event['user_id'],
                    'Old Value': event['previous_data'].get(custom_lcf),
                    'New Value': event['data'].get(custom_lcf)
                })
        cursor = resp['cursor_next']
        count += len(resp['data'])
        print "Analyzed Events: %s" % count
        has_more = bool(resp['cursor_next'])
    except APIError as e:
        # NOTE(review): swallowing APIError without advancing the cursor
        # leaves has_more True, so a persistent API failure loops forever
        # re-requesting the same page — consider retry-with-limit or abort.
        pass

print "Total %s Change Events Found: %s" % (custom_field_name, len(events))

# ---- CSV export -----------------------------------------------------------
f = open('%s %s Custom Field Changes.csv' % (org_name, custom_field_name), 'wt')
try:
ordered_keys = ['Date', 'Lead ID', 'Lead Name', 'User that Made the Change', 'Old Value', 'New Value']
writer = csv.DictWriter(f, ordered_keys)
writer.writeheader()
writer.writerows(events)
finally:
f.close() | Python | 0 | |
1b9aa5ccd500e17aa32c315e212068c8be96216c | Add profiler, now not import. thanks @tweekmoster! | rplugin/python3/deoplete/sources/deoplete_go/profiler.py | rplugin/python3/deoplete/sources/deoplete_go/profiler.py | import functools
import queue
try:
import statistics
stdev = statistics.stdev
mean = statistics.mean
except ImportError:
stdev = None
def mean(l):
return sum(l) / len(l)
try:
import time
clock = time.perf_counter
except Exception:
import timeit
clock = timeit.default_timer
class tfloat(float):
    """Float measured in seconds that renders as ANSI-colored milliseconds."""

    # ANSI SGR color code used when formatting; 39 selects the terminal
    # default foreground color.
    color = 39

    def __str__(self):
        millis = 1000 * self
        return '\x1b[%dm%f\x1b[mms' % (self.color, millis)
def profile(func):
    """Decorator: time *func* and log colorized timing stats when debugging.

    Keeps the last five wall-clock samples per decorated function and
    colors the current sample relative to their mean/stddev: cyan = not
    enough samples, green = fast (<= mu + sigma), yellow = typical,
    red = slow (> mu + 2*sigma). All overhead is skipped unless the
    wrapped object's ``debug_enabled`` attribute is truthy.
    """
    name = func.__name__
    # ring buffer of the five most recent timings (shared per function)
    samples = queue.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.debug_enabled:
            return func(self, *args, **kwargs)
        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)

        if len(samples) < 2:
            # too few samples for meaningful statistics
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            if stdev:
                d = tfloat(stdev(samples))
            else:
                # statistics module unavailable (see import fallback above)
                d = 0
            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33
        samples.append(n)

        # t = current sample, mu = mean, sigma = stddev of recent samples
        self.info('\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)',
                  name, n, m, d)
        return ret
    return wrapper
| Python | 0 | |
6df31f3b1049071bf5112521de8876d94e8a959a | Add support for the TinyOS 2.x serial forwarder protocol | python/smap/iface/tinyos.py | python/smap/iface/tinyos.py | """
Copyright (c) 2013 Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Provide twisted support for TinyOS serial forwarders. You should
subclass TOSSerialClient and implement packetReceived. You can then
connect it to a transport, for instance a serial port, using:
from twisted.internet.serialport import SerialPort
SerialPort(KetiMoteReceiver(self), port, reactor, baudrate=baud)
Based on Razvan Musaloiu-E.'s tos.py
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
from twisted.internet import reactor, protocol
class TOSSerialClient(protocol.Protocol):
    """Twisted protocol reassembling TinyOS HDLC-framed serial packets.

    Subclasses implement ``packetReceived``, which is called with the
    payload of each complete frame (CRC bytes stripped).
    """

    # HDLC framing bytes used by the TinyOS serial protocol
    HDLC_FLAG_BYTE = 0x7e     # frame delimiter
    HDLC_CTLESC_BYTE = 0x7d   # escape marker; next byte is XORed with 0x20

    def __init__(self):
        # bytes (as ints) of the frame currently being accumulated
        self.packet = []

    def dataReceived(self, data):
        """Twisted callback: feed raw serial bytes into the framer."""
        self._pump(data)

    def _pump(self, data):
        # Packet data read from Serial is in this format:
        #   [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
        # [Escaped data] is encoded so HDLC_FLAG_BYTE cannot occur within
        # it; once unescaped, its last 2 bytes are a 16-bit CRC of the
        # earlier part of the packet (excluding the initial flag byte).
        #
        # The serial device may have been mid-packet when we attached, i.e.
        #   [Incomplete escaped data][FLAG][FLAG][Escaped data][FLAG]
        # In that case the first (incomplete) packet is skipped naturally:
        # every FLAG byte flushes whatever has accumulated so far.
        for d in data:
            if ord(d) == self.HDLC_FLAG_BYTE:
                self._deliver()
            else:
                self.packet.append(ord(d))

    def _deliver(self):
        """Unescape the accumulated frame, verify its CRC, and dispatch it."""
        # Decode the packet, and check CRC:
        packet = self._unescape(self.packet)
        self.packet = []
        crc = self._crc16(0, packet[:-2])
        packet_crc = self._decode(packet[-2:])
        if crc != packet_crc:
            # NOTE(review): the packet is still delivered below even when
            # the CRC check fails — confirm this is intentional.
            print "Warning: wrong CRC! %x != %x %s" % \
                (crc, packet_crc, ["%2x" % i for i in packet])
        if len(packet):
            self.packetReceived(''.join(map(chr, packet[:-2])))

    def _unescape(self, packet):
        """Undo HDLC byte stuffing: 0x7d X -> X ^ 0x20."""
        r = []
        esc = False
        for b in packet:
            if esc:
                r.append(b ^ 0x20)
                esc = False
            elif b == self.HDLC_CTLESC_BYTE:
                esc = True
            else:
                r.append(b)
        return r

    def _decode(self, v):
        """Interpret the byte list *v* as a little-endian unsigned integer."""
        r = long(0)
        for i in v[::-1]:
            r = (r << 8) + i
        return r

    def _crc16(self, base_crc, frame_data):
        """CRC-16 (polynomial 0x1021) over *frame_data*, seeded with *base_crc*."""
        crc = base_crc
        for b in frame_data:
            crc = crc ^ (b << 8)
            for i in range(0, 8):
                if crc & 0x8000 == 0x8000:
                    crc = (crc << 1) ^ 0x1021
                else:
                    crc = crc << 1
                crc = crc & 0xffff
        return crc
| Python | 0 | |
e9f2e966361d8a23c83fbbbb4a4b3d4046203a16 | Test script for the heart container | CERR_core/Contouring/models/heart/test/test.py | CERR_core/Contouring/models/heart/test/test.py | #Test script for heart container testing if all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
    """Smoke-test entry point: reaching this line means every import resolved."""
    message = "All imports done. Test Successful"
    print(message)


if __name__ == "__main__":
    main(sys.argv)
b223c8be2bcb11d529a07997c05a9c5ab2b183b2 | Add basic tests for run length encoding printable | csunplugged/tests/resources/generators/test_run_length_encoding.py | csunplugged/tests/resources/generators/test_run_length_encoding.py | from unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
    """Parameter and subtitle tests for the run-length-encoding generator."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # language code consumed by BaseGeneratorTest helpers
        # NOTE(review): assumed from the base-class convention — confirm.
        self.language = "en"

    def test_worksheet_version_values(self):
        # smoke-test every valid value of the worksheet_type parameter
        query = QueryDict("worksheet_type=student-basic&paper_size=a4")
        generator = RunLengthEncodingResourceGenerator(query)
        self.run_parameter_smoke_tests(generator, "worksheet_type")

    # The remaining tests pin the generated subtitle for each
    # worksheet_type x paper_size combination.

    def test_subtitle_student_basic_a4(self):
        query = QueryDict("worksheet_type=student-basic&paper_size=a4")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Kid Fax - a4"
        )

    def test_subtitle_student_basic_letter(self):
        query = QueryDict("worksheet_type=student-basic&paper_size=letter")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Kid Fax - letter"
        )

    def test_subtitle_student_create_a4(self):
        query = QueryDict("worksheet_type=student-create&paper_size=a4")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Create your own - a4"
        )

    def test_subtitle_student_create_letter(self):
        query = QueryDict("worksheet_type=student-create&paper_size=letter")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Create your own - letter"
        )

    def test_subtitle_student_create_colour_a4(self):
        query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Create your own in colour - a4"
        )

    def test_subtitle_student_create_colour_letter(self):
        query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Student Worksheet - Create your own in colour - letter"
        )

    def test_subtitle_student_teacher_a4(self):
        query = QueryDict("worksheet_type=teacher&paper_size=a4")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Teacher Worksheet - a4"
        )

    def test_subtitle_student_teacher_letter(self):
        query = QueryDict("worksheet_type=teacher&paper_size=letter")
        generator = RunLengthEncodingResourceGenerator(query)
        self.assertEqual(
            generator.subtitle,
            "Teacher Worksheet - letter"
        )
| Python | 0 | |
24c3166906c8431523c641721e635fdc28fd91ce | add server that tests if a cookie was set | cookiescheck-test-server.py | cookiescheck-test-server.py | import sys
from flask import Flask, request, send_from_directory, make_response, abort
app = Flask(__name__)

# Populated from sys.argv in the __main__ block below.
filepath = None   # directory served to clients
mainpath = None   # entry page that grants the auth cookie


@app.route('/<path:path>')
def get(path):
    """Serve *path* from ``filepath``, gated by a simple auth cookie.

    A request for ``mainpath`` always succeeds and sets ``auth=1``;
    any other path returns 403 unless that cookie is already present.
    """
    ret = make_response(send_from_directory(filepath, path))
    if path == mainpath:
        # visiting the entry page grants the cookie
        ret.set_cookie('auth', '1')
    elif request.cookies.get('auth') == '1':
        pass  # already authorized
    else:
        abort(403)
    return ret
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: %s <dir-to-serve> <main-file>"
sys.exit(1)
print sys.argv
filepath = sys.argv[1]
mainpath = sys.argv[2]
app.run(host='0.0.0.0')
| Python | 0.000001 | |
c011154135a73db2c5bba247fc33f94032553f2e | Correct package files | janitor/__init__.py | janitor/__init__.py | import utils
utils = utils
logger, logger_api = utils.logger.setup_loggers(
"janitor"
)
| Python | 0.000082 | |
1c692359231f97c3b398861fef9d5c695e8ff5f8 | Add config file module using Property List backed files. | core/pycopia/plistconfig.py | core/pycopia/plistconfig.py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# Copyright (C) 2010 Keith Dart <keith@dartworks.biz>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Config object backed by a property list file. A property list file is an XML
format text file using Apple's Property List DTD (PropertyList-1.0.dtd).
"""
from __future__ import absolute_import
from __future__ import print_function
#from __future__ import unicode_literals
from __future__ import division
import os
import re
import plistlib
class AutoAttrDict(dict):
    """A dictionary with attribute-style access and automatic container node creation.

    Reading a missing key (or attribute) silently creates and stores an
    empty child AutoAttrDict, so ``cfg.a.b.c = 1`` works without setup.
    The flip side: *read* access to a missing name mutates the mapping.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __getstate__(self):
        # NOTE(review): exposes the instance __dict__, not the mapping
        # contents; the dict items themselves are pickled via dict's own
        # reduction — confirm round-tripping behaves as intended.
        return self.__dict__.items()

    def __setstate__(self, items):
        for key, val in items:
            self.__dict__[key] = val

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))

    def __setitem__(self, key, value):
        return super(AutoAttrDict, self).__setitem__(key, value)

    def __getitem__(self, name):
        # Auto-vivify: a missing key yields (and stores) a new empty node.
        try:
            return super(AutoAttrDict, self).__getitem__(name)
        except KeyError:
            d = AutoAttrDict()
            super(AutoAttrDict, self).__setitem__(name, d)
            return d

    def __delitem__(self, name):
        return super(AutoAttrDict, self).__delitem__(name)

    # Attribute access maps directly onto item access (cfg.key == cfg["key"]).
    __getattr__ = __getitem__
    __setattr__ = __setitem__

    def copy(self):
        """Return a shallow AutoAttrDict copy of this mapping."""
        return AutoAttrDict(self)

    # perform shell-like variable expansion
    def expand(self, value):
        """Expand ``$name`` / ``${name}`` references in *value* from this mapping.

        Unknown names are left in place (with the leading ``$``); the scan
        resumes after each substituted value, so replacements are not
        themselves re-expanded.
        """
        if '$' not in value:
            return value
        i = 0
        while 1:
            mo = _var_re.search(value, i)
            if not mo:
                return value
            i, j = mo.span(0)
            oname = vname = mo.group(1)
            if vname.startswith('{') and vname.endswith('}'):
                vname = vname[1:-1]
            tail = value[j:]
            value = value[:i] + str(self.get(vname, "$"+oname))
            i = len(value)
            value += tail

    def tofile(self, path_or_file):
        """Serialize this config to a plist file (see write_config)."""
        write_config(self, path_or_file)
# matches $name or ${name}; '?' is allowed in bare names
_var_re = re.compile(r'\$([a-zA-Z0-9_\?]+|\{[^}]*\})')


def read_config_from_string(pstr):
    """Parse a plist XML string into an AutoAttrDict tree."""
    d = plistlib.readPlistFromString(pstr)
    return _convert_dict(d)
def read_config(path_or_file):
    """Read a property list config file.

    NOTE(review): plistlib.readPlist was deprecated in Python 3.4 and
    removed in 3.9 (use plistlib.load) — fine for the Python 2 target
    this file declares.
    """
    d = plistlib.readPlist(path_or_file)
    return _convert_dict(d)
def _convert_dict(d):
    """Recursively convert *d* and every nested dict into AutoAttrDict nodes."""
    for key in list(d.keys()):
        child = d[key]
        if isinstance(child, dict):
            d[key] = _convert_dict(child)
    return AutoAttrDict(d)
def write_config_to_string(conf):
    """Serialize *conf* to a plist XML string."""
    return plistlib.writePlistToString(conf)
def write_config(conf, path_or_file):
    """Write a property list config file."""
    plistlib.writePlist(conf, path_or_file)
def get_config(filename=None, init=None):
    """Get an existing or new plist config object.

    *init* takes precedence and is converted (and returned) directly;
    otherwise *filename* is read if it exists, or created empty on disk.
    With neither argument a fresh in-memory config is returned.
    """
    if init is not None:
        return _convert_dict(init)
    if filename is None:
        return AutoAttrDict()
    if not os.path.exists(filename):
        conf = AutoAttrDict()
        write_config(conf, filename)
        return conf
    return read_config(filename)
if __name__ == "__main__":
    # Self-test: exercise auto-vivification, $variable expansion, and a
    # save/reload round trip through /tmp.
    cf = get_config()
    cf.parts.program.flags.flagname = 2
    cf.parts.program.path = "$BASE/program"
    cf.parts.BASE = "bin"
    assert cf.parts.program.flags.flagname == 2
    assert cf.parts.program.path == "$BASE/program"
    assert cf.parts.expand(cf.parts.program.path) == "bin/program"
    cf.tofile("/tmp/testplist.plist")
    del cf
    # reload and verify everything survived serialization
    cf = read_config("/tmp/testplist.plist")
    assert cf.parts.program.flags.flagname == 2
    assert cf.parts.program.path == "$BASE/program"
    assert cf.parts.expand(cf.parts.program.path) == "bin/program"
    cf.parts.program.flags.flagname = 3
    assert cf.parts.program.flags.flagname == 3
    cf.tofile("/tmp/testplist.plist")
    del cf
    # mutation persisted across a second round trip
    cf = read_config("/tmp/testplist.plist")
    assert cf.parts.program.flags.flagname == 3
| Python | 0 | |
b6aedc1589c754bb867381e309aba5ae19f7bb1a | Create GDAL_SaveRaster.py | GDAL_SaveRaster.py | GDAL_SaveRaster.py |
from osgeo import gdal
def save_raster ( output_name, raster_data, dataset, driver="GTiff" ):
    """
    A function to save a 1-band raster using GDAL to the file indicated
    by ``output_name``. It requires a GDAL-accesible dataset (``dataset``,
    a path/URI openable by gdal.Open) to collect the projection and
    geotransform. ``raster_data`` is a 2D array matching that dataset's
    raster dimensions; it is written as Float32 with -999 as NoData.
    """
    # Bug fix: numpy was used below (raster_data.astype(np.float32)) but
    # never imported in this module, so every call raised NameError.
    import numpy as np

    # Open the reference dataset
    g_input = gdal.Open ( dataset )
    # Get the Geotransform vector
    geo_transform = g_input.GetGeoTransform ()
    x_size = g_input.RasterXSize # Raster xsize
    y_size = g_input.RasterYSize # Raster ysize
    srs = g_input.GetProjectionRef () # Projection
    # Need a driver object. By default, we use GeoTIFF with world file,
    # LZW compression and tiling; other drivers get no creation options.
    if driver == "GTiff":
        driver = gdal.GetDriverByName ( driver )
        dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
                        gdal.GDT_Float32, ['TFW=YES', \
                        'COMPRESS=LZW', 'TILED=YES'] )
    else:
        driver = gdal.GetDriverByName ( driver )
        dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
                        gdal.GDT_Float32 )
    dataset_out.SetGeoTransform ( geo_transform )
    dataset_out.SetProjection ( srs )
    # Write the single band as float32 and flag -999 as NoData
    dataset_out.GetRasterBand ( 1 ).WriteArray ( \
            raster_data.astype(np.float32) )
    dataset_out.GetRasterBand ( 1 ).SetNoDataValue ( float(-999) )
    # Dereferencing the dataset flushes it to disk (GDAL idiom)
    dataset_out = None
| Python | 0 | |
cf066fd373f0d12a43bad24db9e645e257617306 | add consts | drda/consts.py | drda/consts.py | ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
# DRDA FD:OCA data type codes. Each non-nullable base type (even code) is
# paired with a nullable variant (odd code, "N" prefix).

# numeric types
DRDA_TYPE_INTEGER = 0x02
DRDA_TYPE_NINTEGER = 0x03
DRDA_TYPE_SMALL = 0x04
DRDA_TYPE_NSMALL = 0x05
DRDA_TYPE_1BYTE_INT = 0x06
DRDA_TYPE_N1BYTE_INT = 0x07
DRDA_TYPE_FLOAT16 = 0x08
DRDA_TYPE_NFLOAT16 = 0x09
DRDA_TYPE_FLOAT8 = 0x0A
DRDA_TYPE_NFLOAT8 = 0x0B
DRDA_TYPE_FLOAT4 = 0x0C
DRDA_TYPE_NFLOAT4 = 0x0D
DRDA_TYPE_DECIMAL = 0x0E
DRDA_TYPE_NDECIMAL = 0x0F
DRDA_TYPE_ZDECIMAL = 0x10
DRDA_TYPE_NZDECIMAL = 0x11
DRDA_TYPE_NUMERIC_CHAR = 0x12
DRDA_TYPE_NNUMERIC_CHAR = 0x13
# locators and large objects
DRDA_TYPE_RSET_LOC = 0x14
DRDA_TYPE_NRSET_LOC = 0x15
DRDA_TYPE_INTEGER8 = 0x16
DRDA_TYPE_NINTEGER8 = 0x17
DRDA_TYPE_LOBLOC = 0x18
DRDA_TYPE_NLOBLOC = 0x19
DRDA_TYPE_CLOBLOC = 0x1A
DRDA_TYPE_NCLOBLOC = 0x1B
DRDA_TYPE_DBCSCLOBLOC = 0x1C
DRDA_TYPE_NDBCSCLOBLOC = 0x1D
DRDA_TYPE_ROWID = 0x1E
DRDA_TYPE_NROWID = 0x1F
# date/time types
DRDA_TYPE_DATE = 0x20
DRDA_TYPE_NDATE = 0x21
DRDA_TYPE_TIME = 0x22
DRDA_TYPE_NTIME = 0x23
DRDA_TYPE_TIMESTAMP = 0x24
DRDA_TYPE_NTIMESTAMP = 0x25
# byte-string types
DRDA_TYPE_FIXBYTE = 0x26
DRDA_TYPE_NFIXBYTE = 0x27
DRDA_TYPE_VARBYTE = 0x28
DRDA_TYPE_NVARBYTE = 0x29
DRDA_TYPE_LONGVARBYTE = 0x2A
DRDA_TYPE_NLONGVARBYTE = 0x2B
DRDA_TYPE_NTERMBYTE = 0x2C
DRDA_TYPE_NNTERMBYTE = 0x2D
# character-string types
DRDA_TYPE_CSTR = 0x2E
DRDA_TYPE_NCSTR = 0x2F
DRDA_TYPE_CHAR = 0x30
DRDA_TYPE_NCHAR = 0x31
DRDA_TYPE_VARCHAR = 0x32
DRDA_TYPE_NVARCHAR = 0x33
DRDA_TYPE_LONG = 0x34
DRDA_TYPE_NLONG = 0x35
# graphic (double-byte) and mixed character sets
DRDA_TYPE_GRAPHIC = 0x36
DRDA_TYPE_NGRAPHIC = 0x37
DRDA_TYPE_VARGRAPH = 0x38
DRDA_TYPE_NVARGRAPH = 0x39
DRDA_TYPE_LONGRAPH = 0x3A
DRDA_TYPE_NLONGRAPH = 0x3B
DRDA_TYPE_MIX = 0x3C
DRDA_TYPE_NMIX = 0x3D
DRDA_TYPE_VARMIX = 0x3E
DRDA_TYPE_NVARMIX = 0x3F
DRDA_TYPE_LONGMIX = 0x40
DRDA_TYPE_NLONGMIX = 0x41
DRDA_TYPE_CSTRMIX = 0x42
DRDA_TYPE_NCSTRMIX = 0x43
DRDA_TYPE_PSCLBYTE = 0x44
DRDA_TYPE_NPSCLBYTE = 0x45
DRDA_TYPE_LSTR = 0x46
DRDA_TYPE_NLSTR = 0x47
DRDA_TYPE_LSTRMIX = 0x48
DRDA_TYPE_NLSTRMIX = 0x49
# datalink types (note: 0x4A/0x4B are unassigned here)
DRDA_TYPE_SDATALINK = 0x4C
DRDA_TYPE_NSDATALINK = 0x4D
DRDA_TYPE_MDATALINK = 0x4E
DRDA_TYPE_NMDATALINK = 0x4F
542731f7fb3f5d09c4de4340f7ce18b7cbf41172 | Create Client.py | Client.py | Client.py | from Networking import Client
client = Client()
# Ask the remote Pi to run a timelapse.
# NOTE(review): Action argument semantics are defined by the remote
# handler — presumably [output_dir, fps, count, interval_s, flag]; confirm.
client.connect('10.42.42.25', 12345).send({'Ordre':'Timelapse', 'Action':["/home/pi/photo3", 24, 30, 0.25, False]})
# wait for the server's reply before tearing down the connection
reponse = client.recv()
client.close()
| Python | 0.000001 | |
745adf9898e6dc80d37f1a0c3c4361acf76f2feb | Create main.py | main.py | main.py | import webapp2
import logging
import json
import utils
import re
import advanced
class show_search_results(utils.BaseHandler):
    """Handle the Slack slash-command POST and echo back a search link.

    Replies in the HTTP response body and posts the same link to the
    configured Slack incoming webhook.
    """
    def post(self):
        # fields posted by Slack's slash-command integration
        token = self.request.get('token')
        channel = self.request.get('channel')
        text = self.request.get('text')
        user = self.request.get('user_name')
        user_id = self.request.get('user_id')
        # verify that the call to app is being made by an authorized
        # slack slash command
        if token == 'your_token':
            # first whitespace-delimited word of the command text
            match = re.match("[^\s]+", text)
            if match is not None:
                # Bug fixes: the original read ``image_name.group(0)``
                # (undefined name -> NameError) and built its output with
                # "".format(query_link), which always yields "".
                query_name = match.group(0)
                query_link = "<https://google.com/q?={}>".format(query_name)
                self.response.out.write(query_link)
                # call the Slack incoming webhook — now only when a query
                # term was actually found (previously this ran even when
                # query_link was undefined, raising NameError)
                url = "your_incoming_webhooks_url"
                payload = json.dumps(
                    {"channel": channel, "username": "Highfive",
                     "text": query_link})
                # NOTE(review): ``urlfetch`` is never imported at module
                # level (needs ``from google.appengine.api import urlfetch``).
                result = urlfetch.fetch(url=url,
                                        method=urlfetch.POST,
                                        payload=payload)
# Route tuples are (path_template, handler_class); ``debug`` is a keyword
# argument of WSGIApplication itself. The previous tuple
# ('/slack-five', query_link, debug=True) referenced an undefined name and
# was not a valid route (keyword arguments cannot appear inside a tuple).
app = webapp2.WSGIApplication([
    ('/slack-five', show_search_results)], debug=True)
| Python | 0.000001 | |
1c1ce1c3ba35c546828392dd69bd07176e2888ce | add MIDI renderer | midi.py | midi.py | #!/usr/bin/env python
import argparse
import colorsys
import math
import socket
import sys
import time
EARTH_RADIUS = 6371000  # mean Earth radius in meters (used by distance())

PURGE_TIME = 10  # seconds without an update before an aircraft is dropped
UPDATE_INTERVAL = 5.0 # seconds
last_update = time.time()  # timestamp of the most recent sound refresh

MAX_ALTITUDE = 40000   # feet; mapped onto the full MIDI pitch range
MAX_DISTANCE = 200000  # meters; mapped onto MIDI volume (closer = louder)
MIDI_NOTES_COUNT = 127
MIDI_VOLUME_MAX = 127

all_aircraft = {} # Maps ADSB ID -> aircraft info
next_slot = 0 # Which LED to use to show this aircraft
def distance(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in meters between two lat/lon points."""
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    half_dphi = math.radians(lat2 - lat1) / 2
    half_dlam = math.radians(lon2 - lon1) / 2
    # haversine term
    a = (math.sin(half_dphi) * math.sin(half_dphi) +
         math.cos(phi1) * math.cos(phi2) *
         math.sin(half_dlam) * math.sin(half_dlam))
    arc = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return EARTH_RADIUS * arc
def bearing(lat1, lon1, lat2, lon2):
    """Bearing in degrees [0, 360) from point 1 to point 2.

    Computed from the Mercator-projected latitude difference, i.e. this is
    the rhumb-line (constant-heading) bearing.
    """
    lat1_rad = math.radians(lat1)
    long1_rad = math.radians(lon1)
    lat2_rad = math.radians(lat2)
    long2_rad = math.radians(lon2)
    d_lon = long2_rad - long1_rad
    # Mercator latitude stretch between the two points
    d_phi = math.log(math.tan(lat2_rad/2.0 + math.pi/4.0) /
                     math.tan(lat1_rad/2.0 + math.pi/4.0))
    # Take the shorter way around when the longitude gap exceeds 180 deg.
    # Bug fix: these branches previously referenced the undefined name
    # ``dLong`` and raised NameError whenever the path crossed the
    # antimeridian.
    if abs(d_lon) > math.pi:
        if d_lon > 0.0:
            d_lon = -(2.0 * math.pi - d_lon)
        else:
            d_lon = (2.0 * math.pi + d_lon)
    bearing = (math.degrees(math.atan2(d_lon, d_phi)) + 360.0) % 360.0
    return bearing
def get_color(aircraft):
    # Map aircraft position, etc to an LED color.
    # Brightness = 255 when 0 m away, 0 when 100 km away; distance is
    # clamped at 100,000 meters. Color = blue for now.
    # NOTE(review): ``RGB_ntuples`` (and the commented-out ``Color``) are
    # not defined anywhere in this module — calling this function raises
    # NameError. Appears to be leftover from the LED-strip renderer this
    # MIDI script was derived from.
    if aircraft["distance"] < 100000:
        scaled_distance = 255 - int((aircraft["distance"] / 100000.0) * 255)
    else:
        scaled_distance = 255
    #print "distance %s -> %s = %s" % (aircraft["distance"], scaled_distance, RGB_ntuples[scaled_distance])
    return RGB_ntuples[255 - scaled_distance]
    #return Color(0, 0, scaled_distance )
def print_aircraft():
    # Dump every tracked aircraft to stdout, ordered by display slot.
    print ""
    for a in sorted(all_aircraft.values(), key=lambda x: x["slot"]):
        print ("%d: id %s alt %5d lat %6.2f lon %6.2f dist %5.0f m "
               "bearing %0.0f deg" %
               (a["slot"], a["id"], a["altitude"], a["lat"], a["lon"],
                a["distance"], a["bearing"]))
def print_sound():
    """Print one MIDI pitch/volume pair per aircraft, rate-limited.

    Runs at most once per UPDATE_INTERVAL seconds. Pitch scales with
    altitude (0..MAX_ALTITUDE ft -> 0..127) and volume with proximity
    (0 m -> 127, MAX_DISTANCE m -> 0).
    """
    global last_update
    if time.time() < last_update + UPDATE_INTERVAL:
        return
    last_update = time.time()
    print("%d aircraft" % len(all_aircraft))
    for a in sorted(all_aircraft.values(), key=lambda x: x["slot"]):
        # Force float division: this module runs under Python 2 (print
        # statements elsewhere), where the original
        # ``altitude / MAX_ALTITUDE`` was integer division and truncated
        # the pitch to 0 for every altitude below MAX_ALTITUDE.
        note = a["altitude"] / float(MAX_ALTITUDE) * MIDI_NOTES_COUNT
        volume = (MAX_DISTANCE - a["distance"]) / float(MAX_DISTANCE) * MIDI_VOLUME_MAX
        print("Pitch %d, Volume %d" % (note, volume))
# Number of display slots; aircraft are assigned slots round-robin.
LED_COUNT = 60


def process_line(line, mylat, mylon):
    """Parse one BaseStation/SBS-1 CSV line and update the aircraft table.

    Only MSG,3 (airborne position) records carry altitude/lat/lon and are
    processed; all other lines are ignored. As a side effect this also
    triggers sound output and purges stale aircraft.
    """
    global next_slot
    global all_aircraft
    parts = line.split(",")
    if parts and (parts[0] == "MSG"):
        if parts[1] == "3":
            # Airborne position message
            try:
                # SBS-1 field layout: 4 = hex ident, 11 = altitude,
                # 14/15 = lat/lon
                aircraft_id = parts[4]
                altitude = int(parts[11])
                lat = float(parts[14])
                lon = float(parts[15])
                d = distance(lat, lon, mylat, mylon)
                b = bearing(mylat, mylon, lat, lon)
                if aircraft_id not in all_aircraft:
                    # New plane: assign the next display slot round-robin
                    slot = next_slot
                    next_slot = (next_slot + 1) % LED_COUNT
                    aircraft = {
                        "id": aircraft_id,
                        "altitude": altitude,
                        "lat": lat,
                        "lon": lon,
                        "distance": d,
                        "bearing": b,
                        "update": time.time(),
                        "slot": slot,
                    }
                else:
                    # Update existing entry in place (slot is preserved)
                    aircraft = all_aircraft[aircraft_id]
                    aircraft.update({
                        "altitude": altitude,
                        "lat": lat,
                        "lon": lon,
                        "distance": d,
                        "bearing": b,
                        "update": time.time(),
                    })
                all_aircraft[aircraft_id] = aircraft
                # NOTE(review): print_sound() is rate-limited internally,
                # but calling it once per tracked aircraft is redundant —
                # a single call after the loop would suffice.
                for id, aircraft in all_aircraft.items():
                    #print_aircraft()
                    print_sound()
                # Purge aircraft not seen within PURGE_TIME.
                # NOTE(review): PURGE_TIME is 10 *seconds* although the
                # original comment said 10 minutes — confirm intent.
                # Deleting while iterating .items() is safe on Python 2
                # (items() returns a list) but would break on Python 3.
                for id, aircraft in all_aircraft.items():
                    if aircraft["update"] < time.time() - PURGE_TIME:
                        del all_aircraft[id]
                        print "Purged aircraft %s" % id
            except:
                # NOTE(review): bare except silences all errors, including
                # programming mistakes — malformed SBS lines are expected,
                # but catching ValueError/IndexError would be safer.
                pass
                #sys.stderr.write("Ignored %s" % line)
def theremin(host, port, mylat, mylon):
    """Connect to a dump1090 SBS feed and process position lines forever.

    The socket is always closed on exit via the try/finally.  The print is
    the function-call form (same output on Python 2, valid on Python 3).
    """
    print("Connect to %s:%d" % (host, port))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    fp = sock.makefile()
    try:
        while True:
            line = fp.readline()
            process_line(line, mylat, mylon)
    finally:
        sock.close()
def main():
    """Parse command-line options and start the aircraft theremin."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--host", required=True,
                            help="IP address or hostname of host running dump1090")
    arg_parser.add_argument("-p", "--port", type=int, required=True,
                            help="Port for dump1090 server")
    arg_parser.add_argument("--lat", type=float, required=True,
                            help="Your latitude")
    arg_parser.add_argument("--lon", type=float, required=True,
                            help="Your longitude")
    options = arg_parser.parse_args()
    theremin(options.host, options.port, options.lat, options.lon)
# Script entry point (does not run on import).
if __name__ == "__main__":
    main()
| Python | 0 | |
42753fc71b6a7cbe8697ba0eb053fdbc39c852a1 | add test_eval | misc/test_eval.py | misc/test_eval.py |
# eval
def main():
    """Demonstrate eval() on a dict literal, then execute another script."""
    dict_string = "{'Define1':[[63.3,0.00,0.5,0.3,0.0],[269.3,0.034,1.0,1.0,0.5]," \
                  "[332.2,0.933,0.2,0.99920654296875,1],[935.0,0.990,0.2,0.1,1.0]]," \
                  "'Define2':[[63.3,0.00,0.5,0.2,1.0],[269.3,0.034,1.0,0.3,0.5]," \
                  "[332.2,0.933,0.2, 0.4,0.6],[935.0,0.990,1.0, 0.5,0.0]],}"
    # eval of a trusted, hard-coded literal (the point of this demo).
    # WARNING: never eval untrusted input; ast.literal_eval is the safe
    # alternative for plain literals like this one.
    parsed = dict_string and eval(dict_string)  # renamed from 'dict' to avoid shadowing the builtin
    print("keys: ", parsed.keys())
    print("Define1 value ", parsed['Define1'])
    # execfile() exists only on Python 2; this form works on 2 and 3.
    exec(open("test_list.py").read())
# Run the demo when executed directly.
if __name__ == '__main__':
    main()
| Python | 0 | |
293f0dde7f329399648317b8d67322604f2e9292 | Add window_title_async.py module | py3status/modules/window_title_async.py | py3status/modules/window_title_async.py | """
Display the current window title with async update.
Uses asynchronous update via i3 IPC events.
Provides instant title update only when it required.
Configuration parameters:
- format : format of the title, default: "{title}".
- empty_title : string that will be shown instead of the title
when the title is hidden, default: 500 spaces.
- always_show : do not hide the title when it can be already
visible (e.g. in tabbed layout), default: False.
Requires:
- i3ipc (https://github.com/acrisci/i3ipc-python)
@author Anon1234 https://github.com/Anon1234
@license BSD
"""
from threading import Thread
import i3ipc
class Py3status:
    """py3status module showing the focused window title, updated via i3 IPC events."""
    # Output format; {title} is replaced by the current window title.
    format = "{title}"
    # Shown instead of the title when it should be hidden; the long run of
    # spaces keeps the bar segment from collapsing.
    empty_title = " " * 500
    # If True, show the title even when i3 already displays it itself
    # (normal border title bar, or tabbed/stacked layout headers).
    always_show = False
    def __init__(self):
        self.title = self.empty_title
        # we are listening to i3 events in a separate thread
        t = Thread(target=self._loop)
        t.daemon = True
        t.start()
    def _loop(self):
        """Background thread: subscribe to i3 events and keep self.title current."""
        def get_title(conn):
            # Return the focused window's title, or empty_title if i3
            # already shows a title for it.
            tree = conn.get_tree()
            w = tree.find_focused()
            p = w.parent
            # dont show window title when the window already has means
            # to display it
            if not self.always_show and (w.border == "normal" or
                    (p.layout in ("stacked", "tabbed") and len(p.nodes) > 1)):
                return self.empty_title
            else:
                return w.name
        def update_title(conn, e):
            # catch only focused window title updates
            title_changed = hasattr(e, "container") and e.container.focused
            # check if we need to update title due to changes
            # in the workspace layout
            layout_changed = (hasattr(e, "binding") and (
                e.binding.command.startswith("layout") or
                e.binding.command.startswith("border")))
            if title_changed or layout_changed:
                self.title = get_title(conn) or self.empty_title
        def clear_title(*args):
            # Reset the displayed title to the placeholder.
            self.title = self.empty_title
        conn = i3ipc.Connection()
        self.title = get_title(conn)  # set title on startup
        # The order of following callbacks is important!
        # clears the title on empty ws
        conn.on('workspace::focus', clear_title)
        # clears the title when the last window on ws was closed
        conn.on("window::close", clear_title)
        # listens for events which can trigger the title update
        conn.on("window::title", update_title)
        conn.on("window::focus", update_title)
        conn.on("binding", update_title)
        conn.main()  # run the event loop
    def window_title(self, i3s_output_list, i3s_config):
        """py3status entry point: build the bar segment for the current title."""
        resp = {
            'cached_until': 0,  # update ASAP
            'full_text': self.format.format(title=self.title),
        }
        # Pass through optional display attributes if the user configured them.
        for option in ('min_width', 'align', 'separator'):
            try:
                resp[option] = getattr(self, option)
            except AttributeError:
                continue
        return resp
if __name__ == "__main__":
    """
    Test this module by calling it directly.
    """
    # NOTE: prints one line per second forever; stop with Ctrl-C.
    from time import sleep
    x = Py3status()
    while True:
        print(x.window_title([], {}))
        sleep(1)
| Python | 0.000004 | |
a9da84352d6ff8b26a8e25ac9d15d5737c84225f | Add problem 12 | problem_12.py | problem_12.py | from crypto_library import ecb_aes
from problem_11 import distinguish_encryption_mode
from string import printable
'''
from crypto_library import BLOCKSIZE
import random
ENCRYPTION_KEY = ''.join(random.choice(printable) for _ in range(BLOCKSIZE))
'''
def new_encryption_oracle(adversary_input):
    """ECB oracle: encrypt attacker-controlled input followed by a fixed secret suffix.

    The key is a hard-coded constant (generated once; see the commented
    block above), so identical inputs always produce identical ciphertexts —
    the property the byte-at-a-time ECB attack depends on.  The suffix is
    the base64-encoded secret the attack recovers.
    """
    ENCRYPTION_KEY = ',y!3<CWn@1?wwF]\x0b'
    unknown_input = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK'
    return ecb_aes(adversary_input+unknown_input, ENCRYPTION_KEY)
def find_blocksize():
    """Infer the cipher block size from ciphertext length jumps.

    Grow the input one byte at a time; each time the padded plaintext
    crosses a block boundary the ciphertext grows by exactly one block.
    The difference between two consecutive jumps is the block size.
    """
    probe = ''
    last_len = len(new_encryption_oracle(probe))
    seen_first_jump = False
    while True:
        probe += '0'
        cur_len = len(new_encryption_oracle(probe))
        if cur_len > last_len:
            if seen_first_jump:
                return cur_len - last_len
            seen_first_jump = True
        last_len = cur_len
def find_unknown_text_length(blocksize):
    """Return the byte length of the oracle's hidden suffix.

    Grow the attacker input until the ciphertext jumps to a new block: at
    that moment our input has exactly filled the padding, so the suffix
    length is the new ciphertext length minus our input and one block.
    """
    probe = ''
    baseline = len(new_encryption_oracle(probe))
    while True:
        probe += '0'
        grown = len(new_encryption_oracle(probe))
        if grown > baseline:
            return grown - len(probe) - blocksize
def find_single_ecb_character(blocksize, decrypted, unknown_text_length):
    """Recover the next secret byte via the byte-at-a-time ECB attack.

    Pads the input so the next unknown byte lands at the end of a block,
    then tries every printable character until the guessed block matches
    the reference block.  Uses // (same result as / on Python 2 ints, and
    correct on Python 3), and computes the reference ciphertext once — the
    original re-queried the oracle for it on every guess.
    """
    # Pad so the next unknown byte is the last byte of a block.
    input_padding = '0' * (blocksize * (unknown_text_length // blocksize + 1) - len(decrypted) - 1)
    test_padding = input_padding + decrypted
    block_position = len(test_padding) // blocksize
    # Reference ciphertext does not depend on the guessed character, so
    # hoist it out of the guessing loop (halves the oracle calls).
    ciphertext = new_encryption_oracle(input_padding)
    cipher_blocks = [ciphertext[i * blocksize:(i + 1) * blocksize]
                     for i in range(len(ciphertext) // blocksize)]
    for test_char in printable:
        guess = test_padding + test_char
        guess_ciphertext = new_encryption_oracle(guess)
        guess_blocks = [guess_ciphertext[i * blocksize:(i + 1) * blocksize]
                        for i in range(len(guess_ciphertext) // blocksize)]
        if guess_blocks[block_position] == cipher_blocks[block_position]:
            return test_char
if __name__ == '__main__':
    blocksize = find_blocksize()
    unknown_text_length = find_unknown_text_length(blocksize)
    # Three identical input blocks make ECB emit repeated ciphertext blocks,
    # which distinguish_encryption_mode detects.
    chosen_input = '0'*(3*blocksize)
    detection_ciphertext = new_encryption_oracle(chosen_input)
    encryption_mode = distinguish_encryption_mode(detection_ciphertext)
    if encryption_mode == 'ecb':
        decrypted = ''
        # Recover the hidden suffix one byte at a time.
        while len(decrypted) < unknown_text_length:
            decrypted += find_single_ecb_character(blocksize, decrypted, unknown_text_length)
        # Python 2 only: print statement and str.decode('base64').
        print decrypted.decode('base64')
| Python | 0.000203 | |
0ecc153d3946258f7daddd48bfc2870cb497b5db | Add IPlugSession interface | pyramid_pluggable_session/interfaces.py | pyramid_pluggable_session/interfaces.py | from zope.interface import Interface
class IPlugSession(Interface):
""" In interface that describes a pluggable session
"""
def loads(session, request):
""" This function given a ``session`` and ``request`` should using the
``session_id`` attribute of the ``session``
This function should return either the opaque session information or None.
"""
def dumps(session, request, session_data):
""" This function given a ``session`` and ``request`` should using the
``_session_id`` attribute of the ``session`` write the session
information, with the ``session_id`` being used as a unique identifier,
any previously stored session data should overwritten. ``session_data``
is an opaque object, it's contents are a serialised version of the
session data.
"""
| Python | 0 | |
a0d8eff20cfd8b60be005e31692af74837ca16f5 | test math.ceil() function | pythonPractiseSamples/mathExcercises.py | pythonPractiseSamples/mathExcercises.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Damian Ziobro <damian@xmementoit.com>
import unittest
import math
class TestMathMethods(unittest.TestCase):
def setUp(self):
self.number = 3.5
def test_ceil(self):
self.assertEqual(math.ceil(self.number), 4);
if __name__ == "__main__":
print ("running unittests for math")
unittest.main();
| Python | 0.000099 | |
5d2af781b84676815a2e742bf1acc4c5633ed46e | Create exponential_weathering_integrated.py | landlab/components/weathering/exponential_weathering_integrated.py | landlab/components/weathering/exponential_weathering_integrated.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Created on Fri Apr 8 08:32:48 2016.
@author: RCGlade
Integrated version created by D. Ward on Tue Oct 27 2020
"""
import numpy as np
from landlab import Component
class ExponentialWeathererIntegrated(Component):
r"""
This component implements exponential weathering of bedrock on hillslopes.
Uses exponential soil production function in the style of Ahnert (1976).
Consider that :math:`w_0` is the maximum soil production rate and
that :math:`d^*` is the characteristic soil production depth. The
soil production rate :math:`w` is given as a function of the soil
depth :math:`d`,
.. math::
w = w_0 \exp{-\frac{d}{d^*}} \;.
The `ExponentialWeatherer` only calculates soil production at core nodes.
The `ExponentialWeathererIntegrated` uses the analytical solution for the
amount of soil produced by an exponential weathering function over a
timestep dt, and returns the thickness of bedrock weathered considering
the reduction in rate over the timestep due to the increasing depth.
This enables accuracy over arbitrarily large timesteps, and better
compatiblity with run_one_step().
- this should maintain the field output behavior of the original, but add a new return field for the weathered thickness.
- density adjustments are expected to be handled outside of this component
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import ExponentialWeatherer
>>> mg = RasterModelGrid((5, 5))
>>> soilz = mg.add_zeros("soil__depth", at="node")
>>> soilrate = mg.add_ones("soil_production__rate", at="node")
>>> expw = ExponentialWeatherer(mg)
>>> expw.calc_soil_prod_rate()
>>> np.allclose(mg.at_node['soil_production__rate'], 1.)
True
References
----------
**Required Software Citation(s) Specific to this Component**
Barnhart, K., Glade, R., Shobe, C., Tucker, G. (2019). Terrainbento 1.0: a
Python package for multi-model analysis in long-term drainage basin
evolution. Geoscientific Model Development 12(4), 1267--1297.
https://dx.doi.org/10.5194/gmd-12-1267-2019
**Additional References**
Ahnert, F. (1976). Brief description of a comprehensive three-dimensional
process-response model of landform development Z. Geomorphol. Suppl. 25,
29 - 49.
Armstrong, A. (1976). A three dimensional simulation of slope forms.
Zeitschrift für Geomorphologie 25, 20 - 28.
"""
_name = "ExponentialWeathererIntegrated"
_unit_agnostic = True
_cite_as = """
@article{barnhart2019terrain,
author = {Barnhart, Katherine R and Glade, Rachel C and Shobe, Charles M and Tucker, Gregory E},
title = {{Terrainbento 1.0: a Python package for multi-model analysis in long-term drainage basin evolution}},
doi = {10.5194/gmd-12-1267-2019},
pages = {1267---1297},
number = {4},
volume = {12},
journal = {Geoscientific Model Development},
year = {2019},
}
"""
_info = {
"soil__depth": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Depth of soil or weathered bedrock",
},
"soil_production__rate": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m/yr",
"mapping": "node",
"doc": "rate of soil production at nodes",
},
"soil_production__dt_total_depth": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "thickness of bedrock weathered at nodes",
}
def __init__(
self, grid, soil_production__maximum_rate=1.0, soil_production__decay_depth=1.0
):
"""
Parameters
----------
grid: ModelGrid
Landlab ModelGrid object
soil_production__maximum_rate : float
Maximum weathering rate for bare bedrock
soil_production__decay_depth : float
Characteristic weathering depth
"""
super().__init__(grid)
# Store grid and parameters
self._wstar = soil_production__decay_depth
self._w0 = soil_production__maximum_rate
# Create fields:
# soil depth
self._depth = grid.at_node["soil__depth"]
# weathering rate
if "soil_production__rate" in grid.at_node:
self._soil_prod_rate = grid.at_node["soil_production__rate"]
else:
self._soil_prod_rate = grid.add_zeros("soil_production__rate", at="node")
# weathering total over dt
if "soil_production__dt_total_depth" in grid.at_node:
self._soil_prod_rate = grid.at_node["soil_production__dt_total_depth"]
else:
self._soil_prod_rate = grid.add_zeros("soil_production__dt_total_depth", at="node")
def calc_soil_prod_rate(self):
"""Calculate soil production rate."""
# apply exponential function
self._soil_prod_rate[self._grid.core_nodes] = self._w0 * np.exp(
-self._depth[self._grid.core_nodes] / self._wstar
)
def calc_dt_production_total(self,dt):
"""Calculate integrated production over 1 timestep dt"""
def run_one_step(self,dt):
"""
Parameters
----------
dt: float
Used only for compatibility with standard run_one_step.
"""
self.calc_soil_prod_rate()
self.calc_dt_production_total(dt)
@property
def maximum_weathering_rate(self):
"""Maximum rate of weathering (m/yr)."""
return self._w0
@maximum_weathering_rate.setter
def maximum_weathering_rate(self, new_val):
if new_val <= 0:
raise ValueError("Maximum weathering rate must be positive.")
self._w0 = new_val
| Python | 0.002747 | |
44130357b98001790547d53b7e1080e79842a058 | add group recorded | test_add_group.py | test_add_group.py | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/group.php")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("new")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("new")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("new")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
4569c22d2d0245641e0c2696f798f273405c6bee | Test recorded and exported to the project | test_add_group.py | test_add_group.py | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
1ce8285228c29370ad4230f7968abdd7436ff250 | update nth stair | IK/DP/nth_stair.py | IK/DP/nth_stair.py | # http://www.geeksforgeeks.org/count-ways-reach-nth-stair/
# This problem is simpl extension of Fibonacci Number
# Case 1 when person can take 1 or 2 steps
def fibonacci_number(n):
if n <= 1:
return n
return fibonacci_number(n-1) + fibonacci_number(n-2)
def count_ways(s):
# ways(1) = fib(2) = 1
# ways(2) = fib(3) = 2
# ways(3) = fib(4) = 3
return fibonacci_number(s+1)
# generalized version
# if person can take m steps (1,2,3.....m-1,m)
def count_ways_util(n,m):
if n <= 1:
return n
res = 0
i = 1
while i <= m and i <= n:
res += count_ways_util(n-i,m)
i += 1
return res
def count_ways_generalize(s,m):
return count_ways_util(s+1,m)
if __name__ == "__main__":
print("Number of ways: ",count_ways_generalize(4,2))
| Python | 0.000001 | |
d5a42bd23e7227e041aa3d748765b056e3294a0d | Create infogan.py | InfoGAN/infogan.py | InfoGAN/infogan.py | # initial python file
| Python | 0 | |
af5c39347863f2804bb1e36cb0bf6f1a049530c2 | add 15-26 | src/training/Core2/Chapter15RegularExpressions/exercise15_26.py | src/training/Core2/Chapter15RegularExpressions/exercise15_26.py | import re
def replace_email(a_string, new_email):
return re.sub('\w+@\w+\.\w+', new_email, a_string)
if __name__ == '__main__':
assert 'wd@wd.wd xx wd@wd.wd b' == replace_email('abc@126.com xx a@133.com b', 'wd@wd.wd')
assert 'abb' == replace_email('abb', 'wd@wd.wd')
print 'all passed.'
| Python | 0.999988 | |
f730a8cfd6700eeedf1cbcc5df8b3b97f918f0fa | Add filterset for tag page, refs #450 | grouprise/features/tags/filters.py | grouprise/features/tags/filters.py | from django.forms.widgets import CheckboxInput
from django_filters import BooleanFilter
from django_filters.widgets import BooleanWidget
from grouprise.features.associations.filters import ContentFilterSet
class TagContentFilterSet(ContentFilterSet):
    """Content filter set for a tag page with an opt-in "tagged only" checkbox."""
    # Unchecked by default; when checked, only content carrying this page's
    # tag is kept.  (Label is user-facing German UI text.)
    tagged_only = BooleanFilter(
        label='nur verschlagwortete Beiträge', widget=CheckboxInput,
        method='filter_tagged_only')
    def __init__(self, *args, tag=None, **kwargs):
        # Remember which tag this page shows; used by filter_tagged_only.
        self.tag = tag
        super().__init__(*args, **kwargs)
    def filter_tagged_only(self, queryset, name, value):
        # django-filter method hook: `value` is the checkbox state.
        if value:
            queryset = queryset.filter(content__taggeds__tag=self.tag)
        return queryset
| Python | 0 | |
42c7db3f9422d38b0d7273ad8f95db8183b69a9c | Add a python version of the lineset_test ... it demonstrates how one has to run eve from python. | tutorials/eve/lineset_test.py | tutorials/eve/lineset_test.py | ## Translated from 'lineset_test.C'.
## Run as: python -i lineset_test.py
import ROOT
ROOT.PyConfig.GUIThreadScheduleOnce += [ ROOT.TEveManager.Create ]
def lineset_test(nlines = 40, nmarkers = 4):
    """Draw a TEveStraightLineSet of random lines, each with random markers.

    Returns the line set after registering it with gEve and redrawing.
    """
    rng = ROOT.TRandom(0)
    extent = 100
    line_set = ROOT.TEveStraightLineSet()
    for line_idx in range(nlines):
        # Random segment inside a cube of half-width `extent`.
        line_set.AddLine(rng.Uniform(-extent, extent), rng.Uniform(-extent, extent), rng.Uniform(-extent, extent),
                         rng.Uniform(-extent, extent), rng.Uniform(-extent, extent), rng.Uniform(-extent, extent))
        marker_count = int(nmarkers * rng.Rndm())
        for _ in range(marker_count):
            line_set.AddMarker(line_idx, rng.Rndm())
    line_set.SetMarkerSize(1.5)
    line_set.SetMarkerStyle(4)
    ROOT.gEve.AddElement(line_set)
    ROOT.gEve.Redraw3D()
    return line_set
if __name__=='__main__':
    # Flush GUI-thread work scheduled at import time, then run the demo.
    ROOT.PyGUIThread.finishSchedule()
    lineset_test()
| Python | 0.000423 | |
73f3fa2657485c4fce812f67c3430be553307413 | Include fixtures for setting up the database | tests/conftest.py | tests/conftest.py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import random
import string
import alembic.config
import alembic.command
import pytest
import sqlalchemy
import sqlalchemy.pool
from six.moves import urllib_parse
def pytest_addoption(parser):
    """Register the --database-url CLI option and the database_url ini setting."""
    group = parser.getgroup("warehouse")
    # _addoption is pytest's private API for attaching an option to a
    # named group -- presumably used here to match pytest's own plugins;
    # TODO confirm against the pytest version pinned by this project.
    group._addoption(
        "--database-url",
        default=None,
        help="The url to connect when creating the test database.",
    )
    parser.addini(
        "database_url",
        "The url to connect when creating the test database.",
    )
@pytest.fixture(scope="session")
def _database(request):
    """Session fixture: create a throwaway PostgreSQL database with the
    warehouse schema applied, and drop it when the session ends.

    Returns the URL of the freshly created test database.  Skips the whole
    test when neither --database-url nor the database_url ini value is set.
    """
    # Imported lazily so merely collecting tests does not import the app.
    from warehouse.application import Warehouse
    def _get_name():
        # Random database name: "warehousetest_" + 7 lowercase/digit chars.
        tag = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for x in range(7)
        )
        return "warehousetest_{}".format(tag)
    def _check_name(engine, name):
        # True if `name` is free (not an existing non-template database).
        with engine.connect() as conn:
            results = conn.execute(
                "SELECT datname FROM pg_database WHERE datistemplate = false"
            )
            return name not in [r[0] for r in results]
    database_url_ini = request.config.getini("database_url")
    database_url_option = request.config.getvalue("database_url")
    if not database_url_ini and not database_url_option:
        pytest.skip("No database provided")
    # Configure our engine so that we can create a database
    # (AUTOCOMMIT because CREATE DATABASE cannot run inside a transaction).
    database_url = database_url_option or database_url_ini
    engine = sqlalchemy.create_engine(
        database_url,
        isolation_level="AUTOCOMMIT",
        poolclass=sqlalchemy.pool.NullPool
    )
    # Make a random database name that doesn't exist
    name = _get_name()
    while not _check_name(engine, name):
        name = _get_name()
    # Create the database
    with engine.connect() as conn:
        conn.execute("CREATE DATABASE {}".format(name))
    # Create a new database_url with the name replaced
    parsed = urllib_parse.urlparse(database_url)
    test_database_url = urllib_parse.urlunparse(
        parsed[:2] + ("/" + name,) + parsed[3:]
    )
    # Create the database schema by running the alembic migrations against
    # an app configured to point at the new database.
    test_engine = sqlalchemy.create_engine(
        test_database_url,
        poolclass=sqlalchemy.pool.NullPool,
    )
    app = Warehouse.from_yaml(
        override={"database": {"url": test_database_url}},
        engine=test_engine,
    )
    with app.engine.connect() as conn:
        conn.execute("CREATE EXTENSION IF NOT EXISTS citext")
    alembic_cfg = alembic.config.Config()
    alembic_cfg.set_main_option(
        "script_location",
        app.config.database.migrations,
    )
    alembic_cfg.set_main_option("url", app.config.database.url)
    alembic.command.upgrade(alembic_cfg, "head")
    test_engine.dispose()
    # Drop the database at the end of the session
    def _drop_database():
        with engine.connect() as conn:
            # Terminate all open connections to the test database
            # (DROP DATABASE fails while sessions are attached).
            conn.execute(
                """SELECT pg_terminate_backend(pid)
                FROM pg_stat_activity
                WHERE datname = %s
                """,
                [name],
            )
            conn.execute("DROP DATABASE {}".format(name))
    request.addfinalizer(_drop_database)
    return test_database_url
@pytest.fixture
def database(request, _database):
    """Per-test fixture: a connection inside a transaction that is always
    rolled back, so tests never leave data behind in the shared database."""
    engine = sqlalchemy.create_engine(
        _database,
        poolclass=sqlalchemy.pool.AssertionPool,
    )
    connection = engine.connect()
    # Anything that calls .connect() on this object gets the very same
    # connection back, so all work shares the one transaction below.
    connection.connect = lambda: connection
    transaction = connection.begin()
    def _teardown():
        # Undo everything the test did, then release all resources.
        transaction.rollback()
        connection.close()
        engine.dispose()
    request.addfinalizer(_teardown)
    return connection
| Python | 0 | |
f681f6ac7764b0944434c69febb2b3b778f2aad7 | add 2 | leetcode/2.py | leetcode/2.py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as reversed-digit linked lists.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Degenerate inputs: return the other list unchanged.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        # One loop replaces the original's three near-identical loops:
        # keep going while either list or a carry digit remains.
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            # divmod keeps integer semantics on Python 2 AND 3; the
            # original's `val / 10` produces a float on Python 3.
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
45686564547ccf1f40516d2ecbcf550bb904d59c | Create lc1032.py | LeetCode/lc1032.py | LeetCode/lc1032.py | import queue
class Node:
    """Trie node for an Aho-Corasick automaton over the 26 lowercase letters.

    ``children`` is allocated lazily (None until the first child is added).
    ``fail`` is the failure link set by buildFail; ``end`` marks that some
    dictionary word ends at (or via a failure link of) this node.
    """

    def __init__(self, s=''):
        self.s = s              # prefix string this node represents
        self.end = False        # does a word end here?
        self.fail = None        # failure link (set by buildFail)
        self.children = None    # lazily allocated list of 26 child slots

    def get(self, index):
        """Return the child for letter *index*, or None if absent."""
        return None if self.children is None else self.children[index]

    def set(self, index, node):
        """Install *node* as the child for letter *index*."""
        if self.children is None:
            self.children = [None] * 26
        self.children[index] = node

    def add(self, index):
        """Return the child for letter *index*, creating it if needed."""
        if self.children is None:
            self.children = [None] * 26
        if self.children[index] is None:
            self.children[index] = Node(self.s + chr(ord('a') + index))
        return self.children[index]

    def next(self, index):
        """Follow failure links until a node with a child for *index* is
        found, then return that child (the automaton transition)."""
        node = self
        while node.get(index) is None:
            node = node.fail
        return node.get(index)
def buildFail(root):
    """Attach Aho-Corasick failure links to every trie node, breadth-first.

    Also propagates the ``end`` flag along failure links (so a node matches
    if any suffix of its prefix is a dictionary word), and finally makes the
    root self-looping on missing letters so Node.next() always terminates.
    """
    pending = queue.Queue()
    pending.put(root)
    while not pending.empty():
        node = pending.get()
        if node.children is None:
            continue
        for letter in range(26):
            child = node.get(letter)
            if child is None:
                continue
            if node is root:
                # Depth-1 nodes always fail back to the root.
                child.fail = root
            else:
                # Walk the parent's failure chain until a node with a
                # `letter` child is found (or the chain runs out).
                probe = node.fail
                while probe is not None and probe.get(letter) is None:
                    probe = probe.fail
                child.fail = root if probe is None else probe.get(letter)
            # A shorter word ending on the failure path also ends here.
            if not child.end and child.fail.end:
                child.end = True
            pending.put(child)
    # Root absorbs every letter it has no child for, and fails to itself.
    for letter in range(26):
        if root.get(letter) is None:
            root.set(letter, root)
    root.fail = root
def c2i(c):
    """Return the 0-based alphabet index of lowercase letter *c*."""
    return ord(c) - 97  # 97 == ord('a')
class StreamChecker:
    """LeetCode 1032: report, per streamed letter, whether any dictionary
    word ends at the current position (Aho-Corasick automaton)."""
    def __init__(self, words: List[str]):
        """Build the trie over *words* and wire up failure links."""
        self.words = words
        root = Node()
        # Insert every word, marking its final node.
        for i in range(len(words)):
            p = root
            for j in range(len(words[i])):
                p = p.add(c2i(words[i][j]))
            p.end = True
        #print(root)
        buildFail(root)
        # Current automaton state; advanced by one letter per query().
        self.cur = root
    def query(self, letter: str) -> bool:
        """Consume one letter; True if some word ends at this stream position."""
        #print('cur', self.cur, letter)
        self.cur = self.cur.next(c2i(letter))
        #print("end", self.cur.end)
        return self.cur.end
# Your StreamChecker object will be instantiated and called as such:
# obj = StreamChecker(words)
# param_1 = obj.query(letter)
| Python | 0.000002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.