commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
6e199bec3816a4a36d891e72f8de9819848bda65 | Define ResourceDuplicatedDefinedError. | soasme/electro | electro/errors.py | electro/errors.py | # -*- coding: utf-8 -*-
class ResourceDuplicatedDefinedError(Exception):
pass
| mit | Python | |
f527eeb4792ea5630965d72ae73b0331fd465dea | add indicator migration | mercycorps/TolaActivity,toladata/TolaActivity,mercycorps/TolaActivity,toladata/TolaActivity,mercycorps/TolaActivity,mercycorps/TolaActivity,toladata/TolaActivity,toladata/TolaActivity | indicators/migrations/0002_auto_20170105_0205.py | indicators/migrations/0002_auto_20170105_0205.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-01-05 10:05
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('indicators', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='collecteddata',
name='achieved',
field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Achieved'),
),
migrations.AlterField(
model_name='collecteddata',
name='targeted',
field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Targeted'),
),
migrations.AlterField(
model_name='historicalcollecteddata',
name='achieved',
field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Achieved'),
),
migrations.AlterField(
model_name='historicalcollecteddata',
name='targeted',
field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Targeted'),
),
]
| apache-2.0 | Python | |
103de382d7c9c0dde7aa4bc2f4756dc71ee45335 | define pytest fixture for path to PR2 database | hurwitzlab/muscope-18SV4,hurwitzlab/muscope-18SV4 | test/conftest.py | test/conftest.py | # content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("--uchime-ref-db-fp", action="store", help="path to PR2 database")
@pytest.fixture
def uchime_ref_db_fp(request):
return request.config.getoption("--uchime-ref-db-fp")
| mit | Python | |
d15564cf234def0f37c958915e0d7a99cad439e4 | add a test for overflow | kived/pyjnius,jk1ng/pyjnius,Konubinix/pyjnius,niavlys/pyjnius,ibobalo/pyjnius,physion/pyjnius,benson-basis/pyjnius,kived/pyjnius,ibobalo/pyjnius,physion/pyjnius,kivy/pyjnius,benson-basis/pyjnius,jk1ng/pyjnius,aolihu/pyjnius,jaykwon/pyjnius,jelford/pyjnius,Konubinix/pyjnius,aolihu/pyjnius,kivy/pyjnius,kivy/pyjnius,physion/pyjnius,jelford/pyjnius,pombredanne/pyjnius,niavlys/pyjnius,pombredanne/pyjnius,jaykwon/pyjnius | tests/test_jnitable_overflow.py | tests/test_jnitable_overflow.py | # run it, and check with Java VisualVM if we are eating too much memory or not!
from jnius import autoclass
Stack = autoclass('java.util.Stack')
i = 0
while True:
i += 1
stack = Stack()
stack.push('hello')
| mit | Python | |
5d3918c885f430e79e8283533ad5eb3a84ffecc7 | Add migration code for updating lease status | openstack/blazar,stackforge/blazar,openstack/blazar,ChameleonCloud/blazar,ChameleonCloud/blazar,stackforge/blazar | blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py | blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py | # Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
| apache-2.0 | Python | |
308e34b686686d3c42466012c864d7cc5d0f6799 | Create go_fixup_fptrs.py | williballenthin/idawilli | scripts/go/go_fixup_fptrs.py | scripts/go/go_fixup_fptrs.py | """
when IDA's auto-discovery of functions in 64-bit Windows Go executables fails,
scan for global (.rdata) pointers into the code section (.text) and assume these are function pointers.
"""
import idc
import ida_name
import ida_auto
import ida_bytes
import idautils
def enum_segments():
for segstart in idautils.Segments():
segend = idc.get_segm_end(segstart)
segname = idc.get_segm_name(segstart)
yield segstart, segend, segname
def find_pointers(start, end):
for va in range(start, end-0x8):
ptr = ida_bytes.get_qword(va)
if idc.get_segm_start(ptr) == idc.BADADDR:
continue
yield va, ptr
def is_head(va):
return ida_bytes.is_head(idc.get_full_flags(va))
def get_head(va):
if is_head(va):
return va
else:
return idc.prev_head(va)
def is_code(va):
if is_head(va):
flags = idc.get_full_flags(va)
return ida_bytes.is_code(flags)
else:
head = get_head(va)
return is_code(head)
def is_unknown(va):
return ida_bytes.is_unknown(idc.get_full_flags(va))
def main():
for segstart, segend, segname in enum_segments():
if segname not in ('.rdata', ):
continue
for src, dst in find_pointers(segstart, segend):
if idc.get_segm_name(dst) != ".text":
continue
if is_code(dst):
continue
print("new function pointer: 0x%x -> 0x%x" % (src, dst))
ida_auto.auto_make_code(dst)
ida_auto.auto_make_proc(dst)
ida_bytes.del_items(src, 8)
ida_bytes.create_data(src, idc.FF_QWORD, 8, idc.BADADDR)
# this doesn't seem to always work :-(
idc.op_plain_offset(src, -1, 0)
ida_name.set_name(src, "j_%s_%x" % (src, dst))
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
08fcba713315b4ac29ed30f437b7c5c0b1da5a9d | Create make_upper_case.py | joshavenue/python_notebook | make_upper_case.py | make_upper_case.py | def sillycase(string):
half = round(len(string)/2) // Find the half index
return string[:half].lower() + string[half:].upper() // If you only want certain letters to be upper case //
| unlicense | Python | |
9e986214aaf6beef5b1778254cc348006a828c04 | Create MaximalSquare_001.py | Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/codirit,cc13ny/algo,cc13ny/algo,Chasego/codirit,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/codi,cc13ny/Allin,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codi,Chasego/cod,Chasego/cod,Chasego/cod | leetcode/221-Maximal-Square/MaximalSquare_001.py | leetcode/221-Maximal-Square/MaximalSquare_001.py | # brute force, optimized later
class Solution(object):
def maximalSquare(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if len(matrix) == 0 or len(matrix[0]) == 0:
return 0
maxv = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == '1':
area = self.getArea(i, j, matrix)
if area > maxv:
maxv = area
return maxv
def getArea(self, i, j, matrix):
m, n = len(matrix), len(matrix[0])
length, flag = 1, False
for l in range(1, min(m - i, n - j)):
#print 'len: ' + str(l)
for k in range(j, j + l + 1):
#print i + 1, k
if matrix[i + l][k] == '0':
flag = True
break
for k in range(i, i + l + 1):
#print k, j + 1
if matrix[k][j + l] == '0':
flag = True
break
if flag:
break
length += 1
#print i, j, length * length
return length * length
| mit | Python | |
6b6c5b836b282c53fc5a337942d187769d0a87ed | Add cli module. | liwushuo/fapistrano | fapistrano/cli.py | fapistrano/cli.py | # -*- coding: utf-8 -*-
import click
import yaml
from fabric.api import env as fabenv, local, execute
from fapistrano.app import init_cli
from fapistrano.utils import with_configs, register_role, register_env, _apply_env_role_config
from fapistrano import deploy
@click.group()
@click.option('-d', '--deployfile', default='deploy.yml')
def fap(deployfile):
with open(deployfile, 'rb') as f:
conf = yaml.load(f.read())
init_cli(conf)
@fap.command()
@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')
@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')
def release(role, env):
fabenv.role = role
fabenv.env = env
_apply_env_role_config()
execute(deploy.release)
@fap.command()
@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')
@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')
def rollback(role, env):
fabenv.role = role
fabenv.env = env
_apply_env_role_config()
execute(deploy.rollback)
@fap.command()
@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')
@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')
def restart(role, env):
fabenv.role = role
fabenv.env = env
_apply_env_role_config()
execute(deploy.rollback)
if __name__ == '__main__':
fap()
| mit | Python | |
9341d2192da8cbaea734641aec9567a1035aa1ee | Add suffix list | brinchj/RndPhrase,brinchj/RndPhrase,brinchj/RndPhrase | scripts/update-suffixlist.py | scripts/update-suffixlist.py | #!/usr/bin/env python
import os
import urllib2 as urllib
import anyjson as json
URL_LIST = "http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/src/effective_tld_names.dat?raw=1"
# generate json
print 'downloading suffix list..'
rules = {}
lst = urllib.urlopen(URL_LIST).read()
print 'processing list..'
lines = lst.split('\n')
for i,line in enumerate(lines):
if line[:2] == '//' or len(line) == 0:
continue # skip comments
EXCEPT = line[0] == '!'
if EXCEPT: # exception rule
line = line[1:]
doms = line.split('.')
lst = rules
# find node to update
for d in reversed(doms):
node = lst.get(d, None)
if not node:
node = {}
lst[d] = node
lst = node
if EXCEPT:
lst['!'] = 1;
json = json.serialize(rules).replace(' ','')
# functions for checking domains
def get_reg_domain(rules, doms):
node = rules.get(doms[0],None)
if node == None: node = rules.get('*',None)
if node == None or (len(node) == 1 and node['!'] == 1):
return doms[0]
elif len(doms) == 1:
return None
reg = get_reg_domain(node, doms[1:])
if(reg != None):
return '%s.%s' % (reg, doms[0])
def get_host(domain):
doms = list(reversed(domain.split('.')))
return get_reg_domain(rules, doms)
# test the list
print 'testing list..'
tests = {'qwe.parliament.co.uk': 'parliament.co.uk',
'foo.bar.version2.dk': 'version2.dk',
'ecs.soton.ac.uk': 'soton.ac.uk'}
for (test,res) in tests.items():
assert get_host(test) == res
# output new list as javascript
print 'writing list..'
file('../data/suffix-list.js','w').write('suffix_list=%s;' % json);
print 'done.'
| bsd-2-clause | Python | |
a1d95beccd0f0f332005cd133bdd660fbe649467 | Add a benchmarking script. | materialsvirtuallab/pyhull,materialsvirtuallab/pyhull,materialsvirtuallab/pyhull,materialsvirtuallab/pyhull | benchmarking/perf_cmp.py | benchmarking/perf_cmp.py | #!/usr/bin/env python
"""
TODO: Change the module doc.
"""
from __future__ import division
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Beta"
__date__ = "11/19/12"
import numpy as np
from scipy.spatial import Delaunay
from pyhull.qconvex import get_vertices
from pymatgen.command_line.qhull_caller import qconvex
data = np.random.randn(100,3)
def scipy_test():
return Delaunay(data).convex_hull
def pyhull_test():
return get_vertices(data)
def pymatgen_ext_test():
return qconvex(data)
if __name__ == "__main__":
import timeit
print "Scipy results"
print timeit.timeit("scipy_test()",
setup="from __main__ import scipy_test",
number=1)
print
print "pymatgen_ext_test results"
print timeit.timeit("pymatgen_ext_test()",
setup="from __main__ import pymatgen_ext_test",
number=1)
print
print "pyhull results"
print timeit.timeit("pyhull_test()",
setup="from __main__ import pyhull_test",
number=1)
print | mit | Python | |
8de30c6d4b5784af406d75e04feeb7c6431243d6 | add fermi setup | ceb8/astroquery,imbasimba/astroquery,imbasimba/astroquery,ceb8/astroquery | astroquery/fermi/setup_package.py | astroquery/fermi/setup_package.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
paths_test = [os.path.join('data', '*.html')]
return {
'astroquery.fermi.tests': paths_test,
}
| bsd-3-clause | Python | |
fc32e0d04569dab20cfc1b3b991bfbd8b067d62e | Add the manager to persist the data received from the OSM adapters | SeGarVi/moveon-web,SeGarVi/moveon-web,SeGarVi/moveon-web | moveon/managers.py | moveon/managers.py | from django.db import models
from moveon.models import Line, Station, Node
class OSMLineManager(models.Manager):
def __init__(self, osmline):
self.osmline = osmline
self.stations = dict()
self.routes = []
self.nodes = dict()
self.stretches = dict()
def save(self):
self._save_line()
self._save_stations()
self._assign_stations_to_line()
self._save_nodes()
self._save_routes()
self._create_default_stretches()
self._save_route_points()
def _save_line(self)
companymanager = CompanyManager()
company = companymanager.get_by_id(self.osmline['company'])
transportmanager = TransportManager()
transport = transportmanager.get_by_id(self.osmline['transport'])
self.line = Line.from_osm_adapter_data(self.osmlines)
self.line.company = company
self.line.transport = transport
self.line.save()
def _save_stations(self):
stationmanager = StationManager()
for osmstation in self.osmline['stations']:
station = stationmanager.get_by_id(osmstation['osmid'])
if not station:
station = Station.from_osm_adapter_data(osmstation)
station.save()
self.stations[station.osmid] = station
def _assign_stations_to_line(self):
self.line.stations = self.stations.values()
self.line.save()
def _save_nodes(self)
nodemanager = NodeManager()
for osmnode in self.osmline['route_points']:
node = nodemanager.get_by_id(osmnode['osmid'])
if not node:
node = Node.from_osm_adapter_data(osmnode)
if 'near_station' in osmnode:
node.near_station = self.stations[osmnode['near_station']]
node.save()
self.nodes[node.osmid] = node
def _save_routes(self):
for osmroute in self.osmline['routes']:
route = Route.from_osm_adapter_data(osmroute)
route.line = self.line
route.save()
self.routes.append(route)
def _create_default_stretches(self):
for route in self.routes
stretch = Stretch()
stretch.route = route
stretch.save()
self.stretches[route.osmid] = stretch
def _save_route_points(self):
for osmroute in self.osmline['routes']:
for osmroutepoint in osmroute['route_points']
routepoint = RoutePoint.from_osm_adapter_data(osmroutepoint)
routepoint.node = self.nodes[osmroutepoint['node_id']]
routepoint.node = self.stretches[osmroute['osmid']]
routepoint.save()
class CompanyManager(models.Manager):
def get_by_id(self, company_id):
return self.get(id = company_id)
class TransportManager(models.Manager):
def get_by_id(self, transport_id):
return self.get(id = transport_id)
class StationManager(models.Manager):
def get_by_id(self, station_id):
return self.get(id = station_id)
class LineManager(models.Manager):
class RouteManager(models.Manager):
class TimeManager(models.Manager):
class TimeTableManager(models.Manager):
class NodeManager(models.Manager):
def get_by_id(self, station_id):
return self.get(id = station_id)
class StretchManager(models.Manager):
class RoutePointManager(models.Manager):
| agpl-3.0 | Python | |
59a05f592ffc4423023f1803efcf427896ab5d41 | Add lc0695_max_area_of_island.py | bowen0701/algorithms_data_structures | lc0695_max_area_of_island.py | lc0695_max_area_of_island.py | """Leetcode 695. Max Area of Island
Medium
URL: https://leetcode.com/problems/max-area-of-island/
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's
(representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array.
(If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11,
because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
b8b191a380ef4ab0701793c2e0ac664b05c4c505 | Add simple word2vec to train model | chuajiesheng/twitter-sentiment-analysis | analysis/word2vec.py | analysis/word2vec.py | import numpy as np
import re
from nltk.corpus import stopwords
import nltk
import logging
from gensim.models import word2vec
def get_dataset():
files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']
x = []
for file in files:
s = []
with open(file, 'r') as f:
for line in f:
s.append(line.strip())
assert len(s) == 1367
x.extend(s)
y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
return x, y
def sentence_to_wordlist(sentence, remove_stopwords=False):
review_text = re.sub('[^a-zA-Z]', ' ', sentence)
words = review_text.lower().split()
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
return words
def tweet_to_sentences(review, tokenizer, remove_stopwords=False):
raw_sentences = tokenizer.tokenize(review.strip())
sentences = []
for raw_sentence in raw_sentences:
if len(raw_sentence) > 0:
sentences.append(sentence_to_wordlist(raw_sentence, remove_stopwords))
return sentences
punkt_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
X, Y = get_dataset()
sentences = []
print('Parsing sentences from training set')
for tweet in X:
sentences += tweet_to_sentences(tweet, punkt_tokenizer)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
num_features = 300 # Word vector dimensionality
min_word_count = 10 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
print('Training model...')
model = word2vec.Word2Vec(sentences, workers=num_workers, size=num_features, min_count=min_word_count, window=context, sample=downsampling)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model_name = '300features_40minwords_10context'
model.save(model_name)
import code; code.interact(local=dict(globals(), **locals()))
| apache-2.0 | Python | |
a0333aa80dd6a6baeb24e32deeecd0288419328e | Initialize P3_seatingCards | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter17/PracticeProjects/P3_seatingCards.py | books/AutomateTheBoringStuffWithPython/Chapter17/PracticeProjects/P3_seatingCards.py | # Chapter 13 included a practice project to create custom invitations from a list of
# guests in a plaintext file. As an additional project, use the pillow module to
# create images for custom seating cards for your guests. For each of the guests listed
# in the guests.txt, generate an image file with the guest name and some flowery
# decoration.
#
# To ensure that each seating card is the same size, add a black rectangle on the edges
# of the invitation image so that when the image is printed out, there will be a
# guideline for cutting. The PNG files that Pillow produces are set to 72 pixels per
# inch, so a 4×5-inch card would require a 288×360-pixel image.
| mit | Python | |
1e45df8375c4e72257defc82137fa570fbb44249 | add StringOperation to repository | liu0g/python_web,liu0g/python_web | StringOperation.py | StringOperation.py | #encoding = utf-8
__author__ = 'lg'
list1 = ['java','python','ruby','perl','mac']
list2 = ['linux','mac','windows','ruby']
#两个list的交集(法一) 时间复杂度为O(n^2)
def intersect(a,b):
listRes = []
for i in range(len(a)):
for j in range(len(b)):
if a[i] == b[j]:
if a[i] not in listRes:
listRes.append(a[i])
return listRes
#两个list的交集(法二) 时间复杂度为O(n)
def intersect_1(a,b):
listRes = []
for i in range(len(a)):
if a[i] in b:
if a[i] not in listRes:
listRes.append(a[i])
return listRes
#两个list的差集
def minus(a,b):
listRes = []
for i in range(len(a)):
if a[i] not in b:
listRes.append(a[i])
return listRes
# 按字母表输出字符串
def alphabet_output(listPram):
sortedList = []
for i in range(len(listPram)):
sortedStrRes = ''
sortedStrList = sorted(listPram[i])
for j in range(len(sortedStrList)):
sortedStrRes += sortedStrList[j]
sortedList.append(sortedStrRes)
print(sortedList)
# list1 intersect list2
intersectList = intersect_1(list1,list2)
alphabet_output(intersectList)
# list1 minus list2
minusList = minus(list1,list2)
alphabet_output(minusList)
# list2 minus list1
minusList_1 = minus(list2,list1)
alphabet_output(minusList_1)
| mit | Python | |
59d55a5911e99a0886b8c3cc48ee92f247e96e0a | add Voronoi | PKU-Dragon-Team/Datalab-Utilities | Voronoi/Voronoi.py | Voronoi/Voronoi.py | import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
import csv
COUNT_LIMIT = None
SAMPLE_LIMIT = 100
Points = []
with open('cell_info.csv', 'r', encoding='utf_8') as obj_file:
csv_file = csv.reader(obj_file)
for cnt, line in enumerate(csv_file):
if COUNT_LIMIT and cnt >= COUNT_LIMIT: # 只读取前 k 个点
break
Points.append([float(line[1]), float(line[2])]) # make up data points
# 随机抽取 n 个点
if SAMPLE_LIMIT:
points = np.array([Points[i] for i in np.random.choice(len(Points), size = SAMPLE_LIMIT)])
else:
points = np.array(Points)
# compute Voronoi tesselation
vor = Voronoi(points)
# plot
voronoi_plot_2d(vor)
# colorize
for region in vor.regions:
if not -1 in region:
polygon = [vor.vertices[i] for i in region]
plt.fill(*zip(*polygon))
plt.show() | mit | Python | |
de0265b609ab56035544018e368a108b573ae503 | define the index of filters to prune by examining the classification activations | shuang1330/tf-faster-rcnn,shuang1330/tf-faster-rcnn,shuang1330/tf-faster-rcnn,shuang1330/tf-faster-rcnn | tools/prune_with_classification_guidance.py | tools/prune_with_classification_guidance.py | import os.path
import numpy as np
# define th CLASSES and indices
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
class_to_ind = dict(list(zip(CLASSES, list(range(len(CLASSES))))))
SUB_CLAS = ('bicycle', 'bus', 'car','motorbike', 'person', 'train')
# define path for loading the activations_versus_classes array
hm_path = './activations_res/res.npy'
def rankmin(x):
u, inv, counts = np.unique(x, return_inverse=True, return_counts=True)
csum = np.zeros_like(counts)
csum[1:] = counts[:-1].cumsum()
return csum[inv]
def list_normalizer(ori_list):
max_val = ori_list.max()
min_val = ori_list.min()
if max_val == 0:
return ori_list
normalized_list = [(i-min_val)/(max_val-min_val) for i in ori_list]
return normalized_list
def detect_diff_one_layer(norm_hm_one_layer):
interest_average = np.zeros((norm_hm_one_layer.shape[1],))
diff_ind = np.zeros((norm_hm_one_layer.shape[1],))
amplifier = 10
for clas in SUB_CLAS:
ind = class_to_ind[clas]
interest_average[:] += norm_hm_one_layer[ind]
interest_average = interest_average/len(SUB_CLAS)
for clas in CLASSES:
if clas not in SUB_CLAS:
ind = class_to_ind[clas]
temp = amplifier*(norm_hm_one_layer[ind]-interest_average)
# print 'max: %d,min: %d'%(temp.max(),temp.min())
temp[temp<0.5] = 0
temp[temp>0.5] = 1
# diff_ind[clas] = np.argsort(temp)
diff_ind += temp
# diff_ind = np.argsort(diff_ind)
return diff_ind
def detect_diff_all(hm_path):
hm_all = np.load(hm_path).item()
norm_hm_all = {}
hm_ind = {} # dictionary to record the diff_ind for every layer
sub_clas_index = [class_to_ind[i] for i in SUB_CLAS]
for key in hm_all: # for evey layer
norm_hm_all[key] = np.zeros(hm_all[key].shape,np.float32)
for i,sub_list in enumerate(hm_all[key]): # for every row in the layer
norm_hm_all[key][i,:] = list_normalizer(sub_list)
hm_ind[key] = detect_diff_one_layer(norm_hm_all[key]) # [21, 64/...]
return hm_ind
if __name__=='__main__':
hm_sorted = detect_diff_all(hm_path)
for key in hm_sorted:
print key, np.count_nonzero(hm_sorted[key])
| mit | Python | |
5068c02e50c54c08a6991e45584c6c9b9bdd5dba | add import script for Midlothian | chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_midlothian.py | polling_stations/apps/data_collection/management/commands/import_midlothian.py | from data_collection.management.commands import BaseScotlandSpatialHubImporter
class Command(BaseScotlandSpatialHubImporter):
council_id = 'S12000019'
council_name = 'Midlothian'
elections = ['local.midlothian.2017-05-04']
def district_record_to_dict(self, record):
code = str(record[0]).strip()
"""
MN4H is represented as a polygon which sits on top of MN4G
(as opposed to being in an InnerRing inside MN4G).
This means any point which is in MN4H is also in MN4G.
Fortunately MN4H and MN4G share the same polling
station, so in this case we can fix it by just not importing MN4G.
If they didn't use the same polling station, this would be an issue.
"""
if code == 'MN4H':
return None
return super().district_record_to_dict(record)
| bsd-3-clause | Python | |
db846aaa0f35e8888b0b3423539c0a70c9ae16fa | Add Source Files | SAP/lumira-extension-da-googledocs | source/GoogleSpreadsheets.py | source/GoogleSpreadsheets.py | # -*- coding: utf-8 -*-
import sys
import requests
import easygui
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
Mode = enum('PREVIEW', 'EDIT', 'REFRESH')
mode = 0
size = 0
params = ''
key = ''
i = 0
for i in range(len(sys.argv)):
if str(sys.argv[i]).lower() == "-mode" and (i + 1) < len(sys.argv):
if str(sys.argv[i + 1]).lower() == "preview":
mode = Mode.PREVIEW
elif str(sys.argv[i + 1]).lower() == "edit":
mode = Mode.EDIT
elif str(sys.argv[i + 1]).lower() == "refresh":
mode = Mode.REFRESH
elif str(sys.argv[i]).lower() == "-size":
size = int(sys.argv[i + 1])
elif str(sys.argv[i]).lower() == "-params":
params = str(sys.argv[i + 1])
paramslist = params.split(';')
for i in range(len(paramslist)):
if paramslist[i].split('=')[0].lower() == 'key':
key = paramslist[i].split('=')[1]
i += 1
i += 1
def printData(key):
if not key == '':
proxies = []
url = ''.join(['https://docs.google.com/spreadsheet/ccc?key=', key, '&output=csv'])
csv = requests.get(url, proxies=proxies, verify=False)
if csv.headers['Content-Type'] == 'text/csv':
data = csv.content
else:
data = """Error
Error In Header"""
else:
data = """Error
Error In Key"""
print "beginDSInfo"
print """fileName;#;true
csv_first_row_has_column_names;true;true;
csv_separator;,;true
csv_number_grouping;,;true
csv_number_decimal;.;true
csv_date_format;d.M.yyyy;true"""
print ''.join(['key;', key, ';true'])
print "endDSInfo"
print "beginData"
print data
print "endData"
if mode == Mode.PREVIEW:
default = ''
key = easygui.enterbox(msg="Enter GDocs Key", title="Google Docs Key", default=default)
key = key or default
printData(key=key)
elif mode == Mode.EDIT:
default = key
key = easygui.enterbox(msg="Edit GDocs Key", title="Google Docs Key", default=default)
key = key or default
printData(key)
elif mode == Mode.REFRESH:
printData(key)
| apache-2.0 | Python | |
c185786a189b2934e69334089e180c725d59a391 | Add a test that exposes the association copy/paste issue | amolenaar/gaphor,amolenaar/gaphor | tests/test_multiple_associations.py | tests/test_multiple_associations.py | """
Test issues where associations are copied and pasted, deleted, etc.
Scenario's:
* Class and association are pasted in a new diagramg
* Class and association are pasted in a new diagram and original association is deleted
* Class and association are pasted in a new diagram and new association is deleted
* Association is pasted in a new diagram and reconnected to the class (same subject as original)
* Association is pasted and directly deleted
* Class and association are pasted in a new diagram and one end is connected to a different class
"""
import pytest
from gaphor import UML
from gaphor.application import Session
from gaphor.diagram.tests.fixtures import connect
from gaphor.UML import diagramitems
from gaphor.UML.modelfactory import set_navigability
@pytest.fixture
def session():
session = Session()
yield session
session.shutdown()
@pytest.fixture
def element_factory(session):
return session.get_service("element_factory")
@pytest.fixture
def copy(session):
return session.get_service("copy")
@pytest.fixture
def diagram(element_factory):
return element_factory.create(UML.Diagram)
@pytest.fixture
def class_and_association_with_copy(diagram, element_factory, copy):
c = diagram.create(
diagramitems.ClassItem, subject=element_factory.create(UML.Class)
)
a = diagram.create(diagramitems.AssociationItem)
connect(a, a.handles()[0], c)
connect(a, a.handles()[1], c)
set_navigability(a.subject, a.subject.memberEnd[0], True)
copy.copy({a, c})
new_diagram = element_factory.create(UML.Diagram)
pasted_items = copy.paste(new_diagram)
aa = pasted_items.pop()
if not isinstance(aa, diagramitems.AssociationItem):
aa = pasted_items.pop()
return c, a, aa
def test_delete_copied_associations(class_and_association_with_copy):
c, a, aa = class_and_association_with_copy
assert a.subject.memberEnd[0].type
assert a.subject.memberEnd[1].type
assert a.subject.memberEnd[0].type is c.subject
assert a.subject.memberEnd[1].type is c.subject
assert a.subject.memberEnd[0] is a.head_end.subject
assert a.subject.memberEnd[1] is a.tail_end.subject
assert a.subject.memberEnd[0] in a.subject.memberEnd[1].type.ownedAttribute
# Delete the copy and all is fine
aa.unlink()
assert a.subject.memberEnd[0].type
assert a.subject.memberEnd[1].type
assert a.subject.memberEnd[0].type is c.subject
assert a.subject.memberEnd[1].type is c.subject
assert a.subject.memberEnd[0] is a.head_end.subject
assert a.subject.memberEnd[1] is a.tail_end.subject
assert a.subject.memberEnd[0] in a.subject.memberEnd[1].type.ownedAttribute
def test_delete_original_association(class_and_association_with_copy):
c, a, aa = class_and_association_with_copy
assert aa.subject.memberEnd[0].type
assert aa.subject.memberEnd[1].type
assert aa.subject.memberEnd[0].type is c.subject
assert aa.subject.memberEnd[1].type is c.subject
assert aa.subject.memberEnd[0] is aa.head_end.subject
assert aa.subject.memberEnd[1] is aa.tail_end.subject
assert aa.subject.memberEnd[0] in aa.subject.memberEnd[1].type.ownedAttribute
# Now, when the original is deleted, the model is changed and made invalid
a.unlink()
assert aa.subject.memberEnd[0].type
assert aa.subject.memberEnd[1].type
assert aa.subject.memberEnd[0].type is c.subject
assert aa.subject.memberEnd[1].type is c.subject
assert aa.subject.memberEnd[0] is aa.head_end.subject
assert aa.subject.memberEnd[1] is aa.tail_end.subject
assert aa.subject.memberEnd[0] in aa.subject.memberEnd[1].type.ownedAttribute
| lgpl-2.1 | Python | |
107f86c8c20c4d7cc4c81db464ac20607bb31ba9 | add DBusTube constants to constants.py | community-ssu/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble,Ziemin/telepathy-gabble | tests/twisted/constants.py | tests/twisted/constants.py | """
Some handy constants for other tests to share and enjoy.
"""
HT_CONTACT = 1
CHANNEL = "org.freedesktop.Telepathy.Channel"
CHANNEL_IFACE_GROUP = CHANNEL + ".Interface.Group"
CHANNEL_TYPE_TUBES = CHANNEL + ".Type.Tubes"
CHANNEL_IFACE_TUBE = CHANNEL + ".Interface.Tube.DRAFT"
CHANNEL_TYPE_STREAM_TUBE = CHANNEL + ".Type.StreamTube.DRAFT"
CHANNEL_TYPE_DBUS_TUBE = CHANNEL + ".Type.DBusTube.DRAFT"
CHANNEL_TYPE = CHANNEL + '.ChannelType'
TARGET_HANDLE_TYPE = CHANNEL + '.TargetHandleType'
TARGET_HANDLE = CHANNEL + '.TargetHandle'
TARGET_ID = CHANNEL + '.TargetID'
REQUESTED = CHANNEL + '.Requested'
INITIATOR_HANDLE = CHANNEL + '.InitiatorHandle'
INITIATOR_ID = CHANNEL + '.InitiatorID'
CONN = "org.freedesktop.Telepathy.Connection"
CONN_IFACE_REQUESTS = CONN + '.Interface.Requests'
ERRORS = 'org.freedesktop.Telepathy.Errors'
INVALID_ARGUMENT = ERRORS + '.InvalidArgument'
NOT_IMPLEMENTED = ERRORS + '.NotImplemented'
NOT_AVAILABLE = ERRORS + '.NotAvailable'
TUBE_PARAMETERS = CHANNEL_IFACE_TUBE + '.Parameters'
TUBE_STATUS = CHANNEL_IFACE_TUBE + '.Status'
STREAM_TUBE_SERVICE = CHANNEL_TYPE_STREAM_TUBE + '.Service'
DBUS_TUBE_SERVICE_NAME = CHANNEL_TYPE_DBUS_TUBE + '.ServiceName'
TUBE_CHANNEL_STATE_LOCAL_PENDING = 0
TUBE_CHANNEL_STATE_REMOTE_PENDING = 1
TUBE_CHANNEL_STATE_OPEN = 2
TUBE_CHANNEL_STATE_NOT_OFFERED = 3
| """
Some handy constants for other tests to share and enjoy.
"""
HT_CONTACT = 1
CHANNEL = "org.freedesktop.Telepathy.Channel"
CHANNEL_IFACE_GROUP = CHANNEL + ".Interface.Group"
CHANNEL_TYPE_TUBES = CHANNEL + ".Type.Tubes"
CHANNEL_IFACE_TUBE = CHANNEL + ".Interface.Tube.DRAFT"
CHANNEL_TYPE_STREAM_TUBE = CHANNEL + ".Type.StreamTube.DRAFT"
CHANNEL_TYPE = CHANNEL + '.ChannelType'
TARGET_HANDLE_TYPE = CHANNEL + '.TargetHandleType'
TARGET_HANDLE = CHANNEL + '.TargetHandle'
TARGET_ID = CHANNEL + '.TargetID'
REQUESTED = CHANNEL + '.Requested'
INITIATOR_HANDLE = CHANNEL + '.InitiatorHandle'
INITIATOR_ID = CHANNEL + '.InitiatorID'
CONN = "org.freedesktop.Telepathy.Connection"
CONN_IFACE_REQUESTS = CONN + '.Interface.Requests'
ERRORS = 'org.freedesktop.Telepathy.Errors'
INVALID_ARGUMENT = ERRORS + '.InvalidArgument'
NOT_IMPLEMENTED = ERRORS + '.NotImplemented'
NOT_AVAILABLE = ERRORS + '.NotAvailable'
TUBE_PARAMETERS = CHANNEL_IFACE_TUBE + '.Parameters'
TUBE_STATUS = CHANNEL_IFACE_TUBE + '.Status'
STREAM_TUBE_SERVICE = CHANNEL_TYPE_STREAM_TUBE + '.Service'
TUBE_CHANNEL_STATE_LOCAL_PENDING = 0
TUBE_CHANNEL_STATE_REMOTE_PENDING = 1
TUBE_CHANNEL_STATE_OPEN = 2
TUBE_CHANNEL_STATE_NOT_OFFERED = 3
| lgpl-2.1 | Python |
b9b246e1feb728a257b343d4a07fc42ba10bac13 | Add a wsgi app to our test tg2 app | ralphbean/moksha,pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,lmacken/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,ralphbean/moksha,pombredanne/moksha | moksha/tests/quickstarts/tg2app/tg2app/wsgi.py | moksha/tests/quickstarts/tg2app/tg2app/wsgi.py | import os
from paste.deploy import loadapp
cfg_path = os.path.join(os.path.dirname(__file__), '..', 'development.ini')
application = loadapp('config:' + cfg_path)
| apache-2.0 | Python | |
3299cd9a931e6b564ebb5031a7e515155dab97c9 | Create blast.py | jfoox/venninator | blast.py | blast.py | #! /bin/bash/env python
from Applications import NcbiblastpCommandline
import math
import os
from decimal import *
import settings
added = []
class Blaster(object):
def __init__(self):
pass
def blast(self, evalue):
# create a database and conduct the all-vs-all BLAST search against it
print 'Creating database containing all sequences from all genomes...'
os.system('makeblastdb -in venninator_combined.fa -dbtype prot -out venninator_database')
combinedfile_splits = [splitfile for splitfile in os.listdir('.') if splitfile.startswith('venninator_combined_')]
pctdone = 0
if settings.allvsallfile == '':
print 'Executing all-vs-all BLAST search (this will take a while) ...'
for splitfile in combinedfile_splits:
blastsearch = NcbiblastpCommandline(cmd='blastp', query=splitfile, db='venninator_database', outfmt='"6 qseqid sseqid qcovs evalue"', evalue=settings.evalues[0], max_hsps_per_subject='1', out='out_blastp_' + settings.evalues[0] +'_' + splitfile.split('_')[2] + '.tmp', num_threads=settings.numthreads)
blastsearch()
pctdone += 10
print '%d%% done ...' % pctdone
else:
with open('out_blastp_' + settings.evalues[0] + '_combined.tmp', 'w') as copyfile:
with open(settings.allvsallfile, 'r') as avafile:
for line in avafile:
line = line.strip()
copyfile.write(line + '\n')
def trimmer(self, evalue):
# extract into new file "query, top hit, e-value" from rows that meet conditions (no self-hits; consolidate reciprocal hits; >70% coverage).
print 'Taking all-vs-all BLAST output and trimming (removing duplicates, length below threshold, combining e-values)...'
blastoutput = open('../out_blastp_' + settings.evalues[0] + '_combined.tmp')
outfile = open('out_blastp_' + evalue + '_trimmed.tmp', 'w')
seen = set()
withevalue = {}
# This generator expression goes line by line ONCE (instead of iteratively),
# and returns the values of each line, excluding self-hits (A1 -> A1) and hits with length less than 70%.
line = (line.strip().split('\t')[0:4] for line in blastoutput if len(line) > 0 and line.split('\t')[0] != line.split('\t')[1] and int(line.split('\t')[2]) >= settings.length and Decimal(line.split('\t')[3]) < Decimal(evalue))
for i in line:
pair = ','.join(sorted(i[0:2])) # Use sort() in order to make sure values of A1 -> B2 and B2 -> A1 are combined.
if pair not in seen:
seen.add(pair) # Seen is the master set of pairs
withevalue[pair] = [i[3]] # if an A->B not in seen, then we define that A->B as the key, and create a list with this first e-value for its value
elif pair in seen: # If we've already added this A->B pair,
withevalue[pair].append(i[3]) # then we append the e-value (value) to this existing pair (key) in our dictionary.
for x in withevalue:
outfile.write(x + ',' + ','.join(withevalue[x]) + '\n')
outfile.close()
def homolog_splitter(self, evalue):
# separate homologs among species from in-paralogs within species
# this is done such that their weights can be normalized respectively
print 'Separating orthologs and paralogs...'
with open('out_blastp_' + evalue + '_trimmed.tmp', 'r') as infile:
for line in infile:
line = line.strip().split(',')
qid = line[0].split('_',1)[0]
hid = line[1].split('_',1)[0]
line = ','.join(line)
with open('out_blastp_' + evalue + '_trimmed_splitted.tmp', 'a') as outfile:
if qid == hid:
outfile.write('p,' + line + '\n')
elif qid != hid:
outfile.write('o,' + line + '\n')
def recip_averager(self, evalue):
outfile = open('output_readyformcl_' + evalue + '.tmp', 'w')
with open('out_blastp_' + evalue + '_trimmed_splitted.tmp', 'r') as infile:
finalaveraged = {}
for line in infile:
line = line.strip().split(',')
pair = '\t'.join(line[1:3])
finalaveraged[pair] = ''
justevalues = line[3:]
eneglogs = []
for evalue in justevalues:
if evalue == '0' or evalue == '0.0' or evalue == 0 or evalue == 0.0:
evalue = 308.0 # Rounding up -log10(2.225074e-308), the lowest possible e-value before 0.0
else:
evalue = -math.log10(Decimal(evalue))
eneglogs.append(evalue)
averaged = sum(eneglogs) / len(eneglogs)
finalaveraged[pair] = str(averaged)
for x in finalaveraged:
outfile.write(x + '\t' + finalaveraged[x] + '\n')
| mit | Python | |
550ce185895a7b32f6bdb0750338ea6d2416ee2a | Add merged migration | Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization | organization/projects/migrations/0006_merge.py | organization/projects/migrations/0006_merge.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-07 14:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organization-projects', '0005_auto_20160907_1046'),
('organization-projects', '0005_auto_20160907_1138'),
]
operations = [
]
| agpl-3.0 | Python | |
f6148d7a4e2d080da93d21de2f13b601465c7528 | Add tf.contrib.checkpoint.CheckpointableBase for isinstance checks. | alsrgv/tensorflow,apark263/tensorflow,davidzchen/tensorflow,asimshankar/tensorflow,adit-chandra/tensorflow,jhseu/tensorflow,gojira/tensorflow,cxxgtxy/tensorflow,xzturn/tensorflow,karllessard/tensorflow,ghchinoy/tensorflow,xzturn/tensorflow,manipopopo/tensorflow,snnn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,apark263/tensorflow,ppwwyyxx/tensorflow,dancingdan/tensorflow,kobejean/tensorflow,DavidNorman/tensorflow,kobejean/tensorflow,benoitsteiner/tensorflow-xsmm,freedomtan/tensorflow,alshedivat/tensorflow,yongtang/tensorflow,dancingdan/tensorflow,karllessard/tensorflow,lukeiwanski/tensorflow,alsrgv/tensorflow,adit-chandra/tensorflow,hfp/tensorflow-xsmm,gunan/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,apark263/tensorflow,ageron/tensorflow,Intel-Corporation/tensorflow,arborh/tensorflow,davidzchen/tensorflow,sarvex/tensorflow,annarev/tensorflow,ghchinoy/tensorflow,aldian/tensorflow,aselle/tensorflow,gojira/tensorflow,caisq/tensorflow,aam-at/tensorflow,chemelnucfin/tensorflow,lukeiwanski/tensorflow,Bismarrck/tensorflow,paolodedios/tensorflow,xzturn/tensorflow,tensorflow/tensorflow,ghchinoy/tensorflow,gunan/tensorflow,alshedivat/tensorflow,dongjoon-hyun/tensorflow,dancingdan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,snnn/tensorflow,brchiu/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,gunan/tensorflow,freedomtan/tensorflow,meteorcloudy/tensorflow,hehongliang/tensorflow,ZhangXinNan/tensorflow,dancingdan/tensorflow,snnn/tensorflow,girving/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ghchinoy/tensorflow,frreiss/tensorflow-fred,asimshankar/tensorflow,meteorcloudy/tensorflow,karllessard/tensorflow,AnishShah/tensorflow,meteorcloudy/tensorflow,apark263/tensorflow,hfp/tensorflow-xsmm,apark263/tensorflow,gunan/tensorflow,DavidNorman/tensorflow,c
hemelnucfin/tensorflow,jart/tensorflow,kevin-coder/tensorflow-fork,jart/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,theflofly/tensorflow,Bismarrck/tensorflow,seanli9jan/tensorflow,petewarden/tensorflow,asimshankar/tensorflow,AnishShah/tensorflow,AnishShah/tensorflow,davidzchen/tensorflow,jbedorf/tensorflow,ageron/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,ageron/tensorflow,AnishShah/tensorflow,freedomtan/tensorflow,caisq/tensorflow,tensorflow/tensorflow,alsrgv/tensorflow,chemelnucfin/tensorflow,renyi533/tensorflow,jhseu/tensorflow,brchiu/tensorflow,chemelnucfin/tensorflow,kevin-coder/tensorflow-fork,aselle/tensorflow,jendap/tensorflow,arborh/tensorflow,aam-at/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,lukeiwanski/tensorflow,hfp/tensorflow-xsmm,freedomtan/tensorflow,girving/tensorflow,lukeiwanski/tensorflow,theflofly/tensorflow,Intel-tensorflow/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,dancingdan/tensorflow,adit-chandra/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow,benoitsteiner/tensorflow-xsmm,hehongliang/tensorflow,benoitsteiner/tensorflow-xsmm,ZhangXinNan/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,AnishShah/tensorflow,apark263/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,arborh/tensorflow,theflofly/tensorflow,lukeiwanski/tensorflow,kevin-coder/tensorflow-fork,petewarden/tensorflow,manipopopo/tensorflow,snnn/tensorflow,ZhangXinNan/tensorflow,girving/tensorflow,dancingdan/tensorflow,aam-at/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,ghchinoy/tensorflow,gojira/tensorflow,caisq/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,asimshankar/tensorflow,aam-at/tensorflow,ppwwyyxx/tensorflow,DavidNorman/tensorflow,Bismarrck/tensorflow,AnishShah/tensorflow,manipopopo/tensorflow,dongjoon-hyun/tensorflow,tensorflo
w/tensorflow-pywrap_saved_model,Bismarrck/tensorflow,jendap/tensorflow,ageron/tensorflow,manipopopo/tensorflow,chemelnucfin/tensorflow,jendap/tensorflow,ZhangXinNan/tensorflow,benoitsteiner/tensorflow-xsmm,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,asimshankar/tensorflow,ageron/tensorflow,drpngx/tensorflow,davidzchen/tensorflow,renyi533/tensorflow,aselle/tensorflow,freedomtan/tensorflow,ZhangXinNan/tensorflow,ghchinoy/tensorflow,dongjoon-hyun/tensorflow,seanli9jan/tensorflow,jendap/tensorflow,alsrgv/tensorflow,seanli9jan/tensorflow,arborh/tensorflow,ZhangXinNan/tensorflow,ZhangXinNan/tensorflow,ageron/tensorflow,jart/tensorflow,jendap/tensorflow,renyi533/tensorflow,DavidNorman/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,ghchinoy/tensorflow,gojira/tensorflow,cxxgtxy/tensorflow,adit-chandra/tensorflow,snnn/tensorflow,renyi533/tensorflow,brchiu/tensorflow,seanli9jan/tensorflow,petewarden/tensorflow,aldian/tensorflow,apark263/tensorflow,theflofly/tensorflow,gunan/tensorflow,drpngx/tensorflow,jendap/tensorflow,yongtang/tensorflow,ageron/tensorflow,jbedorf/tensorflow,chemelnucfin/tensorflow,Bismarrck/tensorflow,gojira/tensorflow,hfp/tensorflow-xsmm,ghchinoy/tensorflow,AnishShah/tensorflow,meteorcloudy/tensorflow,seanli9jan/tensorflow,benoitsteiner/tensorflow-xsmm,adit-chandra/tensorflow,hfp/tensorflow-xsmm,jhseu/tensorflow,renyi533/tensorflow,manipopopo/tensorflow,girving/tensorflow,caisq/tensorflow,girving/tensorflow,tensorflow/tensorflow,chemelnucfin/tensorflow,yongtang/tensorflow,alsrgv/tensorflow,jalexvig/tensorflow,benoitsteiner/tensorflow-xsmm,apark263/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,renyi533/tensorflow,jalexvig/tensorflow,drpngx/tensorflow,renyi533/tensorflow,AnishShah/tensorflow,jalexvig/tensorflow,jendap/tensorflow,xodus7/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,benoitsteiner/tensorflow-xsmm,aam-at/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-e
xperimental_link_static_libraries_once,yongtang/tensorflow,adit-chandra/tensorflow,chemelnucfin/tensorflow,gojira/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,DavidNorman/tensorflow,Intel-Corporation/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,xodus7/tensorflow,alsrgv/tensorflow,seanli9jan/tensorflow,karllessard/tensorflow,asimshankar/tensorflow,davidzchen/tensorflow,xodus7/tensorflow,drpngx/tensorflow,frreiss/tensorflow-fred,jhseu/tensorflow,adit-chandra/tensorflow,girving/tensorflow,Bismarrck/tensorflow,Intel-tensorflow/tensorflow,ghchinoy/tensorflow,seanli9jan/tensorflow,annarev/tensorflow,jhseu/tensorflow,arborh/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aam-at/tensorflow,jendap/tensorflow,paolodedios/tensorflow,hfp/tensorflow-xsmm,yongtang/tensorflow,gunan/tensorflow,gojira/tensorflow,DavidNorman/tensorflow,jbedorf/tensorflow,meteorcloudy/tensorflow,jbedorf/tensorflow,xzturn/tensorflow,Bismarrck/tensorflow,kevin-coder/tensorflow-fork,Intel-tensorflow/tensorflow,AnishShah/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jhseu/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,asimshankar/tensorflow,alshedivat/tensorflow,jbedorf/tensorflow,manipopopo/tensorflow,benoitsteiner/tensorflow-xsmm,gautam1858/tensorflow,ppwwyyxx/tensorflow,manipopopo/tensorflow,kobejean/tensorflow,davidzchen/tensorflow,DavidNorman/tensorflow,xzturn/tensorflow,ageron/tensorflow,gunan/tensorflow,kevin-coder/tensorflow-fork,alsrgv/tensorflow,hehongliang/tensorflow,gautam1858/tensorflow,dongjoon-hyun/tensorflow,chemelnucfin/tensorflow,gautam1858/tensorflow,drpngx/tensorflow,davidzchen/tensorflow,alsrgv/tensorflow,apark263/tensorflow,karllessard/tensorflow,arborh/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,meteorcloudy/tensorflow,apark263/tensorflow,jart/tensorflow,brchiu/tensorflow,tensorf
low/tensorflow-pywrap_saved_model,girving/tensorflow,girving/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,kobejean/tensorflow,hehongliang/tensorflow,yongtang/tensorflow,aldian/tensorflow,seanli9jan/tensorflow,gunan/tensorflow,ZhangXinNan/tensorflow,sarvex/tensorflow,ppwwyyxx/tensorflow,jalexvig/tensorflow,jart/tensorflow,ageron/tensorflow,davidzchen/tensorflow,dongjoon-hyun/tensorflow,renyi533/tensorflow,jhseu/tensorflow,theflofly/tensorflow,freedomtan/tensorflow,gautam1858/tensorflow,jart/tensorflow,caisq/tensorflow,meteorcloudy/tensorflow,Bismarrck/tensorflow,aam-at/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_tf_optimizer,gojira/tensorflow,dongjoon-hyun/tensorflow,xodus7/tensorflow,gautam1858/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,dancingdan/tensorflow,annarev/tensorflow,kevin-coder/tensorflow-fork,caisq/tensorflow,DavidNorman/tensorflow,aselle/tensorflow,chemelnucfin/tensorflow,brchiu/tensorflow,jbedorf/tensorflow,jalexvig/tensorflow,dongjoon-hyun/tensorflow,dongjoon-hyun/tensorflow,alshedivat/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,aselle/tensorflow,seanli9jan/tensorflow,jbedorf/tensorflow,snnn/tensorflow,Intel-Corporation/tensorflow,hehongliang/tensorflow,jart/tensorflow,xzturn/tensorflow,xzturn/tensorflow,alshedivat/tensorflow,snnn/tensorflow,Bismarrck/tensorflow,asimshankar/tensorflow,kevin-coder/tensorflow-fork,brchiu/tensorflow,snnn/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,alshedivat/tensorflow,gojira/tensorflow,theflofly/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,manipopopo/tensorflow,davidzchen/tensorflow,seanli9jan/tensorflow,lukeiwanski/tensorflow,annarev/tensorflow,jalexvig/tensorflow,ZhangXinNan/tensorflow,hfp/tensorflow-xsmm,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experime
ntal_link_static_libraries_once,aselle/tensorflow,jart/tensorflow,arborh/tensorflow,caisq/tensorflow,jbedorf/tensorflow,karllessard/tensorflow,jbedorf/tensorflow,petewarden/tensorflow,aldian/tensorflow,jhseu/tensorflow,dancingdan/tensorflow,adit-chandra/tensorflow,Bismarrck/tensorflow,kobejean/tensorflow,yongtang/tensorflow,ageron/tensorflow,dancingdan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,xzturn/tensorflow,DavidNorman/tensorflow,jhseu/tensorflow,alshedivat/tensorflow,karllessard/tensorflow,aam-at/tensorflow,drpngx/tensorflow,frreiss/tensorflow-fred,freedomtan/tensorflow,xzturn/tensorflow,gunan/tensorflow,ghchinoy/tensorflow,manipopopo/tensorflow,tensorflow/tensorflow,ppwwyyxx/tensorflow,kobejean/tensorflow,tensorflow/tensorflow,hehongliang/tensorflow,lukeiwanski/tensorflow,xodus7/tensorflow,xzturn/tensorflow,aam-at/tensorflow,arborh/tensorflow,jalexvig/tensorflow,jart/tensorflow,jendap/tensorflow,asimshankar/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,asimshankar/tensorflow,lukeiwanski/tensorflow,jendap/tensorflow,renyi533/tensorflow,alshedivat/tensorflow,kobejean/tensorflow,girving/tensorflow,theflofly/tensorflow,aselle/tensorflow,arborh/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,annarev/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ageron/tensorflow,kobejean/tensorflow,aldian/tensorflow,arborh/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-pywrap_saved_model,caisq/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,dancingdan/tensorflow,paolodedios/tensorflow,ZhangXinNan/tensorflow,kobejean/tensorflow,brchiu/tensorflow,meteorcloudy/tensorflow,AnishShah/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,brchiu/tensorflow,ppwwyyxx/tensorflow,kobejean/tensorflow,seanli9jan/tensorflow,kevin-coder/tensorflow-fork,lukeiwanski/tensorflow,annarev/tensorflow,snnn/tensorflow,DavidNo
rman/tensorflow,manipopopo/tensorflow,dongjoon-hyun/tensorflow,hfp/tensorflow-xsmm,chemelnucfin/tensorflow,Intel-tensorflow/tensorflow,kevin-coder/tensorflow-fork,benoitsteiner/tensorflow-xsmm,tensorflow/tensorflow,sarvex/tensorflow,adit-chandra/tensorflow,xodus7/tensorflow,davidzchen/tensorflow,aselle/tensorflow,sarvex/tensorflow,jalexvig/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ageron/tensorflow,tensorflow/tensorflow-pywrap_saved_model,caisq/tensorflow,theflofly/tensorflow,aldian/tensorflow,snnn/tensorflow,alsrgv/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,kevin-coder/tensorflow-fork,jhseu/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,dancingdan/tensorflow,sarvex/tensorflow,aam-at/tensorflow,DavidNorman/tensorflow,Intel-tensorflow/tensorflow,ppwwyyxx/tensorflow,meteorcloudy/tensorflow,aldian/tensorflow,lukeiwanski/tensorflow,brchiu/tensorflow,sarvex/tensorflow,freedomtan/tensorflow,xodus7/tensorflow,petewarden/tensorflow,hehongliang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,jbedorf/tensorflow,brchiu/tensorflow,adit-chandra/tensorflow,Bismarrck/tensorflow,frreiss/tensorflow-fred,petewarden/tensorflow,adit-chandra/tensorflow,caisq/tensorflow,jhseu/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,dongjoon-hyun/tensorflow,paolodedios/tensorflow,drpngx/tensorflow,aam-at/tensorflow,chemelnucfin/tensorflow,karllessard/tensorflow,snnn/tensorflow,aam-at/tensorflow,benoitsteiner/tensorflow-xsmm,manipopopo/tensorflow,jalexvig/tensorflow,girving/tensorflow,ppwwyyxx/tensorflow,arborh/tensorflow,Intel-Corporation/tensorflow,aselle/tensorflow,aselle/tensorflow,ppwwyyxx/tensorflow,paolodedios/tensorflow,jhseu/tensorflow,frreiss/tensorflow-fred,asimshankar/tensorflow,alshedivat/tensorflow,hfp/tensorflow-xsmm,Intel-Corporation/tensorflow,jart/tensorflow,jalexvig/tens
orflow,yongtang/tensorflow,jbedorf/tensorflow,xodus7/tensorflow,gojira/tensorflow,jendap/tensorflow,drpngx/tensorflow,renyi533/tensorflow,ZhangXinNan/tensorflow,drpngx/tensorflow,apark263/tensorflow,cxxgtxy/tensorflow,kobejean/tensorflow,ppwwyyxx/tensorflow,gunan/tensorflow,meteorcloudy/tensorflow,alsrgv/tensorflow,arborh/tensorflow,aldian/tensorflow,theflofly/tensorflow,alshedivat/tensorflow,xodus7/tensorflow,petewarden/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,girving/tensorflow,jalexvig/tensorflow,theflofly/tensorflow,frreiss/tensorflow-fred,alshedivat/tensorflow,AnishShah/tensorflow,gunan/tensorflow,xodus7/tensorflow,ppwwyyxx/tensorflow,paolodedios/tensorflow,hfp/tensorflow-xsmm,petewarden/tensorflow,gojira/tensorflow,Intel-tensorflow/tensorflow,drpngx/tensorflow,brchiu/tensorflow,jbedorf/tensorflow,dongjoon-hyun/tensorflow,hfp/tensorflow-xsmm,annarev/tensorflow,xodus7/tensorflow,benoitsteiner/tensorflow-xsmm,aselle/tensorflow,theflofly/tensorflow,xzturn/tensorflow | tensorflow/contrib/checkpoint/__init__.py | tensorflow/contrib/checkpoint/__init__.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for working with object-based checkpoints.
Visualization and inspection:
@@dot_graph_from_checkpoint
@@list_objects
@@object_metadata
Managing dependencies:
@@capture_dependencies
@@Checkpointable
@@CheckpointableBase
@@CheckpointableObjectGraph
@@NoDependency
@@split_dependency
Checkpointable data structures:
@@List
@@Mapping
@@UniqueNameTracker
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.checkpoint.python.containers import UniqueNameTracker
from tensorflow.contrib.checkpoint.python.split_dependency import split_dependency
from tensorflow.contrib.checkpoint.python.visualize import dot_graph_from_checkpoint
from tensorflow.core.protobuf.checkpointable_object_graph_pb2 import CheckpointableObjectGraph
from tensorflow.python.training.checkpointable.base import Checkpointable
from tensorflow.python.training.checkpointable.base import CheckpointableBase
from tensorflow.python.training.checkpointable.base import NoDependency
from tensorflow.python.training.checkpointable.data_structures import List
from tensorflow.python.training.checkpointable.data_structures import Mapping
from tensorflow.python.training.checkpointable.util import capture_dependencies
from tensorflow.python.training.checkpointable.util import list_objects
from tensorflow.python.training.checkpointable.util import object_metadata
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(module_name=__name__)
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for working with object-based checkpoints.
Visualization and inspection:
@@dot_graph_from_checkpoint
@@list_objects
@@object_metadata
Managing dependencies:
@@capture_dependencies
@@Checkpointable
@@CheckpointableObjectGraph
@@NoDependency
@@split_dependency
Checkpointable data structures:
@@List
@@Mapping
@@UniqueNameTracker
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.checkpoint.python.containers import UniqueNameTracker
from tensorflow.contrib.checkpoint.python.split_dependency import split_dependency
from tensorflow.contrib.checkpoint.python.visualize import dot_graph_from_checkpoint
from tensorflow.core.protobuf.checkpointable_object_graph_pb2 import CheckpointableObjectGraph
from tensorflow.python.training.checkpointable.base import Checkpointable
from tensorflow.python.training.checkpointable.base import NoDependency
from tensorflow.python.training.checkpointable.data_structures import List
from tensorflow.python.training.checkpointable.data_structures import Mapping
from tensorflow.python.training.checkpointable.util import capture_dependencies
from tensorflow.python.training.checkpointable.util import list_objects
from tensorflow.python.training.checkpointable.util import object_metadata
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(module_name=__name__)
| apache-2.0 | Python |
7ef03c975566b92fd97b7071b39cf3d8c242e480 | Create brick.py | petehopkins/Untitled-CSET1100-Project | brick.py | brick.py | # Class: Brick
# Represents a single brick as displayed on screen.
# Used as a target for the Ball to break
# Requires pygame
import pygame
class Brick(pygame.sprite.Sprite):
__borderWidth = 2
__hitsRemaining = 1
__position = {"x": 0, "y": 0}
__size = 25
__whRatio = {"width": 2, "height": 1}
__width = __size * __whRatio["width"]
__height = __size * __whRatio["height"]
__outerRect = pygame.Rect(__position["x"], __position["y"], __width, __height)
__innerRect = pygame.Rect(__position["x"] + __borderWidth, __position["y"] + __borderWidth, __width - (__borderWidth * 2), __height - (__borderWidth * 2))
__isInPlay = True
def __init__(self, position, fill = None, border = None):
from Game import Engine
super().__init__()
if fill != None:
self.__fill = fill
else:
self.__fill = Engine.Colors.LAVENDER
if border != None:
self.__border = border
else:
self.__border = Engine.Colors.BLACK
self.__position["x"] = position[0]
self.__position["y"] = position[1]
self.__outerRect = pygame.Rect(self.__position["x"], self.__position["y"], self.__width, self.__height)
self.__innerRect = pygame.Rect(self.__position["x"] + self.__borderWidth, self.__position["y"] + self.__borderWidth, self.__width - (self.__borderWidth * 2), self.__height - (self.__borderWidth * 2))
self.__hitsRemaining = 1
self.rect = self.__outerRect
self.image = pygame.Surface([self.__width, self.__height])
self.image.fill(self.__border)
self.brick = pygame.Surface((self.__innerRect.width, self.__innerRect.height))
self.brick.fill(self.__fill)
self.image.blit(self.brick, self.__innerRect)
def update(self):
pass
def getWidth(self):
return self.__width
def getHeight(self):
return self.__height
def __removeFromPlay(self):
self.__isInPlay = False #set flag
def __animate(self):
if self.__hitsRemaining <= 0:
self.__removeFromPlay() #no hits remaining, get rid of this one
def collide(self):
self.__hitsRemaining -= 1 #decrement hits
self.__animate() #and animate the hit
def stack(self, stage):
if self.__isInPlay:
stage.blit(self, self.rect) #draw border
| mit | Python | |
c1d3a8d15d3e50a14ff765e7abd063cc1b390063 | add new test case TestAssociator | alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl,TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl | tests/unit/EventReader/test_Associator.py | tests/unit/EventReader/test_Associator.py | from AlphaTwirl.EventReader import Associator
import unittest
##____________________________________________________________________________||
class MockReader(object):
def __init__(self):
self.content = [ ]
##____________________________________________________________________________||
class MockCollector(object):
def __init__(self):
self.readers = [ ]
def addReader(self, datasetName, reader):
self.readers.append((datasetName, reader))
##____________________________________________________________________________||
class TestAssociator(unittest.TestCase):
    """Exercise Associator's reader duplication and collector registration."""

    def test_make(self):
        template = MockReader()
        collector = MockCollector()
        associator = Associator(template, collector)
        clone = associator.make("data1")
        # make() must hand back a fresh reader of the same type ...
        self.assertIsNot(template, clone)
        self.assertIsNot(template.content, clone.content)
        self.assertIsInstance(clone, MockReader)
        # ... and register it with the collector under the dataset name.
        self.assertEqual([("data1", clone)], collector.readers)

    def test_NullCollector(self):
        # A missing collector must not break make().
        template = MockReader()
        associator = Associator(template)
        associator.make("data1")
##____________________________________________________________________________||
| bsd-3-clause | Python | |
6a4152e805be0ba061529841fb84442d8a23ff9f | add label transform cpn | FederatedAI/FATE,FederatedAI/FATE,FederatedAI/FATE | python/federatedml/components/label_transform.py | python/federatedml/components/label_transform.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta
# Component registry entry for the LabelTransform module.
label_transform_cpn_meta = ComponentMeta("LabelTransform")


@label_transform_cpn_meta.bind_param
def label_transform_param():
    # Late import keeps this registration module cheap to load; the
    # decorator binds the returned class as the component's parameter type.
    from federatedml.param.label_transform_param import LabelTransformParam

    return LabelTransformParam


@label_transform_cpn_meta.bind_runner.on_guest.on_host
def label_transform_client_runner():
    # The same runner implementation serves both guest and host parties.
    from federatedml.util.label_transform import LabelTransformer

    return LabelTransformer
| apache-2.0 | Python | |
99ab22cf5fcba719dd7d9d87c18c8d93de5591a4 | Add IO Class | yasn77/whitepy | whitepy/ws_io.py | whitepy/ws_io.py | import readchar
import sys
class IO(object):
    """Whitespace-language I/O instructions operating on a value stack."""

    def __init__(self, stack):
        self.stack = stack

    def i_chr(self, heap):
        # Read a single character from stdin, push it, then store via heap.
        self.stack.push(readchar.readchar())
        heap.set()

    def i_int(self, heap):
        # Keep reading single characters until one parses as an integer.
        number = None
        while type(number) is not int:
            try:
                number = int(readchar.readchar())
            except ValueError:
                pass
        self.stack.push(number)
        heap.set()

    def o_chr(self):
        # Pop a character and write its UTF-8 bytes to stdout.
        sys.stdout.buffer.write(self.stack.pop().encode('utf-8'))

    def o_int(self):
        # Pop a number and write its decimal representation to stdout.
        sys.stdout.buffer.write(str(self.stack.pop()).encode('utf-8'))
| apache-2.0 | Python | |
6279341682ae45a228302972dbd106a2e44e0b12 | Add example usage of the JsonTestResponse. | craig552uk/flask-json | examples/example_test.py | examples/example_test.py | import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
    """Build the demo Flask app exposing a single JSON counter endpoint."""
    application = Flask(__name__)
    application.test_value = 0
    FlaskJSON(application)

    @application.route('/increment')
    def increment():
        # Bump the app-wide counter and echo it back as JSON.
        application.test_value += 1
        return json_response(value=application.test_value)

    return application
class OurAppTestCase(unittest.TestCase):
    """Verify that /increment responds with parseable JSON."""

    def setUp(self):
        self.app = our_app()
        self.app.config['TESTING'] = True
        # We have to change response class manually since TESTING flag is
        # set after Flask-JSON initialization.
        self.app.response_class = JsonTestResponse
        self.client = self.app.test_client()

    def test_app(self):
        resp = self.client.get('/increment')
        # JsonTestResponse exposes the decoded payload via .json.
        assert 'value' in resp.json
        assert resp.json['value'] == 1
if __name__ == '__main__':
    # Allow running this example directly: python example_test.py
    unittest.main()
| bsd-3-clause | Python | |
1fd997bc11b62cb760470fb749c2a4f0261b3e00 | Add db2es.py to sync data | SHSIDers/oclubs,SHSIDers/oclubs,SHSIDers/oclubs,SHSIDers/oclubs | db2es.py | db2es.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import unicode_literals, absolute_import
import time
from elasticsearch.helpers import scan
from elasticsearch.exceptions import NotFoundError
from oclubs.app import app
from oclubs.access import database, elasticsearch, done
from oclubs.objs import Activity, Club
# Model classes whose rows are mirrored into the 'oclubs' Elasticsearch index.
clses = [Club, Activity]

with app.app_context():
    for cls in clses:
        # All primary keys currently present in the database table.
        db_ids = database.fetch_onecol(
            cls.table,
            cls.identifier,
            {}
        )
        db_ids = set(int(x) for x in db_ids)
        db_max = max(db_ids)

        # All document ids currently present in Elasticsearch; the index
        # may not exist yet, in which case we treat it as empty.
        try:
            es_ids = scan(
                elasticsearch.es,
                index='oclubs',
                doc_type=cls.table,
                size=10000000,
                query={
                    'query': {'match_all': {}},
                    'size': 10000,
                    'fields': ['_id']
                })
            es_ids = (d['_id'] for d in es_ids)
        except NotFoundError:
            es_ids = []
        es_ids = set(int(x) for x in es_ids)

        if es_ids:
            es_max = max(es_ids)
        else:
            es_max = 0
        # Scan the full id space seen on either side.
        max_id = max(db_max, es_max)

        # Names of the model's searchable properties (those whose property
        # object carries a truthy .search serializer).
        cls_searchprops = [
            prop.name for prop in [
                getattr(cls, propname) for propname in dir(cls)
            ] if hasattr(prop, 'search') and prop.search
        ]

        for i in xrange(1, max_id + 1):
            # Throttle so the sync does not hammer the services.
            time.sleep(0.01)
            if i in db_ids:
                obj = cls(i)
                # Serialize the DB row into the ES document shape.
                db_data = {}
                for propname in cls_searchprops:
                    db_data[propname] = (
                        getattr(cls, propname).search(getattr(obj, propname)))
                if i in es_ids:
                    es_data = elasticsearch.get(cls.table, i)
                    if db_data == es_data:
                        print 'TYPE %s ID %d MATCH' % (cls.table, i)
                    else:
                        print 'UPDATED ES TYPE %s ID %d' % (cls.table, i)
                        elasticsearch.update(cls.table, i, db_data)
                else:
                    print 'CREATED ES TYPE %s ID %d' % (cls.table, i)
                    elasticsearch.create(cls.table, i, db_data)
            else:
                # Row gone from the DB: drop any stale ES document.
                if i in es_ids:
                    print 'DELETED ES TYPE %s ID %d' % (cls.table, i)
                    elasticsearch.delete(cls.table, i)
                else:
                    print 'TYPE %s ID %d DOES NOT EXIST' % (cls.table, i)
                    pass

# Flush/commit pending access-layer work once the sync completes.
# NOTE(review): placed at module level (outside the app context) -- confirm.
done()
| mit | Python | |
43e019ff26e04a6464cad3a10045ba600e98610e | Add __init__.py for monitorlib module. | krux/monitorlib | monitorlib/__init__.py | monitorlib/__init__.py | ### -*- coding: utf-8 -*-
###
### © 2012 Krux Digital, Inc.
### Author: Paul Lathrop <paul@krux.com>
###
"""Library for creating monitoring scripts/plugins."""
| mit | Python | |
2427dbad4fc0cfe7685dc2767069748d37262796 | Add initial version of identification algorithm | divijbindlish/movienamer | movienamer/identify.py | movienamer/identify.py | import os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles=None):
    """Collect scored candidate movies for *filename*.

    Each returned result dict gains three bookkeeping fields:
    ``distance`` -- Levenshtein distance between the sanitized name and the
    candidate title; ``with_year`` -- whether the query carried a year;
    ``count`` -- how many independent queries produced the candidate.

    ``titles`` is unused; it now defaults to None instead of a shared
    mutable dict (kept only for backward compatibility with callers).
    """
    # Sanitize the input filename into a (name, year) pair.
    name, year = sanitize(filename)

    # Start with a basic search.
    results = search(name, year)

    if year is not None and len(results) == 0:
        # No hits with the exact year: allow a tolerance of +/- 1.
        results = search(name, year + 1) + search(name, year - 1)

    # Hoisted loop invariant: the normalized query name never changes.
    normalized_name = unicode(re.sub('[^a-zA-Z0-9]', '', name.lower()))

    zero_distance_results = []
    for result in results:
        distance = Levenshtein.distance(
            normalized_name,
            unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
        )

        # Annotate the candidate in place (each dict is the same object
        # that lives in `results`, so one write suffices).
        result['distance'] = distance
        result['with_year'] = (year is not None)
        result['count'] = 1

        if distance == 0:
            zero_distance_results.append(result)

    if len(zero_distance_results) > 0:
        # Exact title matches trump everything else.
        return zero_distance_results

    if year is not None and len(results) > 0:
        # Year-qualified queries are trusted as-is.
        return results

    # Neither zero-distance results nor a year: accumulate results from
    # the directory one level up.
    if directory is not None:
        dirname = directory.split('/')[-1]
        results_from_directory = identify(dirname)

        # Merge duplicates: candidates seen via both routes get their
        # counts combined. Popularity is the identity key since titles
        # can repeat.
        for r1 in results:
            for r2 in results_from_directory:
                if r1['popularity'] == r2['popularity']:
                    r1['count'] += r2['count']
                    results_from_directory.remove(r2)
                    break

        results = results + results_from_directory

    return results
def identify(filename, directory=None):
    """Rank candidate movies for *filename*, best match first.

    Scoring favors candidates seen by multiple queries (count), close
    title matches (small distance), year-qualified hits, and popularity.
    """
    results = _gather(filename, directory)
    max_distance = 1 + max([result['distance'] for result in results])

    def score(result):
        # Higher is better; distance is inverted against max_distance.
        return (result['count'] ** 1.1) \
            * (max_distance - result['distance']) \
            * (1 + result['with_year']) \
            * (result['popularity'])

    # BUG FIX: the scoring function was previously passed as sorted()'s
    # second positional argument, which Python 2 interprets as a
    # two-argument cmp function (TypeError at runtime) and Python 3
    # rejects outright. It must be passed as key=.
    return sorted(results, key=score, reverse=True)
| mit | Python | |
2ac52ea39a7a8db6cab756e3af2f65b228bb1c09 | Add registration test | zsloan/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2 | test/requests/test-registration.py | test/requests/test-registration.py | import sys
import unittest
import requests
import logging
from elasticsearch import Elasticsearch, TransportError
#from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT
GN2_SERVER = None
ES_SERVER = None
class TestRegistration(unittest.TestCase):
    """End-to-end check: POSTing the registration form creates exactly one
    user document in the Elasticsearch 'users' index.

    Requires a running GN2 server (GN2_SERVER) and Elasticsearch node
    (ES_SERVER); both are module globals set from the command line.
    """

    def setUp(self):
        self.url = GN2_SERVER+"/n/register"
        self.es = Elasticsearch([ES_SERVER])
        # Documents created during the test, removed again in tearDown().
        self.es_cleanup = []
        # Route elasticsearch client logging to files so test output stays clean.
        es_logger = logging.getLogger("elasticsearch")
        es_logger.addHandler(
            logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
        es_trace_logger = logging.getLogger("elasticsearch.trace")
        es_trace_logger.addHandler(
            logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))

    def tearDown(self):
        # Delete every document this test created.
        for item in self.es_cleanup:
            self.es.delete(index="users", doc_type="local", id=item["_id"])

    def testRegistrationPage(self):
        if self.es.ping():
            data = {
                "email_address": "test@user.com",
                "full_name": "Test User",
                "organization": "Test Organisation",
                "password": "test_password",
                "password_confirm": "test_password"
            }
            requests.post(self.url, data)
            # The server should have stored exactly one matching user.
            response = self.es.search(
                index="users"
                , doc_type="local"
                , body={
                    "query": {"match": {"email_address": "test@user.com"}}})
            self.assertEqual(len(response["hits"]["hits"]), 1)
            self.es_cleanup.append(response["hits"]["hits"][0])
        else:
            self.skipTest("The elasticsearch server is down")
def main():
    """Build and run a one-test suite for the registration page."""
    suite = unittest.TestSuite()
    suite.addTest(TestRegistration("testRegistrationPage"))
    unittest.TextTestRunner().run(suite)
if __name__ == "__main__":
    # CLI: test-registration.py <GN2 server URL> <elasticsearch URL>
    GN2_SERVER = sys.argv[1]
    ES_SERVER = sys.argv[2]
    main()
| agpl-3.0 | Python | |
ee3e0d444dd706858a3a30cf52ebc2a960bcfb56 | add a just for funsies pygame renderer | pusscat/refNes | renderer-pygame.py | renderer-pygame.py | import pygame
class Palette():
    """Maps NES palette indices (0-63) to RGB tuples.

    Index 0x40 is a sentinel: it resolves through the PPU's most recent
    background write instead of a fixed table slot.
    """

    def __init__(self, ppu):
        # Keep the PPU so GetColor(0x40) can track lastBGWrite dynamically.
        self.ppu = ppu
        # The 64-entry NES master palette, four rows of 16 colors each.
        self.colors = [
            (0x7C,0x7C,0x7C),(00,00,0xFC),(00,00,0xBC),(44,28,0xBC),(94,00,84),(0xA8,00,20),(0xA8,10,00),(88,14,00),(50,30,00),(00,78,00),(00,68,00),(00,58,00),(00,40,58),(00,00,00),(00,00,00),(00,00,00),
            (0xBC,0xBC,0xBC),(00,78,0xF8),(00,58,0xF8),(68,44,0xFC),(0xD8,00,0xCC),(0xE4,00,58),(0xF8,38,00),(0xE4,0x5C,10),(0xAC,0x7C,00),(00,0xB8,00),(00,0xA8,00),(00,0xA8,44),(00,88,88),(00,00,00),(00,00,00),(00,00,00),
            (0xF8,0xF8,0xF8),(0x3C,0xBC,0xFC),(68,88,0xFC),(98,78,0xF8),(0xF8,78,0xF8),(0xF8,58,98),(0xF8,78,58),(0xFC,0xA0,44),(0xF8,0xB8,00),(0xB8,0xF8,18),(58,0xD8,54),(58,0xF8,98),(00,0xE8,0xD8),(78,78,78),(00,00,00),(00,00,00),
            (0xFC,0xFC,0xFC),(0xA4,0xE4,0xFC),(0xB8,0xB8,0xF8),(0xD8,0xB8,0xF8),(0xF8,0xB8,0xF8),(0xF8,0xA4,0xC0),(0xF0,0xD0,0xB0),(0xFC,0xE0,0xA8),(0xF8,0xD8,78),(0xD8,0xF8,78),(0xB8,0xF8,0xB8),(0xB8,0xF8,0xD8),(00,0xFC,0xFC),(0xF8,0xD8,0xF8),(00,00,00),(00,00,00),
        ]

    def GetColor(self, colorNum):
        """Return the (r, g, b) tuple for *colorNum*."""
        if colorNum != 0x40:
            return self.colors[colorNum]
        return self.colors[self.ppu.lastBGWrite]
class Renderer():
    """Draws a 256x240 palette-indexed framebuffer into a pygame window."""

    def __init__(self, ppu, scale=1):
        self.palette = Palette(ppu)
        pygame.init()
        self.scale = scale
        self.window = pygame.display.set_mode([256*scale, 240*scale])
        pygame.display.set_caption('refNes')

    def Update(self, screen, y):
        """Redraw scanline *y* of *screen* (a flat row-major index array)."""
        row_start = y * 256
        for x in range(256):
            r, g, b = self.palette.GetColor(screen[row_start + x])
            # Each source pixel becomes a scale x scale filled rectangle.
            area = (x * self.scale, y * self.scale, self.scale, self.scale)
            self.window.fill((r, g, b), area)
        # NOTE(review): flip once after the full row is drawn -- confirm
        # this matches the original's intent (indentation was ambiguous).
        pygame.display.flip()
| bsd-2-clause | Python | |
669a4880da91b93c0ba00a2c44ce02e583505f6c | Add a script to generate optical flow for vid files. | myfavouritekk/TPN | tools/data/gen_vid_optical_flow.py | tools/data/gen_vid_optical_flow.py | #!/usr/bin/env python
import argparse
import cv2
import os
import glob
import sys
import numpy as np
import scipy.io as sio
import time
from vdetlib.utils.protocol import proto_load, frame_path_at
def cvReadGrayImg(img_path):
    """Load *img_path* with OpenCV and return it as a grayscale image."""
    bgr = cv2.imread(img_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
def saveOptFlowToImage(flow, basename, merge):
    """Write a flow field to disk: one merged PNG, or separate x/y JPEGs."""
    if not merge:
        cv2.imwrite(basename + '_x.JPEG', flow[..., 0])
        cv2.imwrite(basename + '_y.JPEG', flow[..., 1])
        return
    # save x, y flows to r and g channels, since opencv reverses the colors
    cv2.imwrite(basename + '.png', flow[:, :, ::-1])
if __name__ == '__main__':
    # CLI: gen_vid_optical_flow.py <vid_file> <save_dir> [--bound B] [--merge] [--debug]
    parser = argparse.ArgumentParser()
    parser.add_argument('vid_file')
    parser.add_argument('save_dir')
    parser.add_argument('--bound', type=float, required=False, default=15,
        help='Optical flow bounding.')
    parser.add_argument('--merge', dest='merge', action='store_true')
    parser.add_argument('--debug', dest='visual_debug', action='store_true')
    parser.set_defaults(merge=False, visual_debug=False)
    args = parser.parse_args()

    # Frames are downscaled to this width before flow estimation.
    norm_width = 500.
    bound = args.bound
    vid_proto = proto_load(args.vid_file)
    print ("Processing {}: {} files... ".format(args.vid_file,
        len(vid_proto['frames']))),
    sys.stdout.flush()
    tic = time.time()

    # Compute dense flow between each consecutive frame pair.
    for frame1, frame2 in zip(vid_proto['frames'][:-1],
                              vid_proto['frames'][1:]):
        img_path = frame_path_at(vid_proto, frame1['frame'])
        img1 = cvReadGrayImg(img_path)
        img2 = cvReadGrayImg(frame_path_at(vid_proto, frame2['frame']))
        h, w = img1.shape
        fxy = norm_width / w
        # normalize image size
        flow = cv2.calcOpticalFlowFarneback(
            cv2.resize(img1, None, fx=fxy, fy=fxy),
            cv2.resize(img2, None, fx=fxy, fy=fxy),
            0.5, 3, 15, 3, 7, 1.5, 0)
        # map optical flow back to the original resolution's scale
        flow = flow / fxy
        # normalization: clamp [-bound, bound] into [0, 255]
        flow = np.round((flow + bound) / (2. * bound) * 255.)
        flow[flow < 0] = 0
        flow[flow > 255] = 255
        flow = cv2.resize(flow, (w, h))
        # Fill third channel with zeros (image writers expect 3 channels)
        flow = np.concatenate((flow, np.zeros((h,w,1))), axis=2)
        # save alongside the source frame's basename
        if not os.path.isdir(args.save_dir):
            os.makedirs(args.save_dir)
        basename = os.path.splitext(os.path.basename(img_path))[0]
        saveOptFlowToImage(flow, os.path.join(args.save_dir, basename), args.merge)
        if args.visual_debug:
            # Visualize flow as HSV (angle -> hue, magnitude -> value).
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            hsv = np.zeros_like(cv2.imread(img_path))
            hsv[...,1] = 255
            hsv[...,0] = ang*180/np.pi/2
            hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
            cv2.imshow('optical flow',bgr)
            k = cv2.waitKey(30) & 0xff
            # ESC aborts the debug preview loop.
            if k == 27:
                break

    # duplicate last frame so every frame has a flow file
    basename = os.path.splitext(os.path.basename(
        vid_proto['frames'][-1]['path']))[0]
    saveOptFlowToImage(flow, os.path.join(args.save_dir, basename), args.merge)
    toc = time.time()
    print "{:.2f} min, {:.2f} fps".format((toc-tic) / 60., 1. * len(vid_proto['frames']) / (toc - tic))
| mit | Python | |
75f666ad189c5a799582ce567f0df8b7848066d5 | replace spy solved | xala3pa/Computer-Science-cs101 | Lesson3/replace_spy.py | Lesson3/replace_spy.py | # Define a procedure, replace_spy,
# that takes as its input a list of
# three numbers, and modifies the
# value of the third element in the
# input list to be one more than its
# previous value.
spy = [0,0,7]
def replace_spy(spy):
spy[2] = spy[2] + 1
return spy
# In the test below, the first line calls your
# procedure which will change spy, and the
# second checks you have changed it.
# Uncomment the top two lines below.
replace_spy(spy)
print spy
#>>> [0,0,8]
| mit | Python | |
83e136a0e0d93d1dde4966322a3b51f453d0a1ba | Add simple CSV exporter to examples. | SeNeReKo/TCFlib | tcflib/examples/csv_exporter.py | tcflib/examples/csv_exporter.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter) | mit | Python | |
75cedb719385b70d08805483fbeda07222031f98 | Add comparison MID generator script. | jmtd/freedoom,CWolfRU/freedoom,jmtd/freedoom,CWolfRU/freedoom,jmtd/freedoom | lumps/dmxgus/comparison.py | lumps/dmxgus/comparison.py | # Generate comparison MIDI file.
#
# The comparison MIDI is used for testing and tweaking the similarity
# groups in the configuration file. In each group, the instruments in
# the group should sound broadly similar, and the first in the group
# should be able to substitute for any member of the group.
#
# Each similarity group is played in order, with a pause between each
# instrument. A programme of the instruments is printed to stdout so
# that the user can identify each instrument in order.
#
# To use the comparison MIDI, use a port that supports Timidity for
# MIDI playback, and configure it to use the GUS patch set (use
# music/dgguspat.zip from the idgames archive). Use the "full"
# configuration file (timidity.cfg in the .zip) so that every
# instrument is used for playback. Rename comparison.mid to
# d_runnin.lmp and run with '-file d_runnin.lmp -warp 1', so that the
# music plays.
from config import *
import midi
def instrument_num(instrument):
"""Given a GUS patch name, get the MIDI instrument number.
For percussion instruments, the instrument number is offset
by 128.
"""
for key, name in GUS_INSTR_PATCHES.items():
if name == instrument:
return key
raise Exception('Unknown instrument %s' % instrument)
pattern = midi.Pattern(resolution=48)
track = midi.Track()
track.append(midi.ControlChangeEvent(tick=0, channel=9, data=[7, 92]))
time = 500
for group in SIMILAR_GROUPS:
# Don't bother with special effects.
#if group[0] == 'blank':
# continue
print "Group: "
empty = True
for instrument in group:
inum = instrument_num(instrument)
# For normal instruments, play a couple of different notes.
# For percussion instruments, play a couple of note on
# the percussion channel.
if inum <= 128:
print "\t%s (%i)" % (instrument, inum)
track.extend([
midi.ProgramChangeEvent(tick=time, channel=0,
data=[inum]),
midi.NoteOnEvent(tick=0, channel=0,
velocity=92, pitch=midi.A_3),
midi.NoteOffEvent(tick=50, channel=0,
velocity=92, pitch=midi.A_3),
midi.NoteOnEvent(tick=0, channel=0,
velocity=92, pitch=midi.B_3),
midi.NoteOffEvent(tick=50, channel=0,
velocity=92, pitch=midi.B_3),
])
else:
print "\t%s (percussion %i)" % (instrument, inum-128)
track.extend([
midi.NoteOnEvent(tick=time, channel=9,
velocity=92, pitch=inum-128),
midi.NoteOffEvent(tick=50, channel=9,
pitch=inum-128),
midi.NoteOnEvent(tick=0, channel=9,
velocity=92, pitch=inum-128),
midi.NoteOffEvent(tick=50, channel=9,
pitch=inum-128),
])
empty = False
time = 100
if not empty:
time = 500
# Four drumbeats indicate the end of the track and that the music is
# going to loop.
for i in range(4):
track.extend([
midi.NoteOnEvent(tick=20, channel=9, velocity=92, pitch=35),
midi.NoteOffEvent(tick=20, channel=9, pitch=35),
])
track.append(midi.EndOfTrackEvent(tick=1))
pattern.append(track)
# Save the pattern to disk
midi.write_midifile("comparison.mid", pattern)
| bsd-3-clause | Python | |
a509828f5d5040b1b005fe602ad0e53675b8cb52 | add to test | banzo/mongo-connector,wzeng/mongo-connector,LonelyPale/mongo-connector,apicht-spireon/mongo-connector,lchqfnu/mongo-connector,Sebmaster/mongo-connector,silver-/mongo-connector,imclab/mongo-connector,dgsh/mongo-connector,turbidsoul/mongo-connector,sailthru/mongo-connector,tonyzhu/mongo-connector,10gen-labs/mongo-connector,MartinNowak/mongo-connector,anmolonruby/mongo-connector,TPopovich/mongo-connector,vietanh85/mongo-connector,user-tony/mongo-connector,adammendoza/mongo-connector,skijash/mongo-connector,lchqfnu/mongo-connector,kauppalehti/mongo-connector,algolia/mongo-connector,RapidRatings/mongo-connector,keithhigbee/mongo-connector,shridhar-b/mongo-connector,drdk/mongo-connector,user-tony/mongo-connector,keithhigbee/mongo-connector,wzeng/mongo-connector,idealo/mongo-connector,Philmod/mongo-connector,Branor/mongo-connector,asifhj/mongo-connector,mongodb-labs/mongo-connector,agolo/mongo-connector,jgrivolla/mongo-connector,jtharpla/mongo-connector,silver-/mongo-connector,jaredkipe/mongo-connector,idealo/mongo-connector,NetIQ/mongo-connector,mongodb-labs/mongo-connector,jtharpla/mongo-connector,anuragkapur/mongo-connector,agolo/mongo-connector,anmolonruby/mongo-connector,ShaneHarvey/mongo-connector,ineo4j/mongo-connector,YuriyIlyin/mongo-connector,kpsarthi/mongo-connector,Livefyre/mongo-connector,ineo4j/mongo-connector,takao-s/mongo-connector,Sebmaster/mongo-connector,homerquan/mongo-connector,jgerrish/mongo-connector,adgaudio/mongo-connector,xmasotto/mongo-connector,sailthru/mongo-connector,hannelita/mongo-connector,yeroon/mongo-connector,algolia/mongo-connector,izzui/mongo-connector,zatar-iot/mongo-connector,carl0224/mongo-connector,XDestination/mongo-connector,anuragkapur/mongo-connector,XDestination/mongo-connector,shridhar-b/mongo-connector,MartinNowak/mongo-connector,hzwjava/mongo-connector,hzwjava/mongo-connector,turbidsoul/mongo-connector,imclab/mongo-connector,devopservices/mongo-connector,maxcnunes/mon
go-connector,benjamine/mongo-connector,orchardmile/mongo-connector,zatar-iot/mongo-connector,yeroon/mongo-connector,llvtt/mongo-connector,mvivies/mongo-connector,carl0224/mongo-connector,gsuresh92/mongo-connector,adammendoza/mongo-connector,sat2050/mongo-connector,adgaudio/mongo-connector,asifhj/mongo-connector,drdk/mongo-connector,rohitkn/mongo-connector,Nagriar/mongo-connector,orchardmile/mongo-connector,apicht-spireon/mongo-connector,thelok/mongo-connector,LonelyPale/mongo-connector,gsuresh92/mongo-connector,RapidRatings/mongo-connector,jgerrish/mongo-connector,benjamine/mongo-connector,agarwal-karan/mongo-connector,tonyzhu/mongo-connector,vietanh85/mongo-connector,jaredkipe/mongo-connector,anuragkapur/mongo-connector,skijash/mongo-connector,hannelita/mongo-connector,lzjun567/mongo-connector,mvivies/mongo-connector,10gen-labs/mongo-connector,ShaneHarvey/mongo-connector,YuriyIlyin/mongo-connector,rassor/mongo-connector,sachinkeshav/mongo-connector,dacostaMetaphor/mongo-connector,banzo/mongo-connector,Nagriar/mongo-connector,kpsarthi/mongo-connector | test/solr_doc_manager_tester.py | test/solr_doc_manager_tester.py | import unittest
import time
from solr_doc_manager import SolrDocManager
from pysolr import Solr
class SolrDocManagerTester(unittest.TestCase):
    """Integration test for SolrDocManager against a live local Solr.

    Requires a Solr instance at http://localhost:8080/solr/. The raw
    pysolr client (self.solr) is used as an independent oracle against
    which the manager's behavior is compared.
    """

    def __init__(self):
        super(SolrDocManagerTester, self).__init__()
        # Direct pysolr handle used for verification and cleanup.
        self.solr = Solr("http://localhost:8080/solr/")

    def runTest(self):
        # Invalid URL: the manager should fail soft and carry no client.
        s = SolrDocManager("http://doesntexist.cskjdfhskdjfhdsom")
        self.assertTrue(s.solr is None)

        # valid URL: start from an empty index.
        SolrDoc = SolrDocManager("http://localhost:8080/solr/")
        self.solr.delete(q ='*:*')

        # test upsert: insert, then overwrite the same id.
        docc = {'_id': '1', 'name': 'John'}
        SolrDoc.upsert([docc])
        self.solr.commit()
        res = self.solr.search('*:*')
        for doc in res:
            self.assertTrue(doc['_id'] == '1' and doc['name'] == 'John')
        docc = {'_id': '1', 'name': 'Paul'}
        SolrDoc.upsert([docc])
        self.solr.commit()
        res = self.solr.search('*:*')
        for doc in res:
            self.assertTrue(doc['_id'] == '1' and doc['name'] == 'Paul')

        # test remove
        SolrDoc.remove('1')
        self.solr.commit()
        res = self.solr.search('*:*')
        self.assertTrue(len(res) == 0)

        # test search: manager results must mirror a raw pysolr search.
        docc = {'_id': '1', 'name': 'John'}
        SolrDoc.upsert([docc])
        docc = {'_id': '2', 'name': 'Paul'}
        SolrDoc.upsert([docc])
        self.solr.commit()
        search = SolrDoc.search('*:*')
        search2 = self.solr.search('*:*')
        self.assertTrue(len(search) == len(search2))
        self.assertTrue(len(search) != 0)
        for i in range(0,len(search)):
            self.assertTrue(list(search)[i] == list(search2)[i])

        # test solr commit: uncommitted upserts become visible only after
        # the manager's (timed) auto-commit kicks in.
        docc = {'_id': '3', 'name': 'Waldo'}
        SolrDoc.upsert([docc])
        res = SolrDoc.search('Waldo')
        assert(len(res) == 0)
        time.sleep(1)
        res = SolrDoc.search('Waldo')
        assert(len(res) != 0)

        # test get last doc: highest 'ts' wins, ties may return either.
        docc = {'_id': '4', 'name': 'Hare', 'ts': '2'}
        SolrDoc.upsert([docc])
        docc = {'_id': '5', 'name': 'Tortoise', 'ts': '1'}
        SolrDoc.upsert([docc])
        self.solr.commit()
        doc = SolrDoc.get_last_doc()
        self.assertTrue(doc['_id'] == '4')
        docc = {'_id': '6', 'name': 'HareTwin', 'ts':'2'}
        self.solr.commit()
        doc = SolrDoc.get_last_doc()
        self.assertTrue(doc['_id'] == '4' or doc['_id'] == '6');
| apache-2.0 | Python | |
4ca2ca05232357776e64a1e6eb76c0b26663a59e | add semigroup law tester | przemyslawjanpietrzak/pyMonet | testers/semigroup_law_tester.py | testers/semigroup_law_tester.py | class SemigroupLawTester:
def __init__(self, semigroup, value1, value2, value3):
self.semigroup = semigroup
self.value1 = value1
self.value2 = value2
self.value3 = value3
def associativity_test(self):
x = self.semigroup(self.value1)\
.concat(self.semigroup(self.value2))\
.concat(self.semigroup(self.value3))
y = self.semigroup(self.value1).concat(
self.semigroup(self.value2).self.semigroup(self.value3)
)
assert x == y
def test(self):
self.associativity_test()
| mit | Python | |
e0a037a6418b31275b5a00a1f78959e6dc25be17 | Add a script to fix bad svn properties | jrochas/scale-proactive,mnip91/proactive-component-monitoring,ow2-proactive/programming,paraita/programming,PaulKh/scale-proactive,acontes/programming,acontes/programming,mnip91/programming-multiactivities,mnip91/programming-multiactivities,PaulKh/scale-proactive,PaulKh/scale-proactive,acontes/programming,lpellegr/programming,acontes/programming,PaulKh/scale-proactive,ow2-proactive/programming,paraita/programming,fviale/programming,mnip91/programming-multiactivities,jrochas/scale-proactive,mnip91/proactive-component-monitoring,paraita/programming,fviale/programming,mnip91/proactive-component-monitoring,lpellegr/programming,ow2-proactive/programming,jrochas/scale-proactive,fviale/programming,mnip91/programming-multiactivities,fviale/programming,PaulKh/scale-proactive,fviale/programming,fviale/programming,lpellegr/programming,mnip91/proactive-component-monitoring,mnip91/proactive-component-monitoring,ow2-proactive/programming,mnip91/proactive-component-monitoring,acontes/programming,paraita/programming,lpellegr/programming,PaulKh/scale-proactive,acontes/programming,paraita/programming,lpellegr/programming,jrochas/scale-proactive,lpellegr/programming,PaulKh/scale-proactive,ow2-proactive/programming,mnip91/programming-multiactivities,jrochas/scale-proactive,mnip91/programming-multiactivities,jrochas/scale-proactive,paraita/programming,jrochas/scale-proactive,acontes/programming,ow2-proactive/programming | dev/scripts/fix_svn_properties.py | dev/scripts/fix_svn_properties.py | #!/usr/bin/evn python
import sys
import os
import subprocess
# Map of filename suffixes to the svn properties each should carry.
# NOTE: keys are matched via str.endswith(), so 'Makefile' also matches
# any filename that merely ends in "Makefile".
mapping = {
    # Source code: normalize line endings; shell scripts are executable.
    '.c': [['svn:eol-style', 'native']],
    '.cpp': [['svn:eol-style', 'native']],
    '.h': [['svn:eol-style', 'native']],
    '.sh': [['svn:eol-style', 'native'], ['svn:executable', '']],
    # Windows batch scripts keep CRLF endings.
    '.cmd': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'CRLF']],
    '.bat': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'CRLF']],
    # Text and markup formats. ('.txt' was previously listed twice with
    # an identical value; the duplicate key has been removed.)
    '.txt': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.xml': [['svn:mime-type', 'text/xml'], ['svn:eol-style', 'native']],
    '.ent': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.dtd': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.xsd': [['svn:mime-type', 'text/xml'], ['svn:eol-style', 'native']],
    '.xsl': [['svn:mime-type', 'text/xml'], ['svn:eol-style', 'native']],
    '.wsdl': [['svn:mime-type', 'text/xml'], ['svn:eol-style', 'native']],
    '.htm': [['svn:mime-type', 'text/html'], ['svn:eol-style', 'native']],
    '.html': [['svn:mime-type', 'text/html'], ['svn:eol-style', 'native']],
    '.css': [['svn:mime-type', 'text/css'], ['svn:eol-style', 'native']],
    '.js': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.jsp': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.java': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    '.sql': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
    # Binary formats: mime-type only, no eol translation.
    '.doc': [['svn:mime-type', 'application/msword']],
    '.exe': [['svn:mime-type', 'application/octet-stream']],
    '.gif': [['svn:mime-type', 'image/gif']],
    '.gz': [['svn:mime-type', 'application/x-gzip']],
    '.jar': [['svn:mime-type', 'application/java-archive']],
    '.jpg': [['svn:mime-type', 'image/jpeg']],
    '.jpeg': [['svn:mime-type', 'image/jpeg']],
    '.pdf': [['svn:mime-type', 'application/pdf']],
    '.png': [['svn:mime-type', 'image/png']],
    '.tgz': [['svn:mime-type', 'application/octet-stream']],
    '.zip': [['svn:mime-type', 'application/zip']],
    '.class': [['svn:mime-type', 'application/java']],
    'Makefile': [['svn:eol-style', 'native']],
    '.properties': [['svn:mime-type', 'text/plain'], ['svn:eol-style', 'native']],
}
def fix_svn_prop(file):
    """Apply every configured svn property whose suffix matches *file*.

    Exits the process with status 1 on the first failed `svn propset`.
    """
    for ext, props in mapping.items():
        if not file.endswith(ext):
            continue
        for prop in props:
            retcode = subprocess.call(['svn', 'propset', prop[0], prop[1], file])
            if retcode != 0:
                print >> sys.stderr, "svn propset failed on %s %s. Do not commit, revert and please contact me" % (file, prop)
                sys.exit(1)
def walk(root):
print root
for root, dirs, files in os.walk(root):
for file in files:
fix_svn_prop(os.path.join(root, file))
for dir in ['.git', '.svn']:
if dir in dirs:
dirs.remove(dir)
if __name__ == "__main__":
    # Fix properties across the working copy rooted at the current directory.
    walk(os.path.abspath(os.getcwd()))
| agpl-3.0 | Python | |
1c5fef3a34ed421610a4e9a38feb07e6545e5d13 | Add tests for the `dirty_untar` rule | PLNech/thefuck,AntonChankin/thefuck,SimenB/thefuck,gogobebe2/thefuck,BertieJim/thefuck,lawrencebenson/thefuck,barneyElDinosaurio/thefuck,vanita5/thefuck,mcarton/thefuck,redreamality/thefuck,subajat1/thefuck,mbbill/thefuck,vanita5/thefuck,qingying5810/thefuck,thinkerchan/thefuck,bigplus/thefuck,mlk/thefuck,AntonChankin/thefuck,SimenB/thefuck,subajat1/thefuck,MJerty/thefuck,ostree/thefuck,bugaevc/thefuck,princeofdarkness76/thefuck,Clpsplug/thefuck,manashmndl/thefuck,thesoulkiller/thefuck,LawrenceHan/thefuck,NguyenHoaiNam/thefuck,sekaiamber/thefuck,beni55/thefuck,mlk/thefuck,Clpsplug/thefuck,hxddh/thefuck,PLNech/thefuck,scorphus/thefuck,levythu/thefuck,barneyElDinosaurio/thefuck,nvbn/thefuck,Aeron/thefuck,bigplus/thefuck,thinkerchan/thefuck,manashmndl/thefuck,mcarton/thefuck,MJerty/thefuck,roth1002/thefuck,princeofdarkness76/thefuck,scorphus/thefuck,BertieJim/thefuck,beni55/thefuck,zhangzhishan/thefuck,hxddh/thefuck,lawrencebenson/thefuck,levythu/thefuck,LawrenceHan/thefuck,nvbn/thefuck,artiya4u/thefuck,thesoulkiller/thefuck,ostree/thefuck,roth1002/thefuck,redreamality/thefuck | tests/rules/test_dirty_untar.py | tests/rules/test_dirty_untar.py | import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
    """Factory fixture: build a tar archive that dirtied the cwd.

    Calling the returned function creates <filename> inside tmpdir
    containing files a, b, c, extracts it into tmpdir (simulating a
    "dirty" untar), and chdirs into tmpdir.
    """
    def fixture(filename):
        path = os.path.join(str(tmpdir), filename)

        def reset(path):
            # Create the archive from three throwaway one-byte files...
            with tarfile.TarFile(path, 'w') as archive:
                for file in ('a', 'b', 'c'):
                    with open(file, 'w') as f:
                        f.write('*')
                    archive.add(file)
                    os.remove(file)
            # ...then extract it so the members litter the directory.
            with tarfile.TarFile(path, 'r') as archive:
                archive.extractall()

        os.chdir(str(tmpdir))
        reset(path)
        # Sanity check: archive plus its extracted members are present.
        assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
    return fixture
# Archive names covering the tar suffixes the rule must recognize.
parametrize_filename = pytest.mark.parametrize('filename', [
    'foo.tar',
    'foo.tar.gz',
    'foo.tgz'])

# (broken command, expected fixed command) pairs; {} is the archive name.
parametrize_script = pytest.mark.parametrize('script, fixed', [
    ('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
    ('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
    ('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
| mit | Python | |
c5276d469b08b3262490047f2372a477814cb2fc | add server test for statelessCompute | radiasoft/sirepo,radiasoft/sirepo,radiasoft/sirepo,radiasoft/sirepo,mkeilman/sirepo,mkeilman/sirepo,mkeilman/sirepo,radiasoft/sirepo,mkeilman/sirepo | tests/stateless_compute_test.py | tests/stateless_compute_test.py | # -*- coding: utf-8 -*-
u"""Test statelessCompute API
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
import pytest
def test_madx_calculate_bunch_parameters(fc):
from pykern import pkunit
r = _do(fc, 'calculate_bunch_parameters')
pkunit.pkok(r.command_beam, 'unexpected response={}', r)
def test_uknown_method(fc):
from pykern import pkunit
m = 'uknown'
r = _do(fc, m)
pkunit.pkre(f'method={m} not defined in schema', r.error)
def _do(fc, method):
t = 'madx'
d = fc.sr_sim_data(sim_name='FODO PTC', sim_type=t)
return fc.sr_post(
'statelessCompute',
PKDict(
bunch=d.models.bunch,
command_beam=d.models.command_beam,
method=method,
simulationId=d.models.simulation.simulationId,
simulationType=t,
variables=d.models.rpnVariables,
),
)
| apache-2.0 | Python | |
9b8069f66988ccdbfc76fdbbc7efb78285ed9900 | Bump version to S22.1 | AleksNeStu/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,vladan-m/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core | src/ggrc/settings/default.py | src/ggrc/settings/default.py | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
DEBUG = False
TESTING = False
# Flask-SQLAlchemy fix to be less than `wait_time` in /etc/mysql/my.cnf
SQLALCHEMY_POOL_RECYCLE = 120
# Settings in app.py
AUTOBUILD_ASSETS = False
ENABLE_JASMINE = False
DEBUG_ASSETS = False
FULLTEXT_INDEXER = None
USER_PERMISSIONS_PROVIDER = None
EXTENSIONS = []
exports = []
# Deployment-specific variables
COMPANY = "Company, Inc."
COMPANY_LOGO_TEXT = "Company GRC"
VERSION = "s22.1"
# Initialize from environment if present
import os
SQLALCHEMY_DATABASE_URI = os.environ.get('GGRC_DATABASE_URI', '')
SECRET_KEY = os.environ.get('GGRC_SECRET_KEY', 'Replace-with-something-secret')
| # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
DEBUG = False
TESTING = False
# Flask-SQLAlchemy fix to be less than `wait_time` in /etc/mysql/my.cnf
SQLALCHEMY_POOL_RECYCLE = 120
# Settings in app.py
AUTOBUILD_ASSETS = False
ENABLE_JASMINE = False
DEBUG_ASSETS = False
FULLTEXT_INDEXER = None
USER_PERMISSIONS_PROVIDER = None
EXTENSIONS = []
exports = []
# Deployment-specific variables
COMPANY = "Company, Inc."
COMPANY_LOGO_TEXT = "Company GRC"
VERSION = "s22"
# Initialize from environment if present
import os
SQLALCHEMY_DATABASE_URI = os.environ.get('GGRC_DATABASE_URI', '')
SECRET_KEY = os.environ.get('GGRC_SECRET_KEY', 'Replace-with-something-secret')
| apache-2.0 | Python |
6c22f7bf2fe8db39446cddbd0fa9474486101a27 | Add __init__, as django test finder isn't very smart | BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit | toolkit/diary/tests/__init__.py | toolkit/diary/tests/__init__.py | from __future__ import absolute_import
from .test_edit_views import *
from .test_mailout_view import *
from .test_models import *
from .test_public_views import *
| agpl-3.0 | Python | |
623ea9e3d050f347eb404094d049a402b2bb367a | Create config.py | fnielsen/dasem,fnielsen/dasem | dasem/config.py | dasem/config.py | """config"""
from os.path import expanduser, join
def data_directory():
return join(expanduser('~'), 'dasem_data')
| apache-2.0 | Python | |
e35586efcfc0af4dcfe02c005a1435767f5ab3ed | add merge_book_lists.py | michael-ruan/crawler | douban_spider/merge_book_lists.py | douban_spider/merge_book_lists.py | # -*- coding: UTF-8 -*-
import bloom_filter
import sys
# 把str编码由默认ascii(python2为ascii,python3为utf8)改为utf8
reload(sys)
sys.setdefaultencoding('utf8')
"""
Merge book list files into one, using bloom filter to remove duplicate books
"""
def main():
file_name = 'book_list'
bf = bloom_filter.BloomFilter(2000,14)
with open(file_name, 'a') as fn:
for i in range(len(sys.argv)):
if i > 0:
with open(sys.argv[i]) as f:
for line in f.readlines():
if line != '\n' and bf.add(line.strip()) == False:
fn.write(line.strip() + '\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "必须输入至少两个参数(书籍列表文件)"
else:
main() | mit | Python | |
62fb38d0860b5feeee39764b6c66f5ceed39b984 | Fix versions of protected/unprotected documents | c2corg/v6_api,c2corg/v6_api,c2corg/v6_api | alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py | alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py | """Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
| agpl-3.0 | Python | |
ebd8d2fb86b925f3c75ddfea0bbe9d7ab60b50b7 | add notes for subprocess module | mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs | abc/sub_process.py | abc/sub_process.py | # -*- coding: UTF-8 -*-
__author__ = 'mcxiaoke'
import subprocess
# 创建子进程并等待它返回,参数是list
subprocess.call(['ls', '-a'])
# 同上,但是子进程返回值不是0时会抛异常
subprocess.check_call(['ls', '-a'])
# subprocess.check_call(['ls2', '-la'])
# 同上,但是返回值以字符串的形式返回
# 如果要捕获标准错误输出,可以用stderr=subprocess.STDOUT
ret = subprocess.check_output(['ls', '-a'])
print ret
| apache-2.0 | Python | |
36b4e37972501ce9fa84e9d74a3cfe726681209e | Add tests for numpy array ufuncs | stefanseefeld/numba,gdementen/numba,pitrou/numba,gdementen/numba,pitrou/numba,numba/numba,stuartarchibald/numba,GaZ3ll3/numba,GaZ3ll3/numba,GaZ3ll3/numba,GaZ3ll3/numba,ssarangi/numba,stefanseefeld/numba,IntelLabs/numba,gmarkall/numba,pombredanne/numba,ssarangi/numba,stonebig/numba,numba/numba,GaZ3ll3/numba,cpcloud/numba,sklam/numba,cpcloud/numba,ssarangi/numba,seibert/numba,stefanseefeld/numba,seibert/numba,jriehl/numba,seibert/numba,cpcloud/numba,IntelLabs/numba,pitrou/numba,IntelLabs/numba,ssarangi/numba,cpcloud/numba,pombredanne/numba,pombredanne/numba,stefanseefeld/numba,jriehl/numba,stuartarchibald/numba,stuartarchibald/numba,pombredanne/numba,stuartarchibald/numba,cpcloud/numba,seibert/numba,gmarkall/numba,pombredanne/numba,sklam/numba,numba/numba,numba/numba,sklam/numba,ssarangi/numba,stonebig/numba,numba/numba,gdementen/numba,gdementen/numba,IntelLabs/numba,jriehl/numba,jriehl/numba,stuartarchibald/numba,gdementen/numba,stonebig/numba,seibert/numba,pitrou/numba,gmarkall/numba,stonebig/numba,gmarkall/numba,sklam/numba,stefanseefeld/numba,sklam/numba,pitrou/numba,IntelLabs/numba,stonebig/numba,gmarkall/numba,jriehl/numba | numba/tests/test_ufuncs.py | numba/tests/test_ufuncs.py | from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
# unary ufuncs
def absolute_ufunc(x, result):
np.absolute(x, result)
def exp_ufunc(x, result):
np.exp(x, result)
def sin_ufunc(x, result):
np.sin(x, result)
def cos_ufunc(x, result):
np.cos(x, result)
def tan_ufunc(x, result):
np.tan(x, result)
# binary ufuncs
def add_ufunc(x, y, result):
np.add(x, y, result)
def subtract_ufunc(x, y, result):
np.subtract(x, y, result)
def multiply_ufunc(x, y, result):
np.multiply(x, y, result)
def divide_ufunc(x, y, result):
np.divide(x, y, result)
class TestUFuncs(unittest.TestCase):
def test_unary_ufuncs(self):
ufunc_list = [absolute_ufunc, exp_ufunc, sin_ufunc, cos_ufunc,
tan_ufunc]
arraytypes = [types.Array(types.int32, 1, 'C'),
types.Array(types.int64, 1, 'C'),
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')]
x_operands = [np.arange(-10, 10, dtype='i4'),
np.arange(-10, 10, dtype='i8'),
np.arange(-1, 1, 0.1, dtype='f4'),
np.arange(-1, 1, 0.1, dtype='f8')]
for arraytype, x_operand in zip(arraytypes, x_operands):
for ufunc in ufunc_list:
pyfunc = ufunc
cr = compile_isolated(pyfunc, (arraytype, arraytype))
cfunc = cr.entry_point
result = np.zeros(x_operand.size, dtype=x_operand.dtype)
cfunc(x_operand, result)
control = np.zeros(x_operand.size, dtype=x_operand.dtype)
ufunc(x_operand, control)
self.assertTrue((result == control).all())
def test_binary_ufuncs(self):
ufunc_list = [add_ufunc, subtract_ufunc, multiply_ufunc, divide_ufunc]
arraytypes = [types.Array(types.int32, 1, 'C'),
types.Array(types.int64, 1, 'C'),
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')]
xy_operands = [np.arange(-10, 10, dtype='i4'),
np.arange(-10, 10, dtype='i8'),
np.arange(-1, 1, 0.1, dtype='f4'),
np.arange(-1, 1, 0.1, dtype='f8')]
for arraytype, xy_operand in zip(arraytypes, xy_operands):
for ufunc in ufunc_list:
pyfunc = ufunc
cr = compile_isolated(pyfunc, (arraytype, arraytype, arraytype))
cfunc = cr.entry_point
result = np.zeros(xy_operand.size, dtype=xy_operand.dtype)
cfunc(xy_operand, xy_operand, result)
control = np.zeros(xy_operand.size, dtype=xy_operand.dtype)
ufunc(xy_operand, xy_operand, control)
self.assertTrue((result == control).all())
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python | |
47f61ce40319100b7f226538a466c584d22d4f72 | Add character filtering benchmark | coblo/isccbench | iscc_bench/textid/bench_remove.py | iscc_bench/textid/bench_remove.py | # -*- coding: utf-8 -*-
"""Benchmark Character Removals
[Cc] Other, Control
[Cf] Other, Format
[Cn] Other, Not Assigned (no characters in the file have this property)
[Co] Other, Private Use
[Cs] Other, Surrogate
[LC] Letter, Cased
[Ll] Letter, Lowercase
[Lm] Letter, Modifier
[Lo] Letter, Other
[Lt] Letter, Titlecase
[Lu] Letter, Uppercase
[Mc] Mark, Spacing Combining
[Me] Mark, Enclosing
[Mn] Mark, Nonspacing
[Nd] Number, Decimal Digit
[Nl] Number, Letter
[No] Number, Other
[Pc] Punctuation, Connector
[Pd] Punctuation, Dash
[Pe] Punctuation, Close
[Pf] Punctuation, Final quote (may behave like Ps or Pe depending on usage)
[Pi] Punctuation, Initial quote (may behave like Ps or Pe depending on usage)
[Po] Punctuation, Other
[Ps] Punctuation, Open
[Sc] Symbol, Currency
[Sk] Symbol, Modifier
[Sm] Symbol, Math
[So] Symbol, Other
[Zl] Separator, Line
[Zp] Separator, Paragraph
[Zs] Separator, Space
Benchmark Results:
Total 150.59351921081543 ms for remove_translate
Total 236.33980751037598 ms for remove_loop_set
Total 471.7671871185303 ms for remove_loop_cat
Total 212651.8795490265 ms for remove_regex
"""
import re
import time
import unicodedata
from os.path import exists
from tqdm import tqdm
from iscc_bench.readers.gutenberg import gutenberg
from iscc_bench.textid.unicode_blocks import codepoints
from iscc_bench.utils import load_text_file
FILTER_CATEGORIES = "CPZ"
def generate_blacklist() -> str:
all_chars = (chr(i) for i in codepoints())
bl = "".join(
c for c in all_chars if unicodedata.category(c)[0] in FILTER_CATEGORIES
)
return bl
def save_blacklist():
data = generate_blacklist().encode("utf8", "ignore")
with open(f"blacklist_{FILTER_CATEGORIES}.txt", "wb") as outf:
outf.write(data)
def load_blacklist():
fname = f"blacklist_{FILTER_CATEGORIES}.txt"
if not exists(fname):
save_blacklist()
with open(fname, "r", encoding="utf8") as infile:
bl = infile.read()
return bl
all_chars = (chr(i) for i in codepoints())
blacklist = "".join(
c for c in all_chars if unicodedata.category(c)[0] in FILTER_CATEGORIES
)
blset = {c for c in blacklist}
blacklist_tbl = str.maketrans(dict.fromkeys(blset))
blacklist_re = re.compile("[%s]" % re.escape(blacklist))
print(f"Size of blacklisted character set: {len(blset)}\n")
def remove_translate(text):
text = unicodedata.normalize("NFC", text)
text = text.translate(blacklist_tbl)
return text.lower()
def remove_loop_set(text):
text = unicodedata.normalize("NFC", text)
out = []
for c in text:
if c not in blset:
out.append(c.lower())
return "".join(out)
def remove_loop_cat(text):
text = unicodedata.normalize("NFC", text)
out = []
for c in text:
if unicodedata.category(c)[0] not in FILTER_CATEGORIES:
out.append(c.lower())
return "".join(out)
def remove_regex(text):
text = unicodedata.normalize("NFC", text)
text = text.lower()
return blacklist_re.sub("", text)
funcs = (remove_loop_cat, remove_loop_set, remove_translate, remove_regex)
def benchmark():
for func in funcs:
rt = 0
print(f"Benchmarking {func.__name__}:")
for fp in list(gutenberg())[:3]:
text = load_text_file(fp)
start = time.time()
result = func(text)
end = time.time()
rt += (end - start) * 1000
print(f"Total {rt} ms for {func.__name__}\n\n")
if __name__ == "__main__":
benchmark()
| bsd-2-clause | Python | |
0e6fb27d26d5f0570baa414e679b96d6c3234491 | add correct loop file (#8) | CrookedY/AirPollutionBot | looptogetdata2.py | looptogetdata2.py | from urllib2 import Request, urlopen, URLError
import json
import pandas
def getValidTimeseriesKey(timerseries_keys, offering_id):
invalid_offering = '9999999999'
if offering_id == invalid_offering:
return timeseries_keys[1]
else:
return timeseries_keys[0]
requestpoll = Request ('http://dd.eionet.europa.eu/vocabulary/aq/pollutant/json')
try:
response = urlopen(requestpoll)
pollutant_prop = response.read()
except URLError, e:
print 'error:', e
json_pollutantlist = json.loads(pollutant_prop)
jsonpollutantlistdictionaries = json_pollutantlist[u'concepts']
listofpollutants = {}
for pollutant in jsonpollutantlistdictionaries:
statID = pollutant['@id']
pollutantname = pollutant[u'prefLabel'][0]['@value']
listofpollutants.update ({statID:pollutantname})
allstations = pandas.read_csv("liststations.csv")
allstations = allstations.drop('Unnamed: 0', 1)
allstations = allstations.set_index('ID')
ID=(3907, 3903)
listofstationsdata = []
for i in ID:
url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(i))
request2 = Request (url)
try:
response = urlopen(request2)
station_data = response.read()
except URLError, e:
print 'error:', e
station_prop_json = json.loads (station_data)
station_time_series = station_prop_json[u'properties'][u'timeseries']
timeseries_keys = (station_time_series.keys())
first_timeseries = station_time_series[timeseries_keys[0]]
offering_id = first_timeseries[u'offering'][u'id']
first_timeserieskey = getValidTimeseriesKey(timeseries_keys, offering_id)
station_pollutant = first_timeseries[u'category'][u'id']
station_ID = first_timeseries[u'feature'][u'id']
StationName = allstations.loc[(int(station_ID) , 'place')]
PollutantName = listofpollutants.get(station_pollutant)
url2getdata = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(first_timeserieskey) +'/getData')
request_time_series_data = Request(url2getdata)
try:
response = urlopen(request_time_series_data)
time_series_data = response.read()
except URLError, e:
print 'error:', e
listofstationsdata.append((StationName, PollutantName, time_series_data))
print listofstationsdata
| apache-2.0 | Python | |
eb46f8046211eff81320faceda0c297b27bb419b | Add a new alert plugin for events from geomodel | mozilla/MozDef,Phrozyn/MozDef,ameihm0912/MozDef,jeffbryner/MozDef,netantho/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,mozilla/MozDef,jeffbryner/MozDef,netantho/MozDef,mpurzynski/MozDef,mozilla/MozDef,ameihm0912/MozDef,netantho/MozDef,mpurzynski/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,netantho/MozDef,Phrozyn/MozDef,mozilla/MozDef,mpurzynski/MozDef,ameihm0912/MozDef,Phrozyn/MozDef,Phrozyn/MozDef | alerts/geomodel.py | alerts/geomodel.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
#
# Contributors:
# Aaron Meihm <ameihm@mozilla.com>
from lib.alerttask import AlertTask
import pyes
class AlertGeomodel(AlertTask):
def main(self):
date_timedelta = dict(minutes=30)
must = [
pyes.TermFilter('_type', 'geomodel'),
pyes.TermFilter('category', 'geomodelnotice'),
]
self.filtersManual(date_timedelta, must=must, must_not=[])
self.searchEventsSimple()
self.walkEvents()
# Set alert properties
def onEvent(self, event):
category = 'geomodel'
tags = ['geomodel']
severity = 'NOTICE'
summary = event['_source']['summary']
return self.createAlertDict(summary, category, tags, [event], severity)
| mpl-2.0 | Python | |
6898b9462823449e767aa75b7ab38c3e87b61cc1 | Check for page changes | wave2/moinmoinextensions | macro/IsRecent.py | macro/IsRecent.py | # -*- coding: iso-8859-1 -*-
u"""
IsRecent - Check if a page was recently modified and highlight that fact
@copyright: 2012 by Alan Snelson
@license: BSD, see LICENSE for details.
"""
from datetime import datetime
from MoinMoin.Page import Page
Dependencies = ['pages']
def macro_IsRecent(macro, pageName=''):
fmt = macro.formatter
if (pageName == ''):
return fmt.text('No page supplied')
request = macro.request
page = Page(request,pageName)
log = page.lastEditInfo(request)
now = datetime.now()
delta = now - datetime.strptime(log['time'], "%Y-%m-%d %H:%M:%S")
if (delta.days > 7):
return fmt.rawHTML("<a href='http://mywiki" + pageName + "'>" + pageName + "</a>")
else:
return fmt.rawHTML("<a style='color:black;font-size:20px;' href='http://mywiki" + pageName + "'>" + pageName + "</a>")
| bsd-3-clause | Python | |
fe5369253a79b9ec42d8b438112cd7e0eb61955a | Add multiple viewport example | Eric89GXL/vispy,sh4wn/vispy,bollu/vispy,inclement/vispy,dchilds7/Deysha-Star-Formation,jdreaver/vispy,dchilds7/Deysha-Star-Formation,julienr/vispy,sh4wn/vispy,QuLogic/vispy,inclement/vispy,srinathv/vispy,michaelaye/vispy,sbtlaarzc/vispy,RebeccaWPerry/vispy,julienr/vispy,drufat/vispy,jdreaver/vispy,ghisvail/vispy,RebeccaWPerry/vispy,ghisvail/vispy,RebeccaWPerry/vispy,hronoses/vispy,sh4wn/vispy,Eric89GXL/vispy,drufat/vispy,Eric89GXL/vispy,kkuunnddaannkk/vispy,jay3sh/vispy,dchilds7/Deysha-Star-Formation,srinathv/vispy,bollu/vispy,drufat/vispy,jdreaver/vispy,hronoses/vispy,jay3sh/vispy,kkuunnddaannkk/vispy,michaelaye/vispy,QuLogic/vispy,inclement/vispy,michaelaye/vispy,ghisvail/vispy,kkuunnddaannkk/vispy,jay3sh/vispy,bollu/vispy,sbtlaarzc/vispy,julienr/vispy,sbtlaarzc/vispy,srinathv/vispy,QuLogic/vispy,hronoses/vispy | examples/multiple_viewports/main.py | examples/multiple_viewports/main.py | '''
Created on 03/03/2012
@author: adam
'''
import math
from pyglet.gl import *
import pyglet
# over-ride the default pyglet idle loop
import renderer.idle
import renderer.window
from renderer.viewport import Viewport
from renderer.projection_view_matrix import ProjectionViewMatrix
from scene.scene_node import SceneNode
from scene.render_callback_node import RenderCallbackNode
from scene.camera_node import CameraNode
import maths.quaternion
from examples.render_callbacks import grid
class Application( object ):
def __init__( self ):
super( Application, self ).__init__()
# setup our opengl requirements
config = pyglet.gl.Config(
depth_size = 16,
double_buffer = True
)
# create our window
self.window = pyglet.window.Window(
fullscreen = False,
width = 1024,
height = 768,
config = config
)
# create a viewport that spans
# the entire screen
self.viewport = Viewport(
[ 0.0, 0.0, 1.0, 1.0 ]
)
# make the viewport slightly off the edge
# to avoid seams showing through
self.viewport_2 = Viewport(
[ 0.7, -0.01, 0.31, 0.3]
)
# setup our scene
self.setup_scene()
# setup our update loop the app
# we'll render at 60 fps
frequency = 60.0
self.update_delta = 1.0 / frequency
self.fps_display = pyglet.clock.ClockDisplay()
# use a pyglet callback for our render loop
pyglet.clock.schedule_interval(
self.step,
self.update_delta
)
print "Rendering at %iHz" % int(frequency)
def setup_scene( self ):
# create a scene
self.scene_node = SceneNode( '/root' )
# create a grid to render
self.grid_node = RenderCallbackNode(
'/grid',
grid.initialise_grid,
grid.render_grid
)
self.scene_node.add_child( self.grid_node )
# rotate the mesh so it is tilting forward
self.grid_node.rotate_object_x( math.pi / 4.0 )
# move the grid backward so we can see it
self.grid_node.translate_inertial_z( -80.0 )
# create a camera and a view matrix
self.view_matrix = ProjectionViewMatrix(
fov = 60.0,
near_clip = 1.0,
far_clip = 200.0
)
self.camera = CameraNode(
'/camera',
self.view_matrix
)
self.scene_node.add_child( self.camera )
# set the viewports camera
self.viewport.set_camera( self.scene_node, self.camera )
self.viewport_2.set_camera( self.scene_node, self.camera )
def run( self ):
pyglet.app.run()
def step( self, dt ):
# rotate the mesh about it's own vertical axis
self.grid_node.rotate_object_y( dt )
# render the scene
viewports = [
self.viewport,
self.viewport_2
]
renderer.window.render( self.window, viewports )
# render the fps
self.fps_display.draw()
# display the frame buffer
self.window.flip()
def main():
# create app
app = Application()
app.run()
app.window.close()
if __name__ == "__main__":
main()
| bsd-3-clause | Python | |
3587666f209a9e88672e9520c033682fcd28035a | add l10n_br_purchase/procurement.py | akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil | l10n_br_purchase/procurement.py | l10n_br_purchase/procurement.py | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2014 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, api
class ProcurementOrder(models.Model):
    """Extend procurement to carry Brazilian fiscal data onto stock moves."""
    _inherit = "procurement.order"
    @api.model
    def _run_move_create(self, procurement):
        # Propagate the purchase line's fiscal category and fiscal position
        # into the stock-move values dict built by the parent implementation,
        # so the l10n_br localisation can generate fiscal documents correctly.
        result = super(ProcurementOrder, self)._run_move_create(procurement)
        if procurement.purchase_line_id:
            result.update({
                'fiscal_category_id': procurement.purchase_line_id.fiscal_category_id.id,
                'fiscal_position': procurement.purchase_line_id.fiscal_position.id,
            })
        return result
b1d643afb07cef02ab607943776ce120a7d47013 | move unit test for matrix-vector conversion to new superoperator test module | cgranade/qutip,anubhavvardhan/qutip,zasdfgbnm/qutip,cgranade/qutip,zasdfgbnm/qutip,qutip/qutip,qutip/qutip,anubhavvardhan/qutip | qutip/tests/test_superoperator.py | qutip/tests/test_superoperator.py | # This file is part of QuTIP.
#
# QuTIP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTIP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTIP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.testing import assert_, run_module_suite
from qutip import *
class TestMatrixVector:
"""
A test class for the QuTiP function for matrix/vector conversion.
"""
def testMatrixVectorMatrix(self):
"""
Superoperator: Conversion matrix to vector to matrix
"""
M = rand(10, 10)
V = mat2vec(M)
M2 = vec2mat(V)
assert_(norm(M-M2) == 0.0)
def testVectorMatrixVector(self):
"""
Superoperator: Conversion vector to matrix to vector
"""
V = rand(100) # a row vector
M = vec2mat(V)
V2 = mat2vec(M).T # mat2vec returns a column vector
assert_(norm(V-V2) == 0.0)
def testVectorMatrixIndexConversion(self):
"""
Superoperator: Conversion between matrix and vector indices
"""
N = 10
for I in range(N*N):
i,j = vec2mat_index(N, I)
I2 = mat2vec_index(N, i, j)
assert_(I == I2)
def testVectorMatrixIndexCompability(self):
"""
Superoperator: Test compability between matrix/vector conversion and the corresponding index conversion.
"""
N = 10
M = rand(N, N)
V = mat2vec(M)
for I in range(N*N):
i,j = vec2mat_index(N, I)
assert_(V[I][0] == M[i,j])
if __name__ == "__main__":
run_module_suite() | bsd-3-clause | Python | |
b39dd2afea1f4662e17a927e7e6aa41e850f7470 | Add a script for generating jamo character table | GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters | lib/gen-hangul.py | lib/gen-hangul.py | #!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
    """Convert Unicode's Jamo.txt data into a C lookup table.

    `read` parses (codepoint, short name) pairs from the data file and
    `write` emits them as a static array of `struct HangulCharacter`.
    """

    def __init__(self):
        pass

    def read(self, infile):
        """Parse Jamo.txt-style lines from the iterable `infile`.

        Comment lines (starting with '#') and blank lines are skipped.
        Each data line has the form 'CODEPOINT;SHORT_NAME # comment'.
        Returns a list of (codepoint, short_name) string tuples.
        """
        chars = []
        for line in infile:
            if line.startswith('#'):
                continue
            line = line.strip()
            if len(line) == 0:
                continue
            # Split off the trailing comment only once: a '#' inside the
            # comment text must not break the 2-tuple unpacking (the
            # previous maxsplit=2 raised ValueError on such lines).
            data, _comment = line.split('#', 1)
            codepoint, short_name = data.split(';')
            # Strip both fields so padded data files still yield clean
            # hex digits for the 0x{codepoint} output in write().
            chars.append((codepoint.strip(), short_name.strip()))
        return chars

    def write(self, data):
        """Print the C table for `data` (pairs from `read`) to stdout."""
        print('''\
struct HangulCharacter
{
  gunichar uc;
  const char *short_name;
};''')
        print('static const struct HangulCharacter hangul_chars[] =\n  {')
        for codepoint, short_name in data:
            print('    {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
        print('  };')
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='build')
    parser.add_argument('infile', type=argparse.FileType('r'),
                        help='input file')
    args = parser.parse_args()
    builder = Builder()
    # Reopen by name with the BOM-tolerant codec instead of using the
    # argparse-opened handle, then emit the C table to stdout.
    # FIXME: argparse.FileType(encoding=...) is available since Python 3.4
    data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
    builder.write(data)
415e3e1ae3a6c5689f3960d2b3f589cf2c733144 | Create conf.py | ging/fi-ware-idm,ging/fi-ware-idm,ging/fi-ware-idm | conf.py | conf.py | # -*- coding: utf-8 -*-
#
import os
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Override default css to get a larger width for local build
def setup(app):
app.add_stylesheet('mystyle.css')
else:
# Override default css to get a larger width for ReadTheDoc build
html_context = {
'css_files': [
'_static/mystyle.css',
],
}
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FIWARE-SDC'
| apache-2.0 | Python | |
72f1dab3fe50a552480df522f6c8c4a7002a0952 | Add TimestampsMixin exmples | absent1706/sqlalchemy-mixins | examples/timestamp.py | examples/timestamp.py | from __future__ import print_function
import time
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy_mixins import TimestampsMixin
Base = declarative_base()
engine = sa.create_engine("sqlite:///:memory:")
session = scoped_session(sessionmaker(bind=engine))
class BaseModel(Base, TimestampsMixin):
__abstract__ = True
pass
class User(BaseModel):
"""User Model to example."""
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
Base.metadata.create_all(engine)
print("Current time: ", datetime.utcnow())
# Current time: 2019-03-04 03:53:53.605602
bob = User(name="Bob")
session.add(bob)
session.flush()
print("Created Bob: ", bob.created_at)
# Created Bob: 2019-03-04 03:53:53.606765
print("Pre-update Bob: ", bob.updated_at)
# Pre-update Bob: 2019-03-04 03:53:53.606769
time.sleep(5)
bob.name = "Robert"
session.commit()
print("Updated Bob: ", bob.updated_at)
# Updated Bob: 2019-03-04 03:53:58.613044
| mit | Python | |
7799b7a3ea1b1774ce24376ee918376b422daebd | Create cube.py | botlabio/autonomio,botlabio/autonomio | cube.py | cube.py | import numpy as np
import pandas as pd
import keras
import pandas as pd
import keras.preprocessing.text
import somecode as some
class Cube:
    '''
    INTENDED USE > to be called through FastText() class.

    Takes in pandas dataframe with at least two columns where one
    is the dependent variable, and one is text.

    EXAMPLE USE:

    Cube(data,var)

    If there is more than one possible depedent variable in df then
    there you can run the moddle for any of it.
    '''
    def __init__(self,data,var):
        # data: pandas DataFrame with a 'text' column and the column `var`;
        # var: name of the dependent-variable column to model.
        self.data = data
        self.var = var
        self.x,self.y = self._data_sets()
        self.x_train, self.y_train, self.x_test, self.y_test = self._split_data()
    def _word_index(self):
        # One-hot hash each text row into integer word indices; vocabulary
        # size is (arbitrarily) the number of rows in the dataframe.
        out = []
        i = 0
        n = len(self.data)
        for item in self.data.text:
            temp = keras.preprocessing.text.one_hot(item, n, lower=True, split=" ")
            out.insert(i,temp)
            i += 1
        return out
    def _data_sets(self):
        # NOTE(review): `data` is a shuffled copy but _word_index() reads the
        # *unshuffled* self.data.text while y is taken from the shuffled
        # copy -- x and y rows appear misaligned; confirm intended behavior.
        data = self.data.sample(frac=1)
        x = self._word_index()
        y = data[self.var]
        return x,y
    def _split_data(self):
        # Split into test (first two thirds) and train (last third).
        # NOTE(review): test being larger than train looks inverted -- and
        # `length / 3` relies on Python 2 integer division; confirm.
        length = len(self.x)
        i = length - (length / 3)
        self.x_test = self.x[:i]
        self.x_test = np.array(self.x_test)
        self.x_train = self.x[i+1:]
        self.x_train = np.array(self.x_train)
        self.y_test = self.y[:i]
        self.y_test = np.array(self.y_test)
        self.y_train = self.y[i+1:]
        self.y_train = np.array(self.y_train)
        return self.x_train, self.y_train, self.x_test, self.y_test
| mit | Python | |
67d3b321edab1fe50f666d0ada86c8392be07199 | add wire_callback | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | pyaudio/wire_callback.py | pyaudio/wire_callback.py | #!/usr/bin/env python
"""
PyAudio Example: Make a wire between input and output (i.e., record a
few samples and play them back immediately).
This is the callback (non-blocking) version.
"""
import pyaudio
import time
WIDTH = 2
CHANNELS = 2
RATE = 44100
p = pyaudio.PyAudio()
def callback(in_data, frame_count, time_info, status):
    # Echo the captured input buffer straight back to the output stream
    # and tell PyAudio to keep the stream running.
    return (in_data, pyaudio.paContinue)
stream = p.open(format=p.get_format_from_width(WIDTH),
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
stream_callback=callback)
stream.start_stream()
while stream.is_active():
time.sleep(0.1)
stream.stop_stream()
stream.close()
p.terminate()
| mit | Python | |
34815186871e27b977082d9c35dd0adc76d3af9f | update stencilview doc (128 levels, not 8) | Cheaterman/kivy,xiaoyanit/kivy,bliz937/kivy,jffernandez/kivy,tony/kivy,LogicalDash/kivy,xpndlabs/kivy,Shyam10/kivy,manthansharma/kivy,jehutting/kivy,bionoid/kivy,KeyWeeUsr/kivy,jkankiewicz/kivy,bhargav2408/kivy,Cheaterman/kivy,iamutkarshtiwari/kivy,andnovar/kivy,autosportlabs/kivy,CuriousLearner/kivy,bionoid/kivy,iamutkarshtiwari/kivy,kivy/kivy,rnixx/kivy,LogicalDash/kivy,KeyWeeUsr/kivy,arlowhite/kivy,tony/kivy,rafalo1333/kivy,jkankiewicz/kivy,manashmndl/kivy,ernstp/kivy,Ramalus/kivy,yoelk/kivy,yoelk/kivy,viralpandey/kivy,Ramalus/kivy,habibmasuro/kivy,rnixx/kivy,inclement/kivy,edubrunaldi/kivy,Shyam10/kivy,manashmndl/kivy,bliz937/kivy,janssen/kivy,mSenyor/kivy,LogicalDash/kivy,Cheaterman/kivy,ernstp/kivy,matham/kivy,darkopevec/kivy,akshayaurora/kivy,Ramalus/kivy,inclement/kivy,CuriousLearner/kivy,el-ethan/kivy,kivy/kivy,jegger/kivy,arlowhite/kivy,darkopevec/kivy,MiyamotoAkira/kivy,janssen/kivy,MiyamotoAkira/kivy,thezawad/kivy,MiyamotoAkira/kivy,ernstp/kivy,iamutkarshtiwari/kivy,jffernandez/kivy,rnixx/kivy,darkopevec/kivy,youprofit/kivy,MiyamotoAkira/kivy,rafalo1333/kivy,manthansharma/kivy,cbenhagen/kivy,Cheaterman/kivy,KeyWeeUsr/kivy,bhargav2408/kivy,arlowhite/kivy,habibmasuro/kivy,mSenyor/kivy,edubrunaldi/kivy,andnovar/kivy,aron-bordin/kivy,bionoid/kivy,cbenhagen/kivy,andnovar/kivy,cbenhagen/kivy,jffernandez/kivy,aron-bordin/kivy,jkankiewicz/kivy,xiaoyanit/kivy,bionoid/kivy,akshayaurora/kivy,darkopevec/kivy,kivy/kivy,xpndlabs/kivy,jkankiewicz/kivy,thezawad/kivy,Shyam10/kivy,matham/kivy,janssen/kivy,VinGarcia/kivy,rafalo1333/kivy,CuriousLearner/kivy,jehutting/kivy,viralpandey/kivy,habibmasuro/kivy,kived/kivy,ernstp/kivy,matham/kivy,kived/kivy,matham/kivy,vitorio/kivy,manthansharma/kivy,aron-bordin/kivy,jegger/kivy,janssen/kivy,autosportlabs/kivy,vitorio/kivy,KeyWeeUsr/kivy,manthansharma/kivy,jffernandez/kivy,aron-bordin/kivy,autosportlabs/kivy,el-ethan/kivy,youprofit/kivy,viralpa
ndey/kivy,yoelk/kivy,vitorio/kivy,bhargav2408/kivy,kived/kivy,xiaoyanit/kivy,LogicalDash/kivy,mSenyor/kivy,Shyam10/kivy,akshayaurora/kivy,xpndlabs/kivy,el-ethan/kivy,VinGarcia/kivy,youprofit/kivy,jegger/kivy,jegger/kivy,yoelk/kivy,edubrunaldi/kivy,thezawad/kivy,VinGarcia/kivy,jehutting/kivy,tony/kivy,inclement/kivy,manashmndl/kivy,bliz937/kivy | kivy/uix/stencilview.py | kivy/uix/stencilview.py | '''
Stencil View
============
.. versionadded:: 1.0.4
:class:`StencilView` limits the drawing of child widgets to the StencilView's
bounding box. Any drawing outside the bounding box will be clipped (trashed).
The StencilView uses the stencil graphics instructions under the hood. It
provides an efficient way to clip the drawing area of children.
.. note::
As with the stencil graphics instructions, you cannot stack more than 128
stencil-aware widgets.
.. note::
StencilView is not a layout. Consequently, you have to manage the size and
position of its children directly. You can combine (subclass both)
a StencilView and a Layout in order to achieve a layout's behavior.
For example::
class BoxStencil(BoxLayout, StencilView):
pass
'''
__all__ = ('StencilView', )
from kivy.uix.widget import Widget
class StencilView(Widget):
'''StencilView class. See module documentation for more information.
'''
pass
| '''
Stencil View
============
.. versionadded:: 1.0.4
:class:`StencilView` limits the drawing of child widgets to the StencilView's
bounding box. Any drawing outside the bounding box will be clipped (trashed).
The StencilView uses the stencil graphics instructions under the hood. It
provides an efficient way to clip the drawing area of children.
.. note::
As with the stencil graphics instructions, you cannot stack more than 8
stencil-aware widgets.
.. note::
StencilView is not a layout. Consequently, you have to manage the size and
position of its children directly. You can combine (subclass both)
a StencilView and a Layout in order to achieve a layout's behavior.
For example::
class BoxStencil(BoxLayout, StencilView):
pass
'''
__all__ = ('StencilView', )
from kivy.uix.widget import Widget
class StencilView(Widget):
'''StencilView class. See module documentation for more information.
'''
pass
| mit | Python |
e8798ac01d3baed6785ee0683ec4989b97e47003 | Implement local.shell operation | Fizzadar/pyinfra,Fizzadar/pyinfra | pyinfra/modules/local.py | pyinfra/modules/local.py | # pyinfra
# File: pyinfra/modules/local.py
# Desc: run stuff locally, within the context of operations
from functools import partial
from subprocess import Popen, PIPE

import gevent
from termcolor import colored

from pyinfra.api import operation
from pyinfra.api.util import read_buffer
def _run_local(code, hostname, host, print_output=False, print_prefix=None):
    '''Subprocess based implementation of pyinfra/api/ssh.py's run_shell_command.

    Args:
        code (str): the shell command to run locally
        hostname, host: unused; kept so the signature mirrors the SSH
            implementation and the two remain interchangeable
        print_output (bool): whether to echo each output line
        print_prefix (str): prefix prepended to every echoed line

    Returns:
        bool: True if the command exited with status 0.
    '''
    process = Popen(code, shell=True, stdout=PIPE, stderr=PIPE)

    # Note that gevent's subprocess module does not allow for "live" reading from a process,
    # so the readlines() calls below only return once the process is complete. Thus the whole
    # greenlet spawning/etc below is *currently* pointless.

    # TODO: implement fake file object as a pipe to read from/to as buffer, live
    # see: https://bitbucket.org/eriks5/gevent-subprocess/src/550405f060a5f37167c0be042baaee6075b3d28e/src/gevsubprocess/pipe.py?at=default

    stdout_reader = gevent.spawn(
        read_buffer, process.stdout.readlines(),
        print_output=print_output,
        print_func=lambda line: u'{0}{1}'.format(print_prefix, line)
    )
    stderr_reader = gevent.spawn(
        read_buffer, process.stderr.readlines(),
        print_output=print_output,
        print_func=lambda line: u'{0}{1}'.format(print_prefix, colored(line, 'red'))
    )

    # Wait for the process to complete & return
    gevent.wait((stdout_reader, stderr_reader))

    # Only exit status 0 is success; negative statuses mean the process was
    # killed by a signal, so the previous ``<= 0`` check wrongly reported
    # signal-killed commands as successful.
    return process.wait() == 0
@operation
def shell(*code):
    '''Runs shell commands locally in a subprocess.

    Each command in ``code`` becomes one queued (command, args, kwargs)
    step for the operation framework to execute via ``_run_local``.
    '''
    # Bind each command with functools.partial: a plain closure over the
    # comprehension variable is late-binding, which made every step
    # execute the *last* command in ``code``.
    return [
        (partial(_run_local, c), (), {})
        for c in code
    ]
| mit | Python | |
a2b9a17927d851b368d3ef8e869a295c8bd2e86b | add test for default clustering order of SELECT | scylladb/scylla,scylladb/scylla,scylladb/scylla,scylladb/scylla | test/cql-pytest/test_clustering_order.py | test/cql-pytest/test_clustering_order.py | # Copyright 2022-present ScyllaDB
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#############################################################################
# Tests for clustering key ordering, namely the WITH CLUSTERING ORDER BY
# setting in the table schema, and ORDER BY in select.
#
# We have many other tests for this feature - in C++ tests, in translated
# unit tests from Cassandra (cassandra_tests), and its interaction with
# other features (filtering, secondary indexes, etc.) in other test files.
import pytest
from util import new_test_table, unique_key_int
@pytest.fixture(scope="module")
def table_int_desc(cql, test_keyspace):
    """Module-scoped table with an int partition key ``k`` and an int
    clustering key ``c`` stored in DESCENDING order, used to verify the
    default SELECT ordering below."""
    schema="k INT, c INT, PRIMARY KEY (k, c)"
    order="WITH CLUSTERING ORDER BY (c DESC)"
    with new_test_table(cql, test_keyspace, schema, order) as table:
        yield table
# Verify that if a table is created with descending order for its
# clustering key, the default ordering of SELECT is changed to descending
# order. This was contrary to our documentation which used to suggest
# that SELECT always defaults to ascending order.
def test_select_default_order(cql, table_int_desc):
k = unique_key_int()
stmt = cql.prepare(f'INSERT INTO {table_int_desc} (k, c) VALUES ({k}, ?)')
numbers = range(5)
for i in numbers:
cql.execute(stmt, [i])
# In a table created with descending sort order, the default select
# order is descending:
rows = [(i,) for i in numbers]
reverse_rows = [(i,) for i in reversed(numbers)]
assert reverse_rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k}'))
# Confirm that when specifying the order explicitly, both work:
assert rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c ASC'))
assert reverse_rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c DESC'))
# Repeat the same three assertions as above, adding a "limit" of N=3:
N=3
rows = rows[0:N]
reverse_rows = reverse_rows[0:N]
assert reverse_rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} LIMIT {N}'))
assert rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c ASC LIMIT {N}'))
assert reverse_rows == list(cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c DESC LIMIT {N}'))
| agpl-3.0 | Python | |
b602c3467ee5969bc3292b7e494d60b9ccdbbedb | remove sum or c number | anubhavvardhan/qutip,zasdfgbnm/qutip,anubhavvardhan/qutip,zasdfgbnm/qutip,qutip/qutip,cgranade/qutip,cgranade/qutip,qutip/qutip | qutip/tests/test_rand.py | qutip/tests/test_rand.py | #This file is part of QuTIP.
#
# QuTIP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTIP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTIP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011-2013, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.testing import assert_, assert_equal, run_module_suite
from qutip import *
class TestRand:
    """
    A test class for the built-in random quantum object generators.
    """
    def testRandUnitary(self):
        "random Unitary"
        U=array([rand_unitary(5) for k in range(5)])
        for k in range(5):
            # U * U^dagger must equal the identity for every unitary.
            assert_equal(U[k]*U[k].dag()==qeye(5), True)
    def testRandherm(self):
        "random hermitian"
        H=array([rand_herm(5) for k in range(5)])
        for k in range(5):
            assert_equal(H[k].isherm==True, True)
    def testRanddm(self):
        "random density matrix"
        R=array([rand_dm(5) for k in range(5)])
        for k in range(5):
            # Trace must equal one.  The previous one-sided check
            # ``tr()-1.0<1e-15`` was vacuously true for any trace below
            # one; compare the absolute deviation instead.
            assert_(abs(R[k].tr()-1.0)<1e-15)
            #verify all eigvals are >=0.  The previous spelling
            #``not any(...)<0`` parsed as ``not (any(...) < 0)``, i.e. a
            #bool compared to zero, and always passed; the elementwise
            #comparison must be inside any().
            assert_(not any(sp_eigs(R[k],vecs=False)<0))
            #verify hermitian
            assert_(R[k].isherm)
    def testRandket(self):
        "random ket"
        P=array([rand_ket(5) for k in range(5)])
        for k in range(5):
            assert_equal(P[k].type=='ket', True)
if __name__ == "__main__":
run_module_suite()
| #This file is part of QuTIP.
#
# QuTIP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTIP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTIP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011-2013, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.testing import assert_, assert_equal, run_module_suite
from qutip import *
class TestRand:
"""
A test class for the built-in random quantum object generators.
"""
def testRandUnitary(self):
"random Unitary"
U=array([rand_unitary(5) for k in range(5)])
for k in range(5):
assert_equal(U[k]*U[k].dag()==qeye(5), True)
def testRandherm(self):
"random hermitian"
H=array([rand_herm(5) for k in range(5)])
for k in range(5):
assert_equal(H[k].isherm==True, True)
def testRanddm(self):
"random density matrix"
R=array([rand_dm(5) for k in range(5)])
for k in range(5):
assert_equal(sum(R[k].tr())-1.0<1e-15, True)
#verify all eigvals are >=0
assert_(not any(sp_eigs(R[k],vecs=False))<0)
#verify hermitian
assert_(R[k].isherm)
def testRandket(self):
"random ket"
P=array([rand_ket(5) for k in range(5)])
for k in range(5):
assert_equal(P[k].type=='ket', True)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | Python |
796561ed822d64be6fd2ef299093711a8534d0e9 | add package py-lmodule version 0.1.0 (#18856) | iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-lmodule/package.py | var/spack/repos/builtin/packages/py-lmodule/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLmodule(PythonPackage):
    """Lmodule is a Python API for Lmod module system. It's primary purpose is
    to help automate module testing. Lmodule uses Lmod spider tool to query
    all modules in-order to automate module testing. Lmodule can be used with
    environment-modules to interact with module using the Module class."""

    homepage = "https://lmodule.readthedocs.io/en/latest/"
    url = "https://pypi.io/packages/source/l/lmodule/lmodule-0.1.0.tar.gz"
    git = "https://github.com/buildtesters/lmodule"

    # Spack maintainers of this package (GitHub handles).
    maintainers = ['shahzebsiddiqui']

    version('0.1.0', sha256='cac8f3dad2df27b10e051b2c56ccbde1fcdd7044af594d13fd2e4144d3d46a29')

    # lmodule is pure Python but drives an external Lmod installation at
    # runtime, hence the run-type dependency on lmod.
    depends_on('python@3.6.0:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('lmod@7.0:', type='run')
| lgpl-2.1 | Python | |
f3bf91c8a9ba3a043f0ba4a11c2347e9b4a3c8be | Add linkins.script | thelinuxkid/linkins | linkins/script.py | linkins/script.py | import logging
import subprocess
log = logging.getLogger(__name__)
log.propagate = False
handler = logging.StreamHandler()
fmt = logging.Formatter(
fmt='%(script)s: %(stream)s: %(message)s',
)
handler.setFormatter(fmt)
log.addHandler(handler)
def _logscript(fp, **kwargs):
    """Log every line read from *fp*, attaching **kwargs as log-record extras."""
    for raw in fp:
        log.info(raw.strip(), extra=kwargs)
def runscript(path, name):
    """Execute the script at *path* and log its output line by line.

    Each log record is tagged with ``script=name`` and the stream
    (``STDERR``/``STDOUT``) it came from, matching the module log format.
    """
    proc = subprocess.Popen(
        [path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Drain both pipes concurrently via communicate(): reading stderr to
    # EOF before touching stdout (as the previous implementation did)
    # deadlocks whenever the child fills the stdout pipe buffer first.
    stdout, stderr = proc.communicate()
    # Preserve the original reporting order: stderr lines, then stdout.
    _logscript(
        stderr.splitlines(),
        script=name,
        stream='STDERR',
    )
    _logscript(
        stdout.splitlines(),
        script=name,
        stream='STDOUT',
    )
2ff8505db7ee0b4dbf08a2a61d00daaf681f5492 | Create dlpp.py | keirwl/dl_poly_parse | dlpp.py | dlpp.py | #!/usr/bin/env python
# dl_poly_parse
# If ran as script, takes a DL_POLY OUTPUT file and returns the physical properties as a parsed
# file of simple columns, for easy readability by plotting software.
#
# To do:
# * give option to output as csv
# * give option to return properties as horizontally or vertically sorted
# * allow importing as library to get single properties as lists
def getLines(OUTPUT):
    """Read the file at *OUTPUT* and return its lines (newlines kept)."""
    with open(OUTPUT, "r") as handle:
        return handle.readlines()
def getHeaders(OUTPUT, BREAK):
    """Return the property column names from a DL_POLY OUTPUT file.

    The names sit on the three lines starting two after the first BREAK
    separator.  The stray "(s)" unit token is folded back into the "cpu"
    header so names and value columns line up one-to-one.

    Raises:
        ValueError: if BREAK, "(s)" or "cpu" is not found.
    """
    lines = getLines(OUTPUT)
    firstBreak = lines.index(BREAK)
    # Header names span the 3rd-5th lines after the separator.
    headers = lines[firstBreak+2].split() + lines[firstBreak+3].split() + lines[firstBreak+4].split()
    headers.remove("(s)")
    headers[headers.index("cpu")] = "cpu (s)"
    return headers
def getProperty(OUTPUT, property):
    # TODO: not yet implemented -- intended to return the value series
    # for a single named property (see module to-do list in the header).
    pass
def sortList(unsorted):
    """Reorder *unsorted* column-wise (stride 10) and cap at 30 entries.

    Returns the list read down each column of 3 in OUTPUT rather than
    across each row; this puts certain values usefully adjacent to each
    other (e.g. time, step, cpu) but separates others (e.g. alpha, beta,
    gamma).
    """
    reordered = [
        value
        for offset in range(len(unsorted))
        for value in unsorted[offset::10]
    ]
    return reordered[:30]
def getAllProps(OUTPUT, BREAK):
    """Return physical properties as a huge list of lists.

    Scans OUTPUT for every BREAK separator followed by a data record
    (three lines of ten whitespace-separated fields) and gathers the 30
    values column-wise, keeping them as strings.

    Returns:
        tuple: (number of timesteps, header names, one value-list per
        property column)

    Raises:
        IndexError: if no data records are found (``properties`` stays
        empty, so ``properties[0]`` fails).
    """
    lines = getLines(OUTPUT)
    headers = getHeaders(OUTPUT, BREAK)
    properties = []
    for i, l in enumerate(lines):
        if l == BREAK and len(lines[i+1].split()) == 10: # data always found in lines of 10 after BREAK
            values = lines[i+1].split() + lines[i+2].split() + lines[i+3].split()
            if properties == []: # fill with lists of initial values if empty
                properties = [[v] for v in values]
            else: # append otherwise
                for j, p in enumerate(properties):
                    p.append(values[j])
    return len(properties[0]), headers, properties
    # could optimise by initialising each list with zeroes
def main():
    """Parse the DL_POLY OUTPUT file in the CWD into a column file PARSED."""
    # Fixed input/output filenames in the current working directory.
    # NOTE(review): the PARSED constant is never used -- the literal
    # "PARSED" is opened directly below.
    OUTPUT = "OUTPUT"
    PARSED = "PARSED"
    # Separator line that precedes each data record in OUTPUT.
    BREAK = " ------------------------------------------------------------------------------------------------------------------------\n"
    n, headers, properties = getAllProps(OUTPUT, BREAK)
    # Reorder headers and columns so related quantities sit together.
    sortedHeaders = sortList(headers)
    sortedProps = sortList(properties)
    # Header row, then one fixed-width row per timestep.
    parsed = ""
    for h in sortedHeaders:
        parsed += "%-12s" % (h)
    for i in range(0,n):
        parsed += "\n"
        for p in sortedProps:
            parsed += "%-11s " % (p[i])
    with open("PARSED", "w") as f:
        f.write(parsed)
if __name__ == '__main__':
main()
| mit | Python | |
8967d4e0c5cd9adad7244cfc2ea78593be14b113 | Add regression test template | explosion/spacy-dev-resources,explosion/spacy-dev-resources | templates/tests/regression_test.py | templates/tests/regression_test.py | # coding: utf-8
from __future__ import unicode_literals
import pytest
def test_issueXXX():
"""Provide a description of what you're testing for here."""
# to use spaCy components, add the fixture names as arguments to the test
# for more info, check out the tests README:
# https://github.com/explosion/spaCy/blob/master/spacy/tests/README.md
# test stuff
| mit | Python | |
47734733a7ccbd242979b3c7ac9d792f59ac10d8 | Test for HERMES spectra of HD22879 | andycasey/precise-objective-differential-spectroscopy | code/test_hd22879.py | code/test_hd22879.py | import cPickle as pickle
from stellar_parameters import Star
from channel import SpectralChannel
class spectrum(object):
pass
import sick
spec = sick.specutils.Spectrum.load("spectra/hermes-sun.fits")
blue_channel = spectrum()
blue_channel.dispersion = spec.disp
blue_channel.flux = spec.flux
blue_channel.variance = spec.variance
with open("transitions.pkl", "rb") as fp:
transitions = pickle.load(fp)
# Get just blue channel ones
transition_indices = (blue_channel.dispersion[-1] > transitions["rest_wavelength"]) * (transitions["rest_wavelength"] > blue_channel.dispersion[0])
# Wavelength windows (start, end) around individual lines to include in
# the fit mask built below.
# NOTE(review): the third pair [4757.95, 4748.31] is descending, unlike
# every other (start < end) pair, so the searchsorted slice below covers
# essentially nothing -- probably a typo (4758.31?); confirm against the
# line list.
use_regions = np.array([
    [4731.3, 4731.65],
    [4742.65, 4742.93],
    [4757.95, 4748.31],
    [4759.1, 4759.56],
    [4764.43, 4764.47],
    [4778.08, 4778.41],
    [4779.78, 4780.2],
    [4781.59, 4781.92],
    [4788.41, 4789],
    [4789.91, 4790.19],
    [4795.24, 4795.66],
    [4798.39, 4798.64],
    [4802.69, 4803.2],
    [4805.3, 4805.71],
    [4807.95, 4808.35],
    [4820.23, 4820.6],
    [4847.89, 4848.02],
    [4869.85, 4870.3],
    [4873.88, 4874.19],
    [4884.95, 4885.25],
#    [4889.9, 4892.67],
    [4894.7, 4895.0]
])
#use_regions = np.array([
# [4705, 4850.],
# [4880., 5000.]
#])
mask = np.empty(len(blue_channel.dispersion))
mask[:] = np.nan
for row in use_regions:
indices = blue_channel.dispersion.searchsorted(row)
mask[indices[0]:indices[1] + 1] = 1.
print(np.sum(np.isfinite(mask)))
blue = SpectralChannel(blue_channel, transitions[transition_indices], mask=mask, redshift=False, continuum_order=3, wl_tolerance=0.1, wl_cont=2, outliers=True)
| mit | Python | |
1a6f702b670a4cad2ec1cd4044759ecfc656c9f2 | add thread | gitpythonkaka/test,gitpythonkaka/test,gitpythonkaka/test | thread/thread.py | thread/thread.py | #!/usr/bin/env python
import thread
from time import sleep, ctime
def thread0():
    # Worker 1: report start, simulate 4 seconds of work, report end.
    print '1 : start @ ', ctime()
    sleep(4)
    print '1 : end @ ', ctime()
def thread1():
    # Worker 2: identical to thread0, tagged '2'.
    print '2 : start @ ', ctime()
    sleep(4)
    print '2 : end @ ', ctime()
def main():
    # Launch both workers via the low-level Python 2 'thread' module
    # (no join available), then sleep past their 4-second runtime so
    # the main thread does not exit before they finish.
    print 'starting at:', ctime()
    thread.start_new_thread(thread0, ())
    thread.start_new_thread(thread1, ())
    sleep(6)
    print 'all DONE at: ', ctime()
if __name__ == '__main__':
    main()
| unlicense | Python | |
2feed8b291fd4c8081bb81458bedd736c08c448e | Add CNN example script. | openmv/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv,kwagyeman/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,iabdalkader/openmv,kwagyeman/openmv | usr/examples/09-Feature-Detection/cnn.py | usr/examples/09-Feature-Detection/cnn.py | # CMSIS CNN example.
import sensor, image, time, os
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((200, 200)) # Set 128x128 window.
sensor.skip_frames(time = 100) # Wait for settings take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot().lens_corr(1.6) # Take a picture and return the image.
out = img.classify_object()
# print label_id:confidence
#for i in range(0, len(out)):
# print("%s:%d "%(labels[i], out[i]), end="")
max_idx = out.index(max(out))
print("%s : %0.2f%% "%(labels[max_idx], (out[max_idx]/128)*100))
#print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
| mit | Python | |
0080f3a4f93a22b9c563c20d2c93b00ce8b7c382 | Set up game structure | Mailea/hexagonal-game-of-life | game.py | game.py | """
A variant of Conway's Game of Life on a hexagonal grid.
Rules: B2/S12
- Dead cells with two live neighbours are born.
- Live cells with one or two live neighbours survive.
- All other live cells die.
"""
# Rule Configuration
STATES = ('DEAD', 'ALIVE')
B = (2,)
S = (1, 2)
class Game:
    """Drives the simulation: holds the current generation and advances it."""

    def __init__(self, seed, max_steps=100):
        """Start from the *seed* generation and run at most *max_steps* ticks."""
        self.generation = seed
        self.max = max_steps
        self.count = 0

    def play(self):
        """Draw the seed, then repeatedly tick and draw until max is reached."""
        self.generation.draw()
        while self.count < self.max:
            successor = self.generation.tick()
            successor.draw()
            self.generation = successor
            self.count += 1
class Generation:
    """One hexagonal-grid generation of cells.

    Only the grid dimensions are stored so far; cell state, rendering and
    the B2/S12 transition rules (see module header) are still TODO.
    """
    def __init__(self, rows, cols):
        # Grid dimensions (cells per row / per column).
        self.rows = rows
        self.cols = cols
    def draw(self):
        # TODO: render this generation.
        pass
    def tick(self):
        # TODO: must return the next Generation -- Game.play() calls
        # draw()/tick() on the result, so this stub's implicit None
        # breaks the loop after one step.
        pass
    def _survives(self, row, col):
        # TODO: live cell survives with 1 or 2 live neighbours (S12).
        pass
    def _is_born(self, row, col):
        # TODO: dead cell is born with exactly 2 live neighbours (B2).
        pass
| apache-2.0 | Python | |
72e69f3535c7e2cd82cdda62636eabd7421ebddf | Add dump script for all hiddens | judithfan/pix2svg | generative/tests/compare_test/concat_first/dump_hiddens.py | generative/tests/compare_test/concat_first/dump_hiddens.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
| mit | Python | |
6e0202bb2385821907627046aef28b042961a2be | Create gate.py | powerboat9/MinecraftCPUBuild | gate.py | gate.py | mit | Python | ||
49b616ce93ba53bc6029145147a077945c18b604 | add async example | phrocker/sharkbite,phrocker/sharkbite,phrocker/sharkbite,phrocker/sharkbite,phrocker/sharkbite | examples/asyncexample.py | examples/asyncexample.py | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ctypes import cdll
from argparse import ArgumentParser
from ctypes import cdll
import ctypes
import traceback
import time
import asyncio
"""
This is an Example of using the Python connectors. The example will accept user input
create a table writing arbitrary information to it via the BatchWriter and scanner will put the written data
"""
parser = ArgumentParser(description="This is an Apache Accummulo Python connector")
parser.add_argument("-i", "--instance", dest="instance",
help="Apache Accumulo Instance Name", required=True)
parser.add_argument("-z", "--zookeepers", dest="zookeepers",
help="Comma Separated Zookeeper List", required=True)
parser.add_argument("-u", "--username", dest="username",
help="User to access Apache Accumulo", required=True)
parser.add_argument("-p", "--password", dest="password",
help="Password to access Apache Accumulo. May also be supplied at the command line")
parser.add_argument("-t", "--table", dest="table",
help="Table to create/update")
args = parser.parse_args()
password = args.password
table = args.table
async def printasync(iter):
async for keyvalue in iter:
key = keyvalue.getKey()
print(key.getRow())
if not password:
print("Please enter your password")
password = input()
if not table:
table = "blahblahd"
import pysharkbite
configuration = pysharkbite.Configuration()
zk = pysharkbite.ZookeeperInstance(args.instance, args.zookeepers, 1000, configuration)
user = pysharkbite.AuthInfo(args.username, password, zk.getInstanceId())
try:
connector = pysharkbite.AccumuloConnector(user, zk)
table_operations = connector.tableOps(table)
if not table_operations.exists(False):
print ("Creating table " + table)
table_operations.create(False)
else:
print (table + " already exists, so not creating it")
auths = pysharkbite.Authorizations()
""" Add authorizations """
""" mutation.put("cf","cq","cv",1569786960) """
writer = table_operations.createWriter(auths, 10)
mutation = pysharkbite.Mutation("row2");
mutation.put("cf","cq","",1569786960, "value")
mutation.put("cf2","cq2","",1569786960, "value2")
""" no value """
mutation.put("cf3","cq3","",1569786960, "")
writer.addMutation( mutation )
writer.close()
time.sleep(2)
""" auths.addAuthorization("cv") """
scanner = table_operations.createScanner(auths, 2)
startKey = pysharkbite.Key()
endKey = pysharkbite.Key()
startKey.setRow("row")
endKey.setRow("row3")
range = pysharkbite.Range(startKey,True,endKey,False)
scanner.addRange( range )
resultset = scanner.getResultSet()
loop = asyncio.get_event_loop()
loop.run_until_complete(printasync(resultset))
""" delete your table if user did not create temp """
if not args.table:
table_operations.remove()
except RuntimeError as e:
traceback.print_exc()
print("Oops, error caused: " + str(e))
| apache-2.0 | Python | |
e20e50c7cb1a22907bc83eec6c595a7bbaf8b8b9 | Add test_github.py | ymyzk/kawasemi,ymyzk/django-channels | tests/core/backends/test_github.py | tests/core/backends/test_github.py | # -*- coding: utf-8 -*-
import pytest
import requests
from kawasemi.backends.github import GitHubChannel
from kawasemi.exceptions import HttpError, ImproperlyConfigured
config = {
"_backend": "kawasemi.backends.github.GitHubChannel",
"token": "token",
"owner": "ymyzk",
"repository": "kawasemi"
}
@pytest.fixture()
def channel():
return GitHubChannel(**config)
class TestGitHubChannel(object):
def test_send(self, channel, mocker):
post = mocker.patch("requests.post")
response = requests.Response()
response.status_code = requests.codes.created
post.return_value = response
channel.send("My Issue Title")
channel.send("Issue Title", options={
"github": {
"body": """## ToDo
- [ ] Introduce A
- [ ] Refactor B""",
"labels": ["enhancement"],
"assignees": ["ymyzk"]
}
})
def test_send_fail_invalid_token(self, channel, mocker):
post = mocker.patch("requests.post")
response = requests.Response()
response.status_code = requests.codes.unauthorized
post.return_value = response
with pytest.raises(HttpError):
channel.send("Test title", fail_silently=False)
channel.send("Test title", fail_silently=True)
| mit | Python | |
aa1ca0b500af4ef89ba7ad7982b89ebe15252c1b | add heguilong answer for question3 | pythonzhichan/DailyQuestion,pythonzhichan/DailyQuestion | question_3/heguilong.py | question_3/heguilong.py | """
File: heguilong.py
Author: heguilong
Email: hgleagle@gmail.com
Github: https://github.com/hgleagle
Description:
统计一个文件中每个单词出现的次数,列出出现频率最多的5个单词。
"""
import logging
import sys
import re
from collections import Counter
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s \
- %(message)s')
class WordCount:
def __init__(self, file_name):
self.file_name = file_name
def count_word(self, most_num):
"""print most counts words
:most_num: print most counts words
"""
with open(self.file_name, 'r') as f:
data = f.read().lower()
# characters and single quote not split
words = re.split(r'[^\w\']+', data)
logging.debug(words)
most_cnts_words = Counter(words).most_common(most_num)
print(most_cnts_words)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: python3 heguilong.py file_name')
sys.exit()
word_count = WordCount(sys.argv[1])
word_count.count_word(5)
| mit | Python | |
69ba3715c762245e83d6b5388af4b77dfcc43dde | Create dataGenCore.py | kyanyoga/iot_kafka_datagen | bin/dataGenCore.py | bin/dataGenCore.py | #!/usr/bin python
import time
import random
import base64
import os
import sys
# Synthetic log-line generator: appends one random log record per loop
# iteration to the file named by the first command-line argument, forever.
start = time.time()


def main():
    """Append randomly generated log lines to the file given in argv[1].

    Each record looks like::

        2014-01-01T12:00:00.123-0800 LEVEL message [k=v, k=v, ...]

    One of the key=value fields is dropped at random so records vary in
    shape.  The file is opened in append mode on every iteration, so an
    external rotation of the file is picked up automatically.
    """
    # pwd = os.path.dirname(__file__)
    # outputpath = os.path.normpath(pwd + '/../sample_data/' + sys.argv[1])
    outputpath = os.path.normpath(sys.argv[1])

    # To run for five minutes instead, loop while time.time() < start + 300.
    while True:
        t = time.strftime('%Y-%m-%dT%H:%M:%S')
        timezone = time.strftime('%z')
        millis = "%.3d" % (time.time() % 1 * 1000)

        # create random values
        level = random.sample(['DEBUG', 'INFO', 'WARN', 'ERROR'], 1)[0]
        # NOTE: the third message used to be built with a backslash line
        # continuation inside the literal, which leaked source indentation
        # into the emitted log text; it is now one clean string.
        message = random.sample(['Don\'t worry, be happy.',
                                 'error, ERROR, Error!',
                                 'Nothing happened. This is worthless. '
                                 'Don\'t log this.',
                                 'Hello world.'], 1)[0]
        # 'BarClass' is repeated on purpose to weight the distribution.
        logger = random.sample(['FooClass',
                                'BarClass',
                                'AuthClass',
                                'LogoutClass',
                                'BarClass',
                                'BarClass',
                                'BarClass',
                                'BarClass'], 1)[0]
        user = random.sample(['jeff',
                              'mo',
                              'aaron',
                              'rajesh',
                              'sunil',
                              'zach',
                              'gus'], 1)[0]
        # '1.2.3.' is deliberately malformed sample data.
        ip = random.sample(['1.2.3.4',
                            '4.31.2.1',
                            '1.2.3.',
                            '1.22.3.3',
                            '3.2.4.5',
                            '113.2.4.5'], 1)[0]
        req_time = str(int(abs(random.normalvariate(0, 1)) * 1000))
        session_length = str(random.randrange(1, 12240))
        session_id = base64.b64encode(str(random.randrange(1000000, 1000000000)))
        extra = random.sample(['network=qa',
                               'network=prod',
                               'session_length=' + session_length,
                               'session_id="' + session_id + '"',
                               'user=extrauser'], 1)[0]

        fields = ['logger=' + logger,
                  'user=' + user,
                  'ip=' + ip,
                  'req_time=' + req_time,
                  extra]
        # drop one field at random so records are not uniform
        fields.pop(random.randrange(0, len(fields)))

        # append the record; "with" guarantees the handle is closed even
        # if the write fails (the original opened/closed by hand)
        with open(outputpath, 'a+') as outputfile:
            outputfile.write("%s.%s%s %s %s [%s]\n" % (t,
                                                       millis,
                                                       timezone,
                                                       level,
                                                       message,
                                                       ", ".join(fields)))


if __name__ == '__main__':
    main()
| mit | Python | |
df784323d0da737755def4015840d118e3c8e595 | Add test that detects censorship in HTTP pages based on HTTP body length | juga0/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe | nettests/core/http_body_length.py | nettests/core/http_body_length.py | # -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
    # Command line options: a single target URL to test, and the body-length
    # similarity factor below which the two responses are considered
    # different enough to flag censorship (default 0.8 == 80%).
    optParameters = [
        ['url', 'u', None, 'Specify a single URL to test.'],
        ['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
    ]
class HTTPBodyLength(httpt.HTTPTest):
    """
    Performs two GET requests to the site being tested for censorship: one
    over a known good control channel (Tor), the other over the test
    network.

    We then look at the response body lengths and flag censorship if the
    experiment response differs from the control response by more than a
    certain factor.
    """
    name = "HTTP Body length test"
    author = "Arturo Filastò"
    version = "0.1"

    usageOptions = UsageOptions

    inputFile = ['file', 'f', None,
                 'List of URLS to perform GET and POST requests to']

    # Body lengths of the two responses; each callback fills in its own and
    # whichever response arrives second runs the comparison.  None means
    # "not yet received" (0 is a valid, received, empty body).
    control_body_length = None
    experiment_body_length = None

    def setUp(self):
        """
        Check for inputs and normalise options.

        Raises:
            Exception: if neither an input URL nor the --url option is set.
        """
        if self.input:
            self.url = self.input
        elif self.localOptions['url']:
            self.url = self.localOptions['url']
        else:
            raise Exception("No input specified")

        # Values given on the command line arrive as strings while the
        # default is a float; coerce so the comparison below is numeric.
        self.factor = float(self.localOptions['factor'])

    def compare_body_lengths(self):
        """
        Compute the proportion between the two body lengths (always <= 1)
        and report censorship when it falls below ``self.factor``.
        """
        body_length_a = self.control_body_length
        body_length_b = self.experiment_body_length

        if body_length_a == 0 or body_length_b == 0:
            # Guard against ZeroDivisionError: an empty body on one side
            # only is maximally different; two empty bodies are identical.
            rel = 1.0 if body_length_a == body_length_b else 0.0
        else:
            rel = float(body_length_a) / float(body_length_b)
            if rel > 1:
                rel = 1 / rel

        self.report['body_proportion'] = rel
        self.report['factor'] = self.factor
        self.report['censorship'] = rel < self.factor

    def test_get(self):
        """
        Fire the control (over Tor) and experiment GET requests and wait
        for both; the second body to arrive triggers the comparison.
        """
        def control_body(result):
            self.control_body_length = len(result)
            # "is not None" so a zero-length body still counts as received
            if self.experiment_body_length is not None:
                self.compare_body_lengths()

        def experiment_body(result):
            self.experiment_body_length = len(result)
            if self.control_body_length is not None:
                self.compare_body_lengths()

        dl = []
        experiment_request = self.doRequest(self.url, method="GET",
                                            body_processor=experiment_body)
        control_request = self.doRequest(self.url, method="GET",
                                         use_tor=True,
                                         body_processor=control_body)
        dl.append(experiment_request)
        dl.append(control_request)
        return defer.DeferredList(dl)
| bsd-2-clause | Python | |
e546e055b33c776fddaa244075d59a99978265ea | add reading | jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk | vehicles/management/commands/import_reading.py | vehicles/management/commands/import_reading.py | from ciso8601 import parse_datetime
from django.utils.timezone import make_aware
from django.contrib.gis.geos import Point
from busstops.models import Service
from ...models import VehicleLocation, VehicleJourney
from ..import_live_vehicles import ImportLiveVehiclesCommand
class Command(ImportLiveVehiclesCommand):
    """Poll the Reading Buses live vehicle position feed."""

    url = 'http://rtl2.ods-live.co.uk/api/vehiclePositions'
    source_name = 'Reading'
    services = Service.objects.filter(
        operator__in=('RBUS', 'GLRB', 'KENN', 'NADS', 'THVB'), current=True)

    @staticmethod
    def get_datetime(item):
        """Return the observation timestamp as an aware datetime."""
        return make_aware(parse_datetime(item['observed']))

    def get_vehicle(self, item):
        """Fetch or create the Vehicle record for this feed item."""
        code = item['vehicle']
        defaults = {'source': self.source}
        # Purely numeric codes double as fleet numbers.
        if code.isdigit():
            defaults['fleet_number'] = code
        return self.vehicles.get_or_create(
            defaults, operator_id='RBUS', code=code)

    def get_journey(self, item, vehicle):
        """Build a VehicleJourney, reusing the previous location's service
        when the vehicle is still on the same route."""
        journey = VehicleJourney()
        journey.route_name = item['service']

        previous = vehicle.latest_location
        if previous and previous.journey.route_name == journey.route_name:
            journey.service = previous.journey.service
            return journey

        try:
            journey.service = self.services.get(line_name=item['service'])
        except (Service.DoesNotExist, Service.MultipleObjectsReturned) as e:
            print(e, item['service'])
        return journey

    def create_vehicle_location(self, item):
        """Map a feed item to an unsaved VehicleLocation."""
        position = Point(float(item['longitude']), float(item['latitude']))
        return VehicleLocation(
            latlong=position,
            heading=item['bearing'] or None)
| mpl-2.0 | Python | |
3b00930f9c6e6552bef5b5939916a1b8e737287a | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pyaudio/read.py | python/pyaudio/read.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See http://people.csail.mit.edu/hubert/pyaudio/docs/#example-blocking-mode-audio-i-o
import pyaudio
import wave
CHUNK = 1024  # frames read from the wave file per write to the stream

# Open the sample file and show its basic properties.
wf = wave.open("test.wav", 'rb')

print(wf.getnchannels())
print(wf.getframerate())

p = pyaudio.PyAudio()

print(p.get_device_count())

# NOTE(review): output_device_index=3 is specific to the author's machine;
# confirm the index via p.get_device_info_by_index() before reuse.
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True,
                output_device_index=3)

# read data
data = wf.readframes(CHUNK)

# Play the file in CHUNK-sized blocks; readframes returns b'' at EOF.
while len(data) > 0:
    stream.write(data)
    data = wf.readframes(CHUNK)

stream.stop_stream()
stream.close()

p.terminate()
| mit | Python | |
0799f888dd67439fa7ba9a3a26427a21ad804c62 | Add XML plist module | depp/sglib,depp/sglib | scripts/gen/plistxml.py | scripts/gen/plistxml.py | import xml.dom.minidom
import xml.dom as dom
# The methods you want to use are 'load' and 'dump'
# All keys and strings will be unicode
# this will FAIL with non Unicode strings
# this is intentional
class PlistError(ValueError):
pass
def checkwhite(obj):
    """Ensure *obj* is an ignorable, whitespace-only text node.

    Raises PlistError for element nodes and for text containing any
    non-whitespace characters.
    """
    is_blank_text = (obj.nodeType == dom.Node.TEXT_NODE
                     and not obj.data.strip())
    if not is_blank_text:
        raise PlistError('expected whitespace')
def loadString(n):
    """Return the concatenated text content of element *n* as unicode.

    Raises PlistError if *n* has any non-text child.
    """
    parts = []
    for child in n.childNodes:
        if child.nodeType != dom.Node.TEXT_NODE:
            raise PlistError('invalid node in string')
        parts.append(child.data)
    return u''.join(parts)
def loadDict(n):
    """Parse a <dict> element into a Python dict.

    Children must alternate <key> elements and value elements; anything
    else other than ignorable whitespace is an error.

    Raises:
        PlistError: on two keys in a row, a value without a key, a
            duplicate key, or a trailing dangling key.
    """
    d = {}
    key = None
    for child in n.childNodes:
        if child.nodeType == dom.Node.ELEMENT_NODE:
            if child.tagName == 'key':
                if key is not None:
                    # Two <key> elements in a row.
                    # (error message typo "expcted" fixed)
                    raise PlistError('expected value')
                key = loadString(child)
                if key in d:
                    raise PlistError('duplicate key %r' % key)
            else:
                if key is None:
                    raise PlistError('expected key')
                d[key] = loadValue(child)
                key = None
        else:
            checkwhite(child)
    if key is not None:
        # Element ended with a <key> that has no value.
        raise PlistError('expected value')
    return d
def loadArray(n):
    """Parse an <array> element into a Python list, ignoring whitespace
    between the value elements."""
    items = []
    for child in n.childNodes:
        if child.nodeType != dom.Node.ELEMENT_NODE:
            checkwhite(child)
            continue
        items.append(loadValue(child))
    return items
def loadReal(n):
    """Parse a <real> element into a float."""
    text = loadString(n)
    try:
        value = float(text)
    except ValueError:
        raise PlistError('invalid real')
    return value
def loadInteger(n):
    """Parse an <integer> element into an int."""
    text = loadString(n)
    try:
        value = int(text)
    except ValueError:
        raise PlistError('invalid integer')
    return value
def checknone(x):
    # A value element with no content (<true/>, <false/>) may still carry
    # whitespace children; anything else is an error.
    for c in x.childNodes:
        checkwhite(c)
def loadFalse(x):
    """Parse a <false/> element; always returns False."""
    checknone(x)
    return False
def loadTrue(x):
    """Parse a <true/> element; always returns True."""
    checknone(x)
    return True
# Dispatch table: plist element tag name -> parser function.
LOADERS = {
    'dict': loadDict,
    'array': loadArray,
    'string': loadString,
    'real': loadReal,
    'integer': loadInteger,
    'true': loadTrue,
    'false': loadFalse,
}
def loadValue(n):
    """Parse any plist value element by dispatching on its tag name.

    Raises PlistError for tags that are not valid plist value elements.
    """
    tag = n.tagName
    try:
        loader = LOADERS[tag]
    except KeyError:
        raise PlistError('invalid tag: %r' % (tag,))
    return loader(n)
def load(data):
    """Deserialize an XML plist document into Python objects.

    All keys and strings come back as unicode.  Raises PlistError for a
    missing <plist> root, zero or multiple root values, or any malformed
    content.
    """
    document = xml.dom.minidom.parseString(data)
    root = document.documentElement
    if root.nodeType != dom.Node.ELEMENT_NODE or root.tagName != 'plist':
        raise PlistError('not a plist')
    result = None
    found = False
    for child in root.childNodes:
        if child.nodeType != dom.Node.ELEMENT_NODE:
            checkwhite(child)
            continue
        if found:
            raise PlistError('multiple roots')
        found = True
        result = loadValue(child)
    if not found:
        raise PlistError('no root object')
    return result
def dumpValue(doc, obj):
    """Serialize *obj* into a DOM element owned by *doc*.

    Dicts are emitted with sorted keys so output is deterministic.
    bool is deliberately tested before int: bool is an int subclass.

    Raises:
        TypeError: for unsupported value types.
    """
    if isinstance(obj, dict):
        n = doc.createElement('dict')
        # items() rather than iteritems(): same result, and it also runs
        # on Python 3.
        for k, v in sorted(obj.items()):
            c = doc.createElement('key')
            c.appendChild(doc.createTextNode(k))
            n.appendChild(c)
            c = dumpValue(doc, v)
            n.appendChild(c)
    elif isinstance(obj, unicode):
        n = doc.createElement('string')
        n.appendChild(doc.createTextNode(obj))
    elif isinstance(obj, bool):
        n = doc.createElement(['false', 'true'][obj])
    elif isinstance(obj, int):
        n = doc.createElement('integer')
        n.appendChild(doc.createTextNode(str(obj)))
    elif isinstance(obj, float):
        n = doc.createElement('real')
        n.appendChild(doc.createTextNode(str(obj)))
    elif isinstance(obj, list) or isinstance(obj, tuple):
        n = doc.createElement('array')
        for v in obj:
            n.appendChild(dumpValue(doc, v))
    else:
        # Was misspelled "TypeErorr", which raised a NameError instead.
        raise TypeError('unknown type: %r' % obj)
    return n
def dump(obj):
    """Serialize *obj* as a UTF-8 encoded XML plist document (bytes)."""
    impl = xml.dom.minidom.getDOMImplementation()
    doctype = impl.createDocumentType(
        'plist', "-//Apple//DTD PLIST 1.0//EN",
        "http://www.apple.com/DTDs/PropertyList-1.0.dtd")
    document = impl.createDocument(None, 'plist', doctype)
    root = document.documentElement
    root.appendChild(dumpValue(document, obj))
    return document.toxml('UTF-8')
| bsd-2-clause | Python | |
f970198596d8c20c89701fbcce38fd5736096e86 | Set maximal word length limit | cheshirenet/cheshirenet | namegen/markov.py | namegen/markov.py | #!/usr/bin/env python
"""
Module which produces readble name from 256-bit of random data
(i.e. sha-256 hash)
"""
# Hard cap on the length of a generated word (mkword stops growing a word
# once it reaches this many characters).
MAXWORDLEN=12
#
# Modules which contain problablity dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
# Indexed by one random bit in mkname(): 0 -> male table, 1 -> female table.
gendernames=[male,female]
class RandError(Exception):
    """
    Raised when the pool of random bits (the module-level ``data``
    integer) has been fully consumed.
    """
    pass
def getrandom(count):
    """
    Draw a uniform value in ``range(count)`` from the module-level
    entropy pool ``data``, shrinking the pool accordingly.

    Raises RandError once the pool is exhausted.
    """
    global data
    if not data:
        raise RandError("No more random data")
    data, remainder = divmod(data, count)
    return remainder
def morerandom():
    """
    Return True while the module-level entropy pool still holds bits.
    """
    global data
    return data!=0
def mkword(x):
    """
    Generate one title-cased word from the Markov table *x*.

    *x* maps a previous letter to a dict mapping the current letter to a
    list of possible next letters; ``' '`` marks word boundaries.  The
    first two letters are chosen with the probability of their occurrence
    after whitespace, then letters are appended until a space is produced
    or MAXWORDLEN is reached.  Entropy is consumed via getrandom().
    """
    first = x[' ']
    # Total number of continuations weights the choice of the first pair.
    # (sum() replaces the old reduce(operator.add, ...); same result.)
    count = sum(len(v) for v in first.values())
    i = getrandom(count)
    for letter in sorted(first):
        if i < len(first[letter]):
            word = letter + first[letter][i]
            break
        i -= len(first[letter])
    while word[-1] != ' ':
        y = x[word[-2]][word[-1]]
        word += y[getrandom(len(y))]
        if len(word) >= MAXWORDLEN:
            # Force a terminating space once the word is long enough.
            word += " "
            break
    return word.title()
def mkname(hash):
    """
    Derive a readable "Surname Given..." name from 64 hexadecimal digits
    (e.g. a sha-256 hash).

    The whole hash is loaded into the module-level entropy pool; one bit
    picks the gender table, then words are generated until the pool runs
    dry.

    Raises:
        ValueError: if fewer than 64 hex digits are supplied.
    """
    global data
    if len(hash) < 64:
        # (message typo "To few" fixed)
        raise ValueError("Too few random data: " + hash)
    data = int(hash, 16)
    x = gendernames[getrandom(2)]
    name = mkword(surname)
    while True:
        try:
            name += mkword(x)
        except RandError:
            break
    return name.strip()
if __name__ == "__main__":
    #
    # Test main routine: generate names from os.urandom entropy.  An
    # optional numeric argument sets how many names to print; otherwise
    # 20 names are printed together with their source hashes.
    #
    import sys
    from base64 import b16encode
    from os import urandom
    if len(sys.argv)>1 and sys.argv[1].isdigit():
        for i in xrange(0,int(sys.argv[1])):
            d=b16encode(urandom(32))
            try:
                print mkname(d)
            except RandError:
                print "Not enough random: "+d
    else:
        for i in xrange(0,20):
            d=b16encode(urandom(32))
            print d,mkname(d)
| agpl-3.0 | Python | |
3601a0dc9d762e17c24e0dbf86ee1ef4a00c49cd | Add tests for the authorize_user function | lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server,Yaco-Sistemas/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server | yithlibraryserver/tests/test_security.py | yithlibraryserver/tests/test_security.py | from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):

    # Collections emptied between tests so each case starts clean.
    clean_collections = ('access_codes', 'users')

    def test_authorize_user(self):
        """Walk authorize_user() through every failure mode, then success."""
        request = testing.FakeRequest({})
        # The authorization header is required
        self.assertRaises(HTTPUnauthorized, authorize_user, request)

        request = testing.FakeRequest({'Authorization': 'Basic foobar'})
        # Only the bearer method is allowed
        self.assertRaises(HTTPBadRequest, authorize_user, request)

        request = testing.FakeRequest({
                'Authorization': 'Bearer 1234',
                }, self.db)
        # Invalid code
        self.assertRaises(HTTPUnauthorized, authorize_user, request)

        access_code_id = self.db.access_codes.insert({
                'code': '1234',
                'user': 'user1',
                }, safe=True)
        request = testing.FakeRequest({
                'Authorization': 'Bearer 1234',
                }, self.db)
        # Code exists but references a non-existent user
        self.assertRaises(HTTPUnauthorized, authorize_user, request)

        user_id = self.db.users.insert({
                'username': 'user1',
                }, safe=True)
        self.db.access_codes.update({'_id': access_code_id}, {
                '$set': {'user': user_id},
                }, safe=True)
        request = testing.FakeRequest({
                'Authorization': 'Bearer 1234',
                }, self.db)
        # Code now points at a real user, so authorization succeeds
        authorized_user = authorize_user(request)
        self.assertEqual(authorized_user['username'], 'user1')
| agpl-3.0 | Python | |
66db96dc523ab838475eb3826766bb4278c18673 | Add tests for remove_display_attributes. | mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju | tests/test_assess_cloud_display.py | tests/test_assess_cloud_display.py | from tests import TestCase
from assess_cloud_display import remove_display_attributes
from utility import JujuAssertionError
class TestRemoveDisplayAttributes(TestCase):
    """Tests for remove_display_attributes()."""

    def test_remove_display_attributes(self):
        # Display-only keys are stripped; real config keys survive.
        cloud = {
            'defined': 'local',
            'description': 'Openstack Cloud',
            'type': 'openstack',
            }
        remove_display_attributes(cloud)
        self.assertEqual(cloud, {'type': 'openstack'})

    def test_remove_display_attributes_bad_defined(self):
        # An unexpected 'defined' value raises JujuAssertionError.
        with self.assertRaises(JujuAssertionError):
            remove_display_attributes({'defined': 'foo'})

    def test_remove_display_attributes_bad_description(self):
        # A description that does not match the expected text also fails.
        with self.assertRaises(JujuAssertionError):
            remove_display_attributes({
                'defined': 'local',
                'description': 'bar',
                'type': 'openstack',
                })
| agpl-3.0 | Python | |
6c12786f74c17ab8328fed9bfebbb003f2e9f282 | Add always true entry | techbureau/zaifbot,techbureau/zaifbot | zaifbot/rules/entry/always_true_entry.py | zaifbot/rules/entry/always_true_entry.py | from zaifbot.rules.entry.base import Entry
class AlwaysTrueEntry(Entry):
    """Entry rule that fires unconditionally -- useful for testing or for
    strategies that should enter on every evaluation."""

    def __init__(self, currency_pair, amount, action, name=None):
        super().__init__(currency_pair=currency_pair, amount=amount, action=action, name=name)

    def can_entry(self):
        # No market condition is checked; always signal an entry.
        return True
| mit | Python | |
93f0f573c40ed7878f744a9fee2b2a9e85157d5e | append elevations to GPX from SRTM dataset with gpxelevations util in SRTM.py package | tumluliu/mmrp-osm-analyzer,tumluliu/mmrp-osm-analyzer,tumluliu/mmrp-osm-analyzer | src/gpx_elev_enhancer.py | src/gpx_elev_enhancer.py | # Append elevations to GPX files
# 2015-05-08
# Lu LIU
#
from os import listdir
from os.path import isfile, join
import srtm
import gpxpy
# Directory holding the GPX tracks to enrich (edit to suit).
gpx_file_dir = "/Users/user/Research/data/GPX/Munich"

gpx_files = [f for f in listdir(gpx_file_dir) if isfile(join(gpx_file_dir, f))]

# For each track: parse it and fill in SRTM elevations (smoothed) for every
# point of the parsed GPX object.
# NOTE(review): the enriched GPX is never written back to disk here.
for gpx_file in gpx_files:
    print "add elevations for " + gpx_file + "...",
    gpx = gpxpy.parse(open(join(gpx_file_dir, gpx_file)))
    elev_data = srtm.get_data()
    elev_data.add_elevations(gpx, smooth=True)
    print " done!"
| mit | Python | |
98852758b85c2e6c53cc22dc30b5b4418bece6b5 | Add transfer paging unit tests | sirosen/globus-sdk-python,globus/globus-sdk-python,globus/globus-sdk-python,globusonline/globus-sdk-python | tests/unit/test_transfer_paging.py | tests/unit/test_transfer_paging.py | import requests
import json
import six
import pytest
from globus_sdk.transfer.paging import PaginatedResource
from globus_sdk.transfer.response import IterableTransferResponse
# Number of items the simulated paginated endpoint serves in every test.
N = 25
class PagingSimulator(object):
    """Stands in for a TransferClient GET that serves paginated items."""

    def __init__(self, n):
        self.n = n  # the number of simulated items

    def simulate_get(self, path, params=None,
                     headers=None, response_class=None, retry_401=True):
        """
        Simulates a paginated response from a Globus API get supporting limit,
        offset, and has_next_page.  The signature mirrors the client 'get'
        that PaginatedResource expects to call.
        """
        offset = params["offset"]
        limit = params["limit"]

        data = {}  # dict that will be treated as the json data of a response

        data["offset"] = offset
        data["limit"] = limit

        # fill data field with the items in [offset, offset+limit) that exist
        data["DATA"] = []
        for i in range(offset, min(self.n, offset + limit)):
            data["DATA"].append({"value": i})

        # fill has_next_page field
        data["has_next_page"] = (offset + limit) < self.n

        # wrap the payload in a requests.Response so it can be parsed like a
        # real API reply
        response = requests.Response()
        response._content = six.b(json.dumps(data))
        response.headers["Content-Type"] = "application/json"
        return IterableTransferResponse(response)
@pytest.fixture
def paging_simulator():
    # Fresh simulator with N items for each test.
    return PagingSimulator(N)
def test_data(paging_simulator):
    """
    Gets data from PaginatedResource objects based on paging_simulator,
    confirms data is the expected range of numbers.
    Tests num_results < n, num_results > n, num_results = None.
    """
    # num_results < n: fetching stops at the requested count
    less_results = N - 7
    pr_less = PaginatedResource(
        paging_simulator.simulate_get, "path", {"params": {}},
        max_results_per_call=10, num_results=less_results)
    # confirm results
    for item, expected in zip(pr_less.data, range(less_results)):
        assert item["value"] == expected
    assert pr_less.num_results_fetched == less_results

    # num_results > n: fetching is capped at the N items that exist
    more_results = N + 7
    pr_more = PaginatedResource(
        paging_simulator.simulate_get, "path", {"params": {}},
        max_results_per_call=10, num_results=more_results)
    # confirm results
    for item, expected in zip(pr_more.data, range(N)):
        assert item["value"] == expected
    assert pr_more.num_results_fetched == N

    # num_results = None (fetch all)
    pr_none = PaginatedResource(
        paging_simulator.simulate_get, "path", {"params": {}},
        max_results_per_call=10, num_results=None)
    # confirm results
    for item, expected in zip(pr_none.data, range(N)):
        assert item["value"] == expected
    assert pr_none.num_results_fetched == N
def test_iterable_func(paging_simulator):
    """
    Gets the generator from a PaginatedResource's iterable_func,
    sanity checks usage.
    """
    pr = PaginatedResource(
        paging_simulator.simulate_get, "path", {"params": {}},
        max_results_per_call=10, num_results=None)

    generator = pr.iterable_func()
    # the generator yields each simulated item in order...
    for i in range(N):
        assert six.next(generator)["value"] == i
    # ...and is exhausted once all N items have been produced
    with pytest.raises(StopIteration):
        six.next(generator)
| apache-2.0 | Python | |
7e68ec932cb43fc5a98828a367a51593b419bee0 | Add batch normalization | spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc | thinc/neural/_classes/batchnorm.py | thinc/neural/_classes/batchnorm.py |
from .model import Model
class BatchNormalization(Model):
def predict_batch(self, X):
N, mu, var = _get_moments(self.ops, X)
return _forward(self.ops, X, mu, var)
def begin_update(self, X, dropout=0.0):
N, mu, var = _get_moments(self.ops, X)
Xhat = _forward(self.ops, X, mu, var)
def finish_update(dy, optimizer=None, **kwargs):
assert len(X) == len(dy)
dist, sum_dy, sum_dy_dist = _get_d_moments(self.ops, dy, X, mu)
if hasattr(dy, 'shape'):
d_xhat = N * dy - sum_dy - dist * var**(-1.) * sum_dy_dist
d_xhat *= var ** (-1. / 2)
d_xhat /= N
return d_xhat
else:
seqs = (dy, sum_dy, dist, sum_dy_dist)
output = []
assert len(sum_dy) == len(dy)
assert len(dist) == len(dy)
assert len(sum_dy_dist) == len(dy)
for dy_, sum_dy_, dist_, sum_dy_dist_ in zip(*seqs):
d_xhat = N * dy_ - sum_dy_ - dist_ * var**(-1.) * sum_dy_dist_
d_xhat *= var ** (-1. / 2)
d_xhat /= N
output.append(d_xhat)
assert len(output) == len(dy), (len(output), len(dy))
return output
return Xhat, finish_update
def _get_moments(ops, X):
if hasattr(X, 'shape') and len(X.shape) == 2:
mu = X.mean(axis=0)
var = X.var(axis=0) + 1e-8
return X.shape[0], mu, var
else:
stacked = numpy.vstack(X)
return stacked.shape[0], stacked.mean(axis=0), stacked.var(axis=0)
def _get_d_moments(ops, dy, X, mu):
if hasattr(dy, 'shape'):
dist = X-mu
return dist, ops.xp.sum(dy, axis=0), ops.xp.sum(dy * dist, axis=0)
else:
sum_dy = [ops.xp.sum(seq, axis=0) for seq in dy]
dist = [x-mu for x in X]
sum_dy_dot_dist = [ops.xp.sum(seq * d, axis=0) for seq, d in zip(dy, dist)]
return dist, sum_dy, sum_dy_dot_dist
def _forward(ops, X, mu, var):
if hasattr(X, 'shape'):
return (X-mu) * var ** (-1./2.)
else:
return [_forward(x, mu, var) for x in X]
| mit | Python | |
c99a476b396422c0a673a78eb795df1cf94b8bb5 | Define base Frame object. | lawnmowerlatte/hyper,masaori335/hyper,plucury/hyper,irvind/hyper,plucury/hyper,masaori335/hyper,fredthomsen/hyper,Lukasa/hyper,jdecuyper/hyper,Lukasa/hyper,jdecuyper/hyper,fredthomsen/hyper,irvind/hyper,lawnmowerlatte/hyper | hyper/http20/frame.py | hyper/http20/frame.py | # -*- coding: utf-8 -*-
"""
hyper/http20/frame
~~~~~~~~~~~~~~~~~~
Defines framing logic for HTTP/2.0. Provides both classes to represent framed
data and logic for aiding the connection when it comes to reading from the
socket.
"""
class Frame(object):
def __init__(self):
self.stream = None
def serialize(self):
raise NotImplementedError()
| mit | Python | |
1696d6b1f240f8403819e3d817ae8e387ab5d08c | Add FFT checkers. | cournape/numscons,cournape/numscons,cournape/numscons | numscons/checkers/fft_checkers.py | numscons/checkers/fft_checkers.py | #! /usr/bin/env python
# Last Change: Tue Dec 04 03:00 PM 2007 J
# Module for custom, common checkers for numpy (and scipy)
import sys
import os.path
from copy import deepcopy
from distutils.util import get_platform
# from numpy.distutils.scons.core.libinfo import get_config_from_section, get_config
# from numpy.distutils.scons.testcode_snippets import cblas_sgemm as cblas_src, \
# c_sgemm as sunperf_src, lapack_sgesv, blas_sgemm, c_sgemm2, \
# clapack_sgesv as clapack_src
# from numpy.distutils.scons.fortran_scons import CheckF77Mangling, CheckF77Clib
from numscons.configuration import add_info
from perflib import CheckMKL, CheckFFTW3, CheckFFTW2
from support import check_include_and_run, ConfigOpts, ConfigRes
__all__ = ['CheckFFT']
def CheckFFT(context, autoadd = 1, check_version = 0):
"""This checker tries to find optimized library for fft"""
libname = 'fft'
env = context.env
def check(func, name, suplibs):
st, res = func(context, autoadd, check_version)
# XXX: check for fft code ?
if st:
for lib in suplibs:
res.cfgopts['libs'].append(lib)
add_info(env, libname, res)
return st
# Check MKL
st = check(CheckMKL, 'MKL', [])
if st:
return st
# Check fftw3
st = check(CheckFFTW3, 'fftw3', ['fftw3'])
if st:
return st
# Check fftw2
st = check(CheckFFTW2, 'fftw2', ['fftw'])
if st:
return st
add_info(env, libname, None)
return 0
| bsd-3-clause | Python | |
9b0f4062729d70ec5236ec244d5eaca7e4653b47 | Test for functions in utils added | CiscoPSIRT/openVulnAPI,CiscoPSIRT/openVulnAPI,CiscoPSIRT/openVulnAPI,CiscoPSIRT/openVulnAPI,CiscoPSIRT/openVulnAPI | openVulnQuery/tests/test_utils.py | openVulnQuery/tests/test_utils.py | import unittest
from openVulnQuery import utils
from openVulnQuery import advisory
mock_advisory_title = "Mock Advisory Title"
mock_advisory = advisory.CVRF(advisory_id="Cisco-SA-20111107-CVE-2011-0941",
sir="Medium",
first_published="2011-11-07T21:36:55+0000",
last_updated="2011-11-07T21:36:55+0000",
cves=["CVE-2011-0941", "NA"],
cvrf_url="http://tools.cisco.com/security/center/contentxml/CiscoSecurityAdvisory/Cisco-SA-20111107-CVE-2011-0941/cvrf/Cisco-SA-20111107-CVE-2011-0941_cvrf.xml",
bug_ids="BUGISidf",
cvss_base_score="7.0",
advisory_title="%s" % mock_advisory_title,
publication_url="https://tools.cisco.com/mockurl",
cwe="NA",
product_names=["product_name_1", "product_name_2"],
summary="This is summary")
mock_advisories = [mock_advisory]
class UtilsTest(unittest.TestCase):
def test_filter_advisories_general_input(self):
fields = ["advisory_title", "sir", "bug_ids"]
expected_output = [{"advisory_title": "Mock Advisory Title", "sir": "Medium", "bug_ids": "BUGISidf"}]
output = utils.filter_advisories(mock_advisories, fields)
self.assertListEqual(output, expected_output)
def test_filter_advisories_empty_fields(self):
fields = []
expected_output = [{}]
self.assertListEqual(utils.filter_advisories(mock_advisories, fields), expected_output)
def test_filter_advisories_invalid_fields(self):
fields = ["advisory_title", "v_score"]
expected_output = [{'advisory_title': '%s' % mock_advisory_title}]
output = utils.filter_advisories(mock_advisories, fields)
self.assertIsInstance(output, list)
self.assertDictEqual(output[0], expected_output[0])
def test_count_fields_valid_input(self):
fields = ["bug_ids", "advisory_title"]
expected_output = {'bug_ids': 1, 'advisory_title': 1}
output = utils.count_fields(mock_advisories, fields)
self.assertDictEqual(output, expected_output)
def test_count_fields_invalid_input(self):
fields = ["bug_ids", "v_score"]
expected_output = {'bug_ids': 1, 'v_score': 0}
output = utils.count_fields(mock_advisories, fields)
self.assertDictEqual(output, expected_output)
def test_get_count_valid(self):
self.assertEqual(utils.get_count(getattr(mock_advisory, "advisory_title")), 1)
self.assertEqual(utils.get_count(getattr(mock_advisory, "product_names")), 2)
def test_get_count_fields_with_NA(self):
self.assertEqual(utils.get_count(getattr(mock_advisory, "cwe")), 0)
self.assertEqual(utils.get_count(getattr(mock_advisory, "cves")), 1)
| mit | Python | |
88e87392204884102b17a92581c5d5b29a258bb7 | add ftpsync | openprocurement/openprocurement.search,imaginal/openprocurement.search | openprocurement/search/ftpsync.py | openprocurement/search/ftpsync.py | # -*- coding: utf-8 -*-
import os
import sys
import signal
import os.path
import logging
import logging.config
from ftplib import FTP
from ConfigParser import ConfigParser
logger = logging.getLogger(__name__)
class FTPSyncApp(object):
    """Mirror files matching ``filematch`` from an FTP directory into a
    local directory, skipping files that already exist locally."""

    # Default settings; per-instance overrides come from the config passed
    # to __init__ (e.g. the [ftpsync] section of an ini file).
    config = {
        'host': '127.0.0.1',
        'port': 21,
        'timeout': 120,
        'user': 'anonymous',
        'passwd': 'anonymous@user.tld',
        'ftp_dir': '',
        'local_dir': '',
        'filematch': 'ocds-tender-*.json',
    }

    def __init__(self, config=None):
        # Copy the class-level defaults so per-instance overrides never
        # mutate the shared dict (the original updated it in place), and
        # avoid the old mutable-default-argument ({}).
        self.config = dict(self.config)
        if config:
            self.config.update(config)
        # Values read from an ini file arrive as strings; FTP.connect()
        # needs a numeric port and timeout (the original converted only
        # the timeout).
        self.config['port'] = int(self.config['port'])
        self.config['timeout'] = float(self.config['timeout'])
        self.ftp = FTP()

    def run(self):
        """Connect, log in, and download every matching remote file that is
        not yet present locally; a failed download is deleted."""
        self.ftp.connect(
            self.config['host'],
            self.config['port'],
            self.config['timeout'])
        self.ftp.login(
            self.config['user'],
            self.config['passwd'])
        if self.config['ftp_dir']:
            self.ftp.cwd(self.config['ftp_dir'])
        if self.config['local_dir']:
            logger.info("CD %s", self.config['local_dir'])
            os.chdir(self.config['local_dir'])
        filematch = self.config['filematch']
        for filename in self.ftp.nlst(filematch):
            if os.path.exists(filename):
                logger.info("EXISTS %s", filename)
                continue
            try:
                # "with" closes the handle before the cleanup unlink below
                with open(filename, 'wb') as fp:
                    logger.info("RETR %s", filename)
                    self.ftp.retrbinary('RETR ' + filename, fp.write)
            except Exception as e:
                # best-effort: log, drop the partial file, move on
                logger.error("Exception {}".format(e))
                os.unlink(filename)
def signal_handler(signo, frame):
    """Terminate the process cleanly (exit status 0) on SIGTERM."""
    raise SystemExit(0)
def main():
    """Entry point: ``ftpsync config.ini`` reads the [ftpsync] section and
    mirrors the configured remote directory once."""
    if len(sys.argv) < 2:
        print("Usage: ftpsync config.ini")
        sys.exit(1)
    #logging.config.fileConfig(sys.argv[1])
    logging.basicConfig(level=logging.DEBUG)
    parser = ConfigParser()
    parser.read(sys.argv[1])
    # Exit cleanly on SIGTERM (e.g. when run under a process supervisor).
    signal.signal(signal.SIGTERM, signal_handler)
    # items() yields (option, value) string pairs; FTPSyncApp merges them
    # over its defaults.
    config = parser.items('ftpsync')
    app = FTPSyncApp(config)
    app.run()
| apache-2.0 | Python | |
eb4294f95cb05337ef432840d9538de1275b22b4 | Add routes. | laonawuli/addrest,laonawuli/addrest,laonawuli/addrest,laonawuli/addrest,laonawuli/addrest | web2py/routes.py | web2py/routes.py | routes_in = [
('/', '/addrest/default/index'),
]
| mit | Python | |
e3757b20ca74e070e57dd251bf60f691922999fe | add new test file | dfdeshom/solrcloudpy,relwell/solrcloudpy,relwell/solrcloudpy,dfdeshom/solrcloudpy | test/test_collection.py | test/test_collection.py | import unittest
from solr_instance import SolrInstance
from solrcloudpy import Connection
class TestCollection(unittest.TestCase):
    """Integration test: talks to a freshly started local Solr instance."""

    def setUp(self):
        # Boot a throwaway Solr node and wait until it accepts requests.
        self.solrprocess = SolrInstance("solr2")
        self.solrprocess.start()
        self.solrprocess.wait_ready()
        self.conn = Connection()

    def tearDown(self):
        self.solrprocess.terminate()

    def test_create_collection(self):
        # NOTE(review): no assertion here -- the test only checks that
        # create_collection() does not raise.
        coll2 = self.conn.create_collection('coll2')
unittest.main()
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.