commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
c4d583966ef1a4d9bdb57715ef5e766ba62fbed6 | Add tests for the Django directory | prophile/jacquard,prophile/jacquard | jacquard/directory/tests/test_django.py | jacquard/directory/tests/test_django.py | from jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
    import sqlalchemy
except ImportError:
    # sqlalchemy is an optional dependency; the tests in this module are
    # skipped (via pytest.mark.skipif) when it is not installed.
    sqlalchemy = None

if sqlalchemy is not None:
    # Shared in-memory SQLite engine acting as a stand-in for the Django DB.
    test_database = sqlalchemy.create_engine('sqlite://')
    # Minimal replica of Django's auth_user table: only the columns the
    # directory under test reads.
    test_database.execute("""
    CREATE TABLE auth_user(
        id INTEGER NOT NULL PRIMARY KEY,
        date_joined DATETIME NOT NULL,
        is_superuser BOOLEAN NOT NULL
    )
    """)
    # Three users: id 1 is a superuser, ids 2 and 3 are regular users.
    test_database.execute("""
    INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
    (1, date('now'), 1),
    (2, date('now'), 0),
    (3, date('now'), 0)
    """)
@pytest.mark.skipif(
    sqlalchemy is None,
    reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
    # User id 1 exists and has is_superuser=1, so its tags include
    # 'superuser' (per the fixture data above).
    directory = DjangoDirectory('')
    user_one = directory.lookup('1')
    assert list(user_one.tags) == ['superuser']


@pytest.mark.skipif(
    sqlalchemy is None,
    reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
    # Id 0 is not present in auth_user, so lookup yields None.
    directory = DjangoDirectory('')
    user_zero = directory.lookup('0')
    assert user_zero is None


@pytest.mark.skipif(
    sqlalchemy is None,
    reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
    # all_users() yields an entry per auth_user row; ids 1-3 are expected
    # (presumably in id order -- the assertion below pins that).
    directory = DjangoDirectory('')
    users = directory.all_users()
    assert [x.id for x in users] == [1, 2, 3]
| mit | Python | |
6babb6e64e93ed74a72203fdc67955ae8ca3bfb3 | Add a baseline set of _MultiCall performance tests | RonnyPfannschmidt/pluggy,nicoddemus/pluggy,hpk42/pluggy,pytest-dev/pluggy,pytest-dev/pluggy,RonnyPfannschmidt/pluggy,tgoodlet/pluggy | testing/benchmark.py | testing/benchmark.py | """
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
# Marker decorators for the "example" project namespace used by these
# benchmark hook implementations.
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
    """Wrap each decorated *method* in a HookImpl and return a _MultiCall
    ready to execute with *kwargs*."""
    hookfuncs = [
        HookImpl(None, "<temp>", method, method.example_impl)
        for method in methods
    ]
    return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
    # Wrapper-style implementation that adds no work around the inner calls.
    yield


@hookimpl
def m2(arg1, arg2, arg3):
    # Plain implementation returning its arguments unchanged.
    return arg1, arg2, arg3


@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
    yield


@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
    yield
def inner_exec(methods):
    # Run one full multi-call over *methods* with fixed keyword arguments;
    # this is the unit of work each benchmark times.
    return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()


@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
    # Baseline: one wrapper-marked impl plus one plain impl.
    benchmark(inner_exec, [m1, m2])


@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
    # Hook wrappers only.
    benchmark(inner_exec, [w1, w2])


@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
    # Mixed implementations and wrappers.
    benchmark(inner_exec, [m1, m2, w1, w2])
| mit | Python | |
import os
from datetime import date

# Programs at the Coral Gables Art Cinema.
programs = ['1. Main Features', '2. After Hours', '3. Special Screenings',
            '4. Family Day on Aragon', '5. National Theatre Live',
            '6. See It in 70mm', '7. Alternative Content']

# Show the menu and let the user pick a program by its 1-based number.
for program in programs:
    print(program)
index = int(input('Select a program by its number: '))
# Strip the "N. " prefix to get the bare program name used on disk.
program = programs[index - 1][3:]
title = input('Select a film: ')
photo_dir = input('Location of the photos: ')
new_name = input('Enter new base file name: ')

# Photos live under <root>/<program>/<current year>/<title>/<photo_dir>.
root = 'M:/Coral Gables Art Cinema/Programming/'
year = str(date.today().year)
path = os.path.join(root, program, year, title, photo_dir)

# Rename every file in the directory to "<base name> <n>.jpg".
# NOTE(review): this forces a .jpg extension regardless of the original
# file type -- confirm all inputs are JPEGs.
num_suffix = 1
for photo in os.listdir(path):
    final_name = '{} {}.jpg'.format(new_name, num_suffix)
    os.rename(os.path.join(path, photo), os.path.join(path, final_name))
    num_suffix += 1
| mit | Python | |
f182dae6eb0a17f8b7a437694b69b273595f9549 | Add YAML export | maebert/jrnl,notbalanced/jrnl,philipsd6/jrnl,MinchinWeb/jrnl | jrnl/plugins/yaml_exporter.py | jrnl/plugins/yaml_exporter.py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals, print_function
from .text_exporter import TextExporter
import re
import sys
import yaml
class MarkdownExporter(TextExporter):
    """This Exporter can convert entries and journals into Markdown with YAML front matter."""
    names = ["yaml"]
    extension = "md"

    @classmethod
    def export_entry(cls, entry, to_multifile=True):
        """Returns a markdown representation of a single entry, with YAML front matter.

        ``entry`` must expose ``date``, ``title``, ``body``, ``starred``,
        ``tags`` and ``journal.config['timeformat']``. When *to_multifile*
        is False an error is printed and None is returned, because YAML
        export only makes sense as one file per entry.
        """
        if to_multifile is False:
            # Bug fix: file=sys.stderr was previously passed to str.format()
            # (inside its parentheses), so the message went to stdout; it is
            # now routed to stderr via print().
            print("{}ERROR{}: YAML export must be to individual files. Please specify a directory to export to.".format("\033[31m", "\033[0m"), file=sys.stderr)
            return
        date_str = entry.date.strftime(entry.journal.config['timeformat'])
        # NOTE(review): `body` is computed but never used below (`newbody`
        # is built from entry.body directly) -- kept for compatibility.
        body_wrapper = "\n" if entry.body else ""
        body = body_wrapper + entry.body
        # Increase heading levels in body text so entry headings nest under
        # the front matter (H1 becomes H4, etc.).
        newbody = ''
        heading = '###'
        previous_line = ''
        warn_on_heading_level = False
        for line in entry.body.splitlines(True):
            if re.match(r"#+ ", line):
                # ATX style headings: prefix with three extra '#'.
                newbody = newbody + previous_line + heading + line
                if re.match(r"#######+ ", heading + line):
                    # Markdown only defines up to H6; warn after the loop.
                    warn_on_heading_level = True
                line = ''
            elif re.match(r"=+$", line) and not re.match(r"^$", previous_line):
                # Setext style H1 (text underlined with '='): rewrite as ATX H4.
                newbody = newbody + heading + "# " + previous_line
                line = ''
            elif re.match(r"-+$", line) and not re.match(r"^$", previous_line):
                # Setext style H2 (text underlined with '-'): rewrite as ATX H5.
                newbody = newbody + heading + "## " + previous_line
                line = ''
            else:
                newbody = newbody + previous_line
            previous_line = line
        newbody = newbody + previous_line  # add very last line
        if warn_on_heading_level is True:
            print("{}WARNING{}: Headings increased past H6 on export - {} {}".format("\033[33m", "\033[0m", date_str, entry.title), file=sys.stderr)
        # top = yaml.dump(entry)
        # NOTE(review): the "stared" key looks like a typo for "starred",
        # but it is part of the emitted YAML and is kept for compatibility.
        return "title: {title}\ndate: {date}\nstared: {stared}\ntags: {tags}\n{body} {space}".format(
            date=date_str,
            title=entry.title,
            stared=entry.starred,
            tags=', '.join([tag[1:] for tag in entry.tags]),
            body=newbody,
            space=""
        )

    @classmethod
    def export_journal(cls, journal):
        """Returns an error, as YAML export requires a directory as a target."""
        # Bug fix: same as export_entry -- file=sys.stderr belongs to
        # print(), not str.format().
        print("{}ERROR{}: YAML export must be to individual files. Please specify a directory to export to.".format("\033[31m", "\033[0m"), file=sys.stderr)
        return
| mit | Python | |
41bd33421f14498737aa0088f2d93b00bb521d7b | implement a viewset controller, capable of containing controllers | jjongbloets/julesTk | julesTk/controller/viewset.py | julesTk/controller/viewset.py |
from . import ViewController
class ViewSetController(ViewController):
    """Controller that manages a named registry of child controllers."""

    def __init__(self, parent, view=None):
        super(ViewSetController, self).__init__(parent, view)
        # Registry of child controllers, keyed by name.
        self._controllers = {}

    @property
    def controllers(self):
        """ Dictionary with all controllers used in this viewset

        :return:
        :rtype: dict[str, julesTk.controller.BaseController]
        """
        return self._controllers

    def has_controller(self, name):
        """Whether a controller is registered to this controller using the given name"""
        return name in self.controllers.keys()

    def get_controller(self, name):
        """Return the controller registered under the given name"""
        if not self.has_controller(name):
            raise KeyError("No controller registered using the name: {}".format(name))
        return self.controllers[name]

    def add_controller(self, name, c):
        """Register a controller under a new name"""
        if self.has_controller(name):
            raise KeyError("Another controller is already registered under: {}".format(name))
        self.controllers[name] = c

    def remove_controller(self, name):
        """Remove controller and name from the registry"""
        if not self.has_controller(name):
            raise KeyError("No controller registered using the name: {}".format(name))
        return self.controllers.pop(name)

    def remove_controllers(self):
        """Remove all controllers from the registry

        And tell them to stop
        """
        # Bug fix: dict.keys() is not indexable on Python 3 (the original
        # `self.controllers.keys()[0]` raised TypeError); take an arbitrary
        # remaining key instead. Works identically on Python 2.
        while len(self.controllers) > 0:
            key = next(iter(self.controllers))
            self.remove_controller(key).stop()
        return len(self.controllers) == 0

    def _stop(self):
        # Stop and drop all children before stopping this controller itself.
        self.remove_controllers()
        super(ViewSetController, self)._stop()
| mit | Python | |
8c5fb07b37eebf484c33ca735bd2b9dac5d0dede | solve 1 problem | Shuailong/Leetcode | solutions/nested-list-weight-sum.py | solutions/nested-list-weight-sum.py | #!/usr/bin/env python
# encoding: utf-8
"""
nested-list-weight-sum.py
Created by Shuailong on 2016-03-30.
https://leetcode.com/problems/nested-list-weight-sum/.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
# class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
    def depthSum1(self, nestedList, depth):
        """Return the sum of all integers in *nestedList*, each weighted by
        its nesting depth (the current level contributes *depth* per unit)."""
        return sum(
            depth * element.getInteger()
            if element.isInteger()
            else self.depthSum1(element.getList(), depth + 1)
            for element in nestedList
        )

    def depthSum(self, nestedList):
        """
        :type nestedList: List[NestedInteger]
        :rtype: int
        """
        # Top-level integers have depth 1.
        return self.depthSum1(nestedList, 1)
def main():
    # No driver needed; the Solution class is exercised by the judge.
    pass


if __name__ == '__main__':
    main()
| mit | Python | |
0738b3816db752b8cb678324ff4c113625660b94 | add test for pathops.operations.intersection | fonttools/skia-pathops | tests/operations_test.py | tests/operations_test.py | from pathops import Path, PathVerb
from pathops.operations import union, difference, intersection, reverse_difference, xor
import pytest
@pytest.mark.parametrize(
    "subject_path, clip_path, expected",
    [
        [
            # Subject: 10x10 square at the origin.
            [
                (PathVerb.MOVE, ((0, 0),)),
                (PathVerb.LINE, ((0, 10),)),
                (PathVerb.LINE, ((10, 10),)),
                (PathVerb.LINE, ((10, 0),)),
                (PathVerb.CLOSE, ()),
            ],
            # Clip: 10x10 square offset by (5, 5), overlapping the subject.
            [
                (PathVerb.MOVE, ((5, 5),)),
                (PathVerb.LINE, ((5, 15),)),
                (PathVerb.LINE, ((15, 15),)),
                (PathVerb.LINE, ((15, 5),)),
                (PathVerb.CLOSE, ()),
            ],
            # Expected intersection: the common 5x5 square.
            [
                (PathVerb.MOVE, ((5, 5),)),
                (PathVerb.LINE, ((10, 5),)),
                (PathVerb.LINE, ((10, 10),)),
                (PathVerb.LINE, ((5, 10),)),
                (PathVerb.CLOSE, ()),
            ],
        ]
    ],
)
def test_intersection(subject_path, clip_path, expected):
    """Intersecting two overlapping squares yields their common region."""
    # Build the subject path from (verb, points) pairs.
    sub = Path()
    for verb, pts in subject_path:
        sub.add(verb, *pts)
    # Build the clip path the same way.
    clip = Path()
    for verb, pts in clip_path:
        clip.add(verb, *pts)
    # Record the boolean intersection into a fresh path via its pen.
    result = Path()
    intersection([sub], [clip], result.getPen())
    assert list(result) == expected
| bsd-3-clause | Python | |
f3c11599ef1714f7337191719172614c43b87eff | Add tests.test_OrderedSet. | therealfakemoot/collections2,JustusW/BetterOrderedDict | tests/test_OrderedSet.py | tests/test_OrderedSet.py | from twisted.trial import unittest
from better_od import OrderedSet
class TestOrderedDict(unittest.TestCase):
    # NOTE(review): the class name says "Dict" but the subject under test
    # is OrderedSet -- likely copied from a sibling test module.

    def setUp(self):
        # 'd' appears twice; the set should keep only the first occurrence.
        self.values = 'abcddefg'
        self.s = OrderedSet(self.values)

    def test_order(self):
        # Iteration preserves insertion order with duplicates removed.
        expected = list(enumerate('abcdefg'))
        self.assertEquals(list(enumerate(self.s)), expected)

    def test_index(self):
        # key_index reports the 0-based position of a member.
        self.assertEquals(self.s.key_index('c'), 2)

    def test_add_new_value(self):
        # Adding an unseen value grows the set by exactly one.
        prev = len(self.s)
        self.s.add('z')
        self.assertEqual(len(self.s), prev + 1)
| mit | Python | |
7df6189dbfd69c881fedf71676dd4fdbc7dba2f0 | Add test for renormalize migration | CenterForOpenScience/scrapi,fabianvf/scrapi,ostwald/scrapi,erinspace/scrapi,mehanig/scrapi,felliott/scrapi,erinspace/scrapi,alexgarciac/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,icereval/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,fabianvf/scrapi | tests/test_migrations.py | tests/test_migrations.py | import copy
import pytest
import mock
import scrapi
from scrapi.linter.document import NormalizedDocument
from scrapi import tasks
from scrapi import registry
from scrapi.migrations import delete
from scrapi.migrations import rename
from scrapi.migrations import renormalize
# Need to force cassandra to ignore set keyspace
from scrapi.processing.cassandra import CassandraProcessor, DocumentModel
from . import utils
# Module-level fixtures shared by every test below: one Cassandra
# processor, one raw harvested document and its normalized form.
test_cass = CassandraProcessor()
harvester = utils.TestHarvester()
NORMALIZED = NormalizedDocument(utils.RECORD)
RAW = harvester.harvest()[0]


@pytest.fixture
def harvester():
    # NOTE(review): this fixture shadows the module-level `harvester`
    # defined above; comment says it exists to be overridden.
    pass  # Need to override this


@pytest.mark.cassandra
def test_rename():
    # Stub out elasticsearch so only the Cassandra path is exercised.
    real_es = scrapi.processing.elasticsearch.es
    scrapi.processing.elasticsearch.es = mock.MagicMock()
    test_cass.process_raw(RAW)
    test_cass.process_normalized(RAW, NORMALIZED)
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    old_source = NORMALIZED['shareProperties']['source']
    assert(queryset[0].source == utils.RECORD['shareProperties']['source'])
    assert(queryset[0].source == old_source)
    # Register a second source name ('wwe_news') to rename into.
    new_record = copy.deepcopy(utils.RECORD)
    new_record['shareProperties']['source'] = 'wwe_news'
    test_info = registry['test'].__class__()
    test_info.short_name = 'wwe_news'
    registry['wwe_news'] = test_info
    tasks.migrate(rename, source=old_source, target='wwe_news')
    # After migration the document is stored under the new source name.
    queryset = DocumentModel.objects(docID=RAW['docID'], source='wwe_news')
    assert(queryset[0].source == 'wwe_news')
    assert(len(queryset) == 1)
    scrapi.processing.elasticsearch.es = real_es


@pytest.mark.cassandra
def test_delete():
    real_es = scrapi.processing.elasticsearch.es
    scrapi.processing.elasticsearch.es = mock.MagicMock()
    test_cass.process_raw(RAW)
    test_cass.process_normalized(RAW, NORMALIZED)
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 1)
    # The delete migration removes the document entirely.
    tasks.migrate(delete, source=RAW['source'])
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 0)
    scrapi.processing.elasticsearch.es = real_es


@pytest.mark.cassandra
def test_renormalize():
    real_es = scrapi.processing.elasticsearch.es
    scrapi.processing.elasticsearch.es = mock.MagicMock()
    test_cass.process_raw(RAW)
    test_cass.process_normalized(RAW, NORMALIZED)
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 1)
    # Renormalizing must leave exactly one document in place (idempotent
    # with respect to document count).
    tasks.migrate(renormalize, source=RAW['source'])
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 1)
    scrapi.processing.elasticsearch.es = real_es
| import copy
import pytest
import mock
import scrapi
from scrapi.linter.document import NormalizedDocument
from scrapi import tasks
from scrapi import registry
from scrapi.migrations import delete
from scrapi.migrations import rename
# Need to force cassandra to ignore set keyspace
from scrapi.processing.cassandra import CassandraProcessor, DocumentModel
from . import utils
# NOTE(review): this region is an earlier revision of the same test module
# (the dataset row's "old_contents" column); it duplicates the setup and
# tests above but predates the renormalize migration test.
test_cass = CassandraProcessor()
harvester = utils.TestHarvester()
NORMALIZED = NormalizedDocument(utils.RECORD)
RAW = harvester.harvest()[0]


@pytest.fixture
def harvester():
    pass  # Need to override this


@pytest.mark.cassandra
def test_rename():
    # Stub out elasticsearch so only the Cassandra path is exercised.
    real_es = scrapi.processing.elasticsearch.es
    scrapi.processing.elasticsearch.es = mock.MagicMock()
    test_cass.process_raw(RAW)
    test_cass.process_normalized(RAW, NORMALIZED)
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    old_source = NORMALIZED['shareProperties']['source']
    assert(queryset[0].source == utils.RECORD['shareProperties']['source'])
    assert(queryset[0].source == old_source)
    new_record = copy.deepcopy(utils.RECORD)
    new_record['shareProperties']['source'] = 'wwe_news'
    test_info = registry['test'].__class__()
    test_info.short_name = 'wwe_news'
    registry['wwe_news'] = test_info
    tasks.migrate(rename, source=old_source, target='wwe_news')
    queryset = DocumentModel.objects(docID=RAW['docID'], source='wwe_news')
    assert(queryset[0].source == 'wwe_news')
    assert(len(queryset) == 1)
    scrapi.processing.elasticsearch.es = real_es


@pytest.mark.cassandra
def test_delete():
    real_es = scrapi.processing.elasticsearch.es
    scrapi.processing.elasticsearch.es = mock.MagicMock()
    test_cass.process_raw(RAW)
    test_cass.process_normalized(RAW, NORMALIZED)
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 1)
    tasks.migrate(delete, source=RAW['source'])
    queryset = DocumentModel.objects(docID=RAW['docID'], source=RAW['source'])
    assert(len(queryset) == 0)
    scrapi.processing.elasticsearch.es = real_es
| apache-2.0 | Python |
def get_url(mf):
    """Parse the mf list/structure and return the list of URL values found.

    Items are either URL strings (collected as-is) or microformat item
    dicts; for dicts, URLs are collected only from items whose ``type``
    declares at least one ``h-*`` microformat.
    """
    # Python 2/3 compatibility: use basestring where it exists so unicode
    # values still match on Python 2, and fall back to str on Python 3.
    try:
        string_types = basestring  # noqa: F821 (Python 2 only)
    except NameError:
        string_types = str
    urls = []
    for item in mf:
        if isinstance(item, string_types):
            urls.append(item)
        else:
            itemtype = [x for x in item.get('type', []) if x.startswith('h-')]
            # Bug fix: the original tested `itemtype is not []`, an identity
            # comparison against a fresh list that is always True, so URLs
            # were collected from every item regardless of its type.
            if itemtype:
                urls.extend(item.get('properties', {}).get('url', []))
    return urls
| mit | Python | |
1831dbd065a8776a77d18e10b44f84c99bca4c75 | Add test of simple textcat workflow | aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy | spacy/tests/textcat/test_textcat.py | spacy/tests/textcat/test_textcat.py | from __future__ import unicode_literals
from ...language import Language
def test_simple_train():
    """Sanity-check that a fresh textcat pipe can fit a trivial pattern."""
    nlp = Language()
    nlp.add_pipe(nlp.create_pipe('textcat'))
    nlp.get_pipe('textcat').add_label('is_good')
    nlp.begin_training()
    # Train for a few epochs: 'a' strings are positive, 'b' strings negative.
    for i in range(5):
        for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
                             ('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
            # NOTE(review): the gold key here is 'answer' while the label
            # added above is 'is_good' -- confirm the mismatch is intended.
            nlp.update([text], [{'cats': {'answer': answer}}])
    doc = nlp(u'aaa')
    # The trained label must be present and score positive for an 'a' text.
    assert 'is_good' in doc.cats
    assert doc.cats['is_good'] >= 0.5
| mit | Python | |
47af5fc466936f46e05f4ebaf89257e5c731a38e | add test_handle_conversation_after_delete | rickmak/chat,lakoo/chat,SkygearIO/chat,lakoo/chat,SkygearIO/chat,lakoo/chat | plugin/test/test_handle_conversation_after_delete.py | plugin/test/test_handle_conversation_after_delete.py | import unittest
import copy
from unittest.mock import Mock
import chat_plugin
from chat_plugin import handle_conversation_after_delete
class TestHandleConversationAfterDelete(unittest.TestCase):
    """Tests for handle_conversation_after_delete's event publishing."""

    def setUp(self):
        # No real DB connection is used by these tests.
        self.conn = None
        # Replace the module's event publisher so calls can be counted.
        self.mock_publish_event = Mock()
        chat_plugin._publish_event = self.mock_publish_event

    def record(self):
        # Helper (not a test): a conversation with three participants,
        # one of whom is an admin.
        return {
            'participant_ids': ['user1', 'user2', 'user3'],
            'admin_ids': ['user1']
        }

    def test_publish_event_count_should_be_three(self):
        # One published event is expected per participant.
        handle_conversation_after_delete(self.record(), self.conn)
        self.assertIs(self.mock_publish_event.call_count, 3)
| apache-2.0 | Python | |
a4620f5371cea0a90360c6968c7ecbe426e9e1f4 | Create genomic_range_query.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | codility/genomic_range_query.py | codility/genomic_range_query.py | """
https://codility.com/programmers/task/genomic_range_query/
"""
from collections import Counter
def solution(S, P, Q):
    """Answer genomic range queries.

    For each query k, return the minimal impact factor (A=1, C=2, G=3,
    T=4) of any nucleotide occurring in S[P[k]:Q[k]+1]. Uses the
    per-index counters built by ``prefix_counts`` / ``suffix_counts``:
    counts inside a slice = total - (before slice) - (after slice).
    """
    # Instead of counters, could've also used four prefix-sum and four
    # suffix-sum arrays, one per nucleotide value.
    values = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
    S = tuple(values[char] for char in S)
    total_counts = Counter(S)
    pref = prefix_counts(S)
    suff = suffix_counts(S)

    def _min_impact_factor(p, q):
        # Occurrences of each value within S[p:q+1].
        # Bug fix: dict.iteritems() does not exist on Python 3; items()
        # behaves identically here (the dict is small and not mutated).
        slice_counts = {val: (count - pref[p][val] - suff[q][val])
                        for val, count in total_counts.items()}
        # The slice is non-empty (p <= q), so at least one value occurs.
        return next(v for v in (1, 2, 3, 4)
                    if v in slice_counts and slice_counts[v] > 0)

    return [_min_impact_factor(p, q) for p, q in zip(P, Q)]
def prefix_counts(A):
    """For each index i, map every value in (1, 2, 3, 4) to the number of
    its occurrences in A[:i] (i.e. strictly before i).

    Returns a list of len(A) dicts. An empty input yields an empty list
    (the original raised IndexError on empty A).
    """
    if not A:
        return []
    result = [None] * len(A)
    result[0] = {val: 0 for val in (1, 2, 3, 4)}
    # Bug fix: xrange is Python 2 only; range is the Python 3 equivalent
    # and behaves identically in a for loop.
    for i in range(1, len(A)):
        counts = result[i - 1].copy()
        counts[A[i - 1]] += 1
        result[i] = counts
    return result
def suffix_counts(A):
    """For each index i, map every value in (1, 2, 3, 4) to the number of
    its occurrences in A[i+1:] (i.e. strictly after i).

    Returns a list of len(A) dicts. An empty input yields an empty list
    (the original raised IndexError on empty A).
    """
    if not A:
        return []
    result = [None] * len(A)
    result[-1] = {val: 0 for val in (1, 2, 3, 4)}
    # Bug fix: xrange is Python 2 only; range is the Python 3 equivalent.
    for i in range(len(A) - 2, -1, -1):
        counts = result[i + 1].copy()
        counts[A[i + 1]] += 1
        result[i] = counts
    return result
| mit | Python | |
ce93955bc9a5f16129ec93293a6debdb7e75891a | add script to generate gexf from csvs | berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud | tools/graph/generate_gexf_from_csv.py | tools/graph/generate_gexf_from_csv.py | #!/usr/bin/env python3
# generate a gexf file from a node csv and an edges csv
import argparse
import csv
import networkx as nx
import re
import mediawords.util.log
logger = mediawords.util.log.create_logger(__name__)
def main():
    """Build a directed graph from node and edge CSVs and write it as GEXF.

    Optionally drops edges whose endpoints are missing (--dropedges) and
    prunes the graph to its largest weakly connected component unless
    --noprune is given.
    """
    parser = argparse.ArgumentParser(description='generate a gexf file from nodes and edges csvs')
    parser.add_argument('--nodesfile', type=str, help="csv with nodes", required=True)
    parser.add_argument('--edgesfile', type=str, help="csv with edges", required=True)
    parser.add_argument('--gexffile', type=str, help="gexf output file", required=True)
    parser.add_argument('--dropedges', action="store_true", help="drop edges with missing nodes")
    parser.add_argument('--noprune', action="store_true", help="drop nodes not in giant component")
    args = parser.parse_args()

    # Read all node rows up front so errors can be reported by line number.
    # NOTE(review): the append itself cannot raise ValueError; any parse
    # error would surface from DictReader during iteration instead.
    with open(args.nodesfile) as f:
        csv_nodes = []
        for i, node in enumerate(csv.DictReader(f)):
            try:
                csv_nodes.append(node)
            except Exception as e:
                raise(ValueError("error importing node line %d: %s" % (i, e)))

    with open(args.edgesfile) as f:
        csv_edges = []
        for i, edge in enumerate(csv.DictReader(f)):
            try:
                csv_edges.append(edge)
            except Exception as e:
                raise(ValueError("error importing edge line %d: %s" % (i, e)))

    g = nx.DiGraph()

    node_lookup = {}
    for csv_node in csv_nodes:
        # Accept mediacloud-style column names as aliases for id/label.
        if 'stories_id' in csv_node and 'id' not in csv_node:
            csv_node['id'] = csv_node['stories_id']
            del(csv_node['stories_id'])
        if 'title' in csv_node and 'label' not in csv_node:
            csv_node['label'] = csv_node['title']
            del(csv_node['title'])
        # Coerce every "*count" column to int, defaulting to 0 on bad input.
        for key in csv_node:
            if re.search('count$', key):
                try:
                    csv_node[key] = int(csv_node[key])
                except(ValueError):
                    csv_node[key] = 0
        if 'id' not in csv_node:
            raise(ValueError('node does not include valid id field: ' + str(csv_node)))
        g.add_node(csv_node['id'], csv_node)
        node_lookup[csv_node['id']] = csv_node

    for csv_edge in csv_edges:
        # Edge endpoints may use source/target or stories_id_a/_b columns.
        source = csv_edge['source'] or csv_edge['stories_id_a']
        target = csv_edge['target'] or csv_edge['stories_id_b']
        if source not in node_lookup or target not in node_lookup:
            # Missing endpoints are fatal unless --dropedges was passed.
            if not args.dropedges:
                raise(ValueError('nodes list does not include source or target: ' + str(csv_edge)))
            else:
                continue
        g.add_edge(source, target)

    # g = max(nx.weakly_connected_component_subgraphs(g), key=len)
    # Largest weakly connected component first; the rest are candidates
    # for pruning.
    component_graphs = sorted(nx.weakly_connected_component_subgraphs(g), key=len, reverse=True)
    giant_g = component_graphs.pop(0)

    dropped_nodes = []
    [dropped_nodes.extend(dropped_graph.nodes(data=True)) for dropped_graph in component_graphs]
    num_dropped_nodes = len(dropped_nodes)

    if args.noprune:
        logger.info("keeping %d nodes outside giant component" % num_dropped_nodes)
    else:
        g = giant_g
        if num_dropped_nodes > 0:
            logger.info("dropped %d nodes pruning to giant component" % num_dropped_nodes)
            [logger.debug("dropped node: %s" % str(n)) for n in dropped_nodes]

    nx.write_gexf(g, args.gexffile)


main()
| agpl-3.0 | Python | |
911da4d608883931166db3db27668cbc20413a6f | Create a .csv file from the CRISPR database. | mbonsma/phageParser,phageParser/phageParser,goyalsid/phageParser,goyalsid/phageParser,mbonsma/phageParser,mbonsma/phageParser,phageParser/phageParser,goyalsid/phageParser,phageParser/phageParser,phageParser/phageParser,mbonsma/phageParser | extract_CRISPRdb.py | extract_CRISPRdb.py | import requests
from pattern import web
import re
import csv
def get_dom(url):
    """Fetch *url* over HTTP and return its HTML parsed as a DOM Element."""
    return web.Element(requests.get(url).text)
def get_taxons_from_CRISPRdb():
    """Scrape the CRISPRdb homepage and return a list of dicts, one per
    strain, each with the strain 'Name' and its 'Taxon_id'."""
    url = "http://crispr.u-psud.fr/crispr/"
    dom_homepage = get_dom(url)
    container = dom_homepage('div[class="strainlist"]')[0]
    crisprs = []
    for link in container('a'):
        crispr = {}
        crispr['Name'] = link.content.encode('ascii','ignore')
        # The taxon id is the tail of the href, after the fixed-length
        # query-string prefix (46 characters).
        crispr['Taxon_id'] = link.href.encode('ascii','ignore')[46:]
        crisprs.append(crispr)
    return crisprs
def get_genome_properties(crispr):
    """Fetch the species-properties page for crispr['Taxon_id'] and add a
    'Ref_seqs' dict keyed by reference-sequence name (values start empty,
    to be filled by get_CRISPR_properties). Returns the mutated dict."""
    url_crispr = "http://crispr.u-psud.fr/cgi-bin/crispr/SpecieProperties.cgi?Taxon_id=" + crispr['Taxon_id']
    html_crispr = requests.get(url_crispr).text
    dom_crispr = web.Element(html_crispr)
    # The second primary_table on the page lists the reference sequences.
    table = dom_crispr('table[class="primary_table"]')[1]
    crispr['Ref_seqs']={}
    for sequence in table('tr'):
        # Header rows contain <th> cells; only data rows are collected.
        if sequence('th')==[]:
            crispr['Ref_seqs'][sequence('td')[1].content.encode('ascii','ignore')]=[]
    return crispr
def get_CRISPR_properties(crispr):
    """For every reference sequence in crispr['Ref_seqs'], fetch its
    CRISPR-properties page and append the cleaned CRISPR ids found there.
    Returns the mutated dict."""
    for ref_seq in crispr['Ref_seqs'].keys():
        url = "http://crispr.u-psud.fr/crispr/CRISPRProperties.php?RefSeq=" + ref_seq + "&Taxon=" + crispr['Taxon_id']
        html = requests.get(url).text
        dom = web.Element(html)
        div = dom('div[class="rightcontent"]')[1]
        table = div('table[class="primary_table"]')[0]
        tbody = table('tbody')[0]
        for crispr_id in tbody('tr'):
            cell = crispr_id('td')[1]
            # Strip markup fragments and whitespace noise from the raw
            # cell text to recover the bare CRISPR id.
            crispr_id_value = cell('font')[0].content.encode('ascii','ignore').replace('<br/n/>','')
            crispr_id_value = crispr_id_value.replace('\n','')
            crispr_id_value = crispr_id_value.replace('\t','')
            crispr_id_value = crispr_id_value.replace('<br />','')
            crispr['Ref_seqs'][ref_seq].append(crispr_id_value)
    return crispr
def get_results():
    """Crawl CRISPRdb end to end and write results.csv with one row per
    CRISPR locus: [accession number, locus rank, begin pos, end pos]."""
    crisprs = get_taxons_from_CRISPRdb()
    #uncomment if you want to test the script for 4 cases
    #k = 0
    # First pass: populate each taxon's reference sequences and CRISPR ids.
    for crispr in crisprs:
        #uncomment if you want to test the script for 4 cases
        # k+=1
        # if k==5:
        #     break
        crispr = get_genome_properties(crispr)
        crispr = get_CRISPR_properties(crispr)
    results = []
    #uncomment if you want to test the script for 4 cases
    #k = 0
    # Second pass: fetch each CRISPR's detail file and extract positions.
    for crispr in crisprs:
        #uncomment if you want to test the script for 4 cases
        #k+=1
        #if k==5:
        #    break
        for ref_seq in crispr['Ref_seqs'].keys():
            for crispr_id in crispr['Ref_seqs'][ref_seq]:
                params = {'checked[]': crispr_id, 'Taxon': crispr['Taxon_id']}
                r = requests.post("http://crispr.u-psud.fr/crispr/crispr.php", data=params)
                file_crispr_seq = web.Element(r.text)
                table = file_crispr_seq('table[class="crisprs_table"]')[0]
                # The second-to-last form on the page links to the raw
                # sequence file containing the position annotations.
                url = "http://crispr.u-psud.fr" + table('form')[-2].action
                source = requests.get(url).text
                begin = re.search('(?<=Crispr_begin_position: )\d+', source)
                begin = begin.group(0)
                end = re.search('(?<=Crispr_end_position: )\d+', source)
                end = end.group(0)
                main_accession_number = ref_seq
                loc_id = re.search('(?<=Crispr Rank in the sequence: )\d+', source)
                loc_id = loc_id.group(0)
                results.append([main_accession_number,loc_id,begin,end])
    # NOTE(review): 'wb' mode is Python 2 style for csv files; Python 3
    # would need mode 'w' with newline=''.
    with open('results.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        for result in results:
            writer.writerow(result)


get_results()
| mit | Python | |
1cb55aa6b3abd4a3a20ff0f37b6c80c0c89ef1ff | Add a dummy pavement file. | njwilson23/scipy,efiring/scipy,jonycgn/scipy,pnedunuri/scipy,sonnyhu/scipy,raoulbq/scipy,witcxc/scipy,nmayorov/scipy,pizzathief/scipy,WarrenWeckesser/scipy,ilayn/scipy,zerothi/scipy,andyfaff/scipy,Kamp9/scipy,Newman101/scipy,maniteja123/scipy,Shaswat27/scipy,josephcslater/scipy,ChanderG/scipy,pyramania/scipy,WarrenWeckesser/scipy,perimosocordiae/scipy,gdooper/scipy,hainm/scipy,rgommers/scipy,sargas/scipy,cpaulik/scipy,rgommers/scipy,zxsted/scipy,vberaudi/scipy,pnedunuri/scipy,piyush0609/scipy,haudren/scipy,matthewalbani/scipy,andim/scipy,perimosocordiae/scipy,haudren/scipy,trankmichael/scipy,njwilson23/scipy,gdooper/scipy,gdooper/scipy,zerothi/scipy,andim/scipy,raoulbq/scipy,lukauskas/scipy,pnedunuri/scipy,trankmichael/scipy,andim/scipy,tylerjereddy/scipy,pschella/scipy,newemailjdm/scipy,anntzer/scipy,ogrisel/scipy,nmayorov/scipy,juliantaylor/scipy,felipebetancur/scipy,jonycgn/scipy,nvoron23/scipy,ilayn/scipy,pschella/scipy,ortylp/scipy,piyush0609/scipy,endolith/scipy,tylerjereddy/scipy,Eric89GXL/scipy,maciejkula/scipy,Srisai85/scipy,sriki18/scipy,dominicelse/scipy,maniteja123/scipy,ogrisel/scipy,jonycgn/scipy,sargas/scipy,lukauskas/scipy,FRidh/scipy,richardotis/scipy,vanpact/scipy,chatcannon/scipy,njwilson23/scipy,surhudm/scipy,futurulus/scipy,haudren/scipy,juliantaylor/scipy,ales-erjavec/scipy,Kamp9/scipy,jseabold/scipy,nonhermitian/scipy,trankmichael/scipy,richardotis/scipy,scipy/scipy,mortonjt/scipy,larsmans/scipy,apbard/scipy,WillieMaddox/scipy,mdhaber/scipy,gdooper/scipy,jor-/scipy,mtrbean/scipy,behzadnouri/scipy,josephcslater/scipy,mortonjt/scipy,Srisai85/scipy,anntzer/scipy,fernand/scipy,maciejkula/scipy,rmcgibbo/scipy,newemailjdm/scipy,fernand/scipy,mortada/scipy,Shaswat27/scipy,vigna/scipy,jsilter/scipy,dch312/scipy,vanpact/scipy,argriffing/scipy,teoliphant/scipy,matthewalbani/scipy,piyush0609/scipy,pizzathief/scipy,mortonjt/scipy,aman-iitj/scipy,e-q/scipy,befelix/scipy,rgommers/scipy,
mdhaber/scipy,mikebenfield/scipy,efiring/scipy,mgaitan/scipy,ilayn/scipy,WillieMaddox/scipy,jseabold/scipy,anntzer/scipy,josephcslater/scipy,mortonjt/scipy,andim/scipy,gfyoung/scipy,petebachant/scipy,raoulbq/scipy,nvoron23/scipy,zxsted/scipy,surhudm/scipy,behzadnouri/scipy,ChanderG/scipy,sonnyhu/scipy,newemailjdm/scipy,juliantaylor/scipy,lhilt/scipy,tylerjereddy/scipy,WillieMaddox/scipy,andyfaff/scipy,jjhelmus/scipy,sauliusl/scipy,kalvdans/scipy,grlee77/scipy,newemailjdm/scipy,sriki18/scipy,futurulus/scipy,larsmans/scipy,nmayorov/scipy,cpaulik/scipy,mdhaber/scipy,argriffing/scipy,perimosocordiae/scipy,jamestwebber/scipy,ogrisel/scipy,Kamp9/scipy,zxsted/scipy,Stefan-Endres/scipy,niknow/scipy,lukauskas/scipy,bkendzior/scipy,aman-iitj/scipy,ogrisel/scipy,chatcannon/scipy,ndchorley/scipy,kalvdans/scipy,jsilter/scipy,felipebetancur/scipy,Newman101/scipy,niknow/scipy,jjhelmus/scipy,Dapid/scipy,newemailjdm/scipy,jakevdp/scipy,giorgiop/scipy,fredrikw/scipy,andyfaff/scipy,grlee77/scipy,giorgiop/scipy,woodscn/scipy,Gillu13/scipy,petebachant/scipy,woodscn/scipy,ChanderG/scipy,FRidh/scipy,trankmichael/scipy,nvoron23/scipy,teoliphant/scipy,rmcgibbo/scipy,aeklant/scipy,larsmans/scipy,vberaudi/scipy,aman-iitj/scipy,njwilson23/scipy,tylerjereddy/scipy,kalvdans/scipy,kleskjr/scipy,endolith/scipy,gertingold/scipy,Gillu13/scipy,pbrod/scipy,mortada/scipy,Dapid/scipy,Newman101/scipy,Gillu13/scipy,dch312/scipy,jjhelmus/scipy,newemailjdm/scipy,mikebenfield/scipy,Shaswat27/scipy,nmayorov/scipy,futurulus/scipy,minhlongdo/scipy,Newman101/scipy,lukauskas/scipy,Dapid/scipy,gef756/scipy,mortada/scipy,andyfaff/scipy,apbard/scipy,aarchiba/scipy,efiring/scipy,trankmichael/scipy,jamestwebber/scipy,nonhermitian/scipy,Eric89GXL/scipy,richardotis/scipy,ilayn/scipy,hainm/scipy,FRidh/scipy,chatcannon/scipy,richardotis/scipy,sriki18/scipy,gertingold/scipy,kalvdans/scipy,behzadnouri/scipy,jamestwebber/scipy,vhaasteren/scipy,juliantaylor/scipy,ortylp/scipy,ilayn/scipy,FRidh/scipy,giorgiop/scipy,lhilt/scipy
,arokem/scipy,perimosocordiae/scipy,vanpact/scipy,ortylp/scipy,mdhaber/scipy,person142/scipy,behzadnouri/scipy,maciejkula/scipy,tylerjereddy/scipy,e-q/scipy,ortylp/scipy,Kamp9/scipy,Eric89GXL/scipy,lhilt/scipy,endolith/scipy,efiring/scipy,ogrisel/scipy,pbrod/scipy,endolith/scipy,haudren/scipy,nonhermitian/scipy,woodscn/scipy,scipy/scipy,anielsen001/scipy,mortada/scipy,ales-erjavec/scipy,larsmans/scipy,jakevdp/scipy,dominicelse/scipy,mortonjt/scipy,efiring/scipy,Stefan-Endres/scipy,ndchorley/scipy,WillieMaddox/scipy,teoliphant/scipy,dominicelse/scipy,piyush0609/scipy,trankmichael/scipy,arokem/scipy,teoliphant/scipy,Eric89GXL/scipy,andim/scipy,Eric89GXL/scipy,josephcslater/scipy,pizzathief/scipy,aeklant/scipy,rgommers/scipy,cpaulik/scipy,Stefan-Endres/scipy,fernand/scipy,jor-/scipy,ales-erjavec/scipy,matthewalbani/scipy,kleskjr/scipy,sriki18/scipy,lukauskas/scipy,mgaitan/scipy,nonhermitian/scipy,haudren/scipy,endolith/scipy,matthew-brett/scipy,nonhermitian/scipy,mingwpy/scipy,jsilter/scipy,giorgiop/scipy,mtrbean/scipy,pschella/scipy,anielsen001/scipy,grlee77/scipy,arokem/scipy,jamestwebber/scipy,Eric89GXL/scipy,rmcgibbo/scipy,Gillu13/scipy,witcxc/scipy,sriki18/scipy,pnedunuri/scipy,Kamp9/scipy,sauliusl/scipy,ndchorley/scipy,zaxliu/scipy,nmayorov/scipy,befelix/scipy,kleskjr/scipy,felipebetancur/scipy,mdhaber/scipy,WarrenWeckesser/scipy,richardotis/scipy,matthew-brett/scipy,kleskjr/scipy,jsilter/scipy,endolith/scipy,gertingold/scipy,sriki18/scipy,sargas/scipy,nvoron23/scipy,jamestwebber/scipy,vberaudi/scipy,futurulus/scipy,zerothi/scipy,zaxliu/scipy,Gillu13/scipy,dch312/scipy,Shaswat27/scipy,sargas/scipy,minhlongdo/scipy,giorgiop/scipy,arokem/scipy,witcxc/scipy,perimosocordiae/scipy,argriffing/scipy,zaxliu/scipy,vigna/scipy,fredrikw/scipy,pizzathief/scipy,ales-erjavec/scipy,Stefan-Endres/scipy,fredrikw/scipy,jakevdp/scipy,woodscn/scipy,anntzer/scipy,lhilt/scipy,gef756/scipy,gertingold/scipy,jjhelmus/scipy,maniteja123/scipy,matthewalbani/scipy,chatcannon/scipy,aarchiba/s
cipy,WarrenWeckesser/scipy,pbrod/scipy,andim/scipy,felipebetancur/scipy,argriffing/scipy,dominicelse/scipy,FRidh/scipy,pnedunuri/scipy,pnedunuri/scipy,mdhaber/scipy,niknow/scipy,anielsen001/scipy,zxsted/scipy,fredrikw/scipy,hainm/scipy,jonycgn/scipy,ChanderG/scipy,futurulus/scipy,mtrbean/scipy,person142/scipy,sargas/scipy,sauliusl/scipy,minhlongdo/scipy,surhudm/scipy,jseabold/scipy,woodscn/scipy,aarchiba/scipy,vigna/scipy,scipy/scipy,haudren/scipy,befelix/scipy,woodscn/scipy,person142/scipy,befelix/scipy,scipy/scipy,matthew-brett/scipy,andyfaff/scipy,nvoron23/scipy,giorgiop/scipy,ales-erjavec/scipy,mhogg/scipy,lhilt/scipy,piyush0609/scipy,zerothi/scipy,sonnyhu/scipy,mingwpy/scipy,mgaitan/scipy,Shaswat27/scipy,Srisai85/scipy,Newman101/scipy,mingwpy/scipy,petebachant/scipy,ortylp/scipy,vhaasteren/scipy,anielsen001/scipy,gef756/scipy,petebachant/scipy,bkendzior/scipy,pyramania/scipy,mortada/scipy,fernand/scipy,gef756/scipy,arokem/scipy,lukauskas/scipy,bkendzior/scipy,pbrod/scipy,mhogg/scipy,fernand/scipy,vhaasteren/scipy,niknow/scipy,perimosocordiae/scipy,aeklant/scipy,maciejkula/scipy,Stefan-Endres/scipy,WarrenWeckesser/scipy,jor-/scipy,person142/scipy,gfyoung/scipy,ndchorley/scipy,ortylp/scipy,raoulbq/scipy,mortonjt/scipy,jseabold/scipy,ChanderG/scipy,gertingold/scipy,WarrenWeckesser/scipy,andyfaff/scipy,gef756/scipy,vanpact/scipy,petebachant/scipy,aman-iitj/scipy,mingwpy/scipy,futurulus/scipy,pyramania/scipy,piyush0609/scipy,ales-erjavec/scipy,fredrikw/scipy,Srisai85/scipy,matthew-brett/scipy,mortada/scipy,Dapid/scipy,mhogg/scipy,apbard/scipy,vigna/scipy,zaxliu/scipy,grlee77/scipy,maciejkula/scipy,larsmans/scipy,aman-iitj/scipy,apbard/scipy,vanpact/scipy,gdooper/scipy,hainm/scipy,mgaitan/scipy,mhogg/scipy,argriffing/scipy,pizzathief/scipy,bkendzior/scipy,gfyoung/scipy,niknow/scipy,rmcgibbo/scipy,Dapid/scipy,vberaudi/scipy,vberaudi/scipy,jonycgn/scipy,cpaulik/scipy,Dapid/scipy,maniteja123/scipy,maniteja123/scipy,e-q/scipy,kleskjr/scipy,jseabold/scipy,vberaudi/scipy,m
gaitan/scipy,aarchiba/scipy,mikebenfield/scipy,raoulbq/scipy,fernand/scipy,richardotis/scipy,ChanderG/scipy,jseabold/scipy,vhaasteren/scipy,niknow/scipy,aman-iitj/scipy,mikebenfield/scipy,minhlongdo/scipy,Srisai85/scipy,vhaasteren/scipy,mingwpy/scipy,FRidh/scipy,njwilson23/scipy,pyramania/scipy,fredrikw/scipy,witcxc/scipy,WillieMaddox/scipy,anielsen001/scipy,sonnyhu/scipy,pschella/scipy,anntzer/scipy,dch312/scipy,scipy/scipy,behzadnouri/scipy,witcxc/scipy,jonycgn/scipy,petebachant/scipy,pschella/scipy,nvoron23/scipy,dominicelse/scipy,mhogg/scipy,grlee77/scipy,mhogg/scipy,jor-/scipy,bkendzior/scipy,mgaitan/scipy,gef756/scipy,kleskjr/scipy,jakevdp/scipy,hainm/scipy,vigna/scipy,zxsted/scipy,e-q/scipy,zerothi/scipy,zxsted/scipy,zaxliu/scipy,cpaulik/scipy,Stefan-Endres/scipy,raoulbq/scipy,matthewalbani/scipy,zaxliu/scipy,sauliusl/scipy,hainm/scipy,minhlongdo/scipy,e-q/scipy,rmcgibbo/scipy,Shaswat27/scipy,apbard/scipy,mikebenfield/scipy,sauliusl/scipy,felipebetancur/scipy,jor-/scipy,person142/scipy,befelix/scipy,Gillu13/scipy,rgommers/scipy,teoliphant/scipy,efiring/scipy,juliantaylor/scipy,mtrbean/scipy,josephcslater/scipy,ndchorley/scipy,gfyoung/scipy,scipy/scipy,mtrbean/scipy,jjhelmus/scipy,pbrod/scipy,mtrbean/scipy,matthew-brett/scipy,ndchorley/scipy,rmcgibbo/scipy,maniteja123/scipy,minhlongdo/scipy,dch312/scipy,jakevdp/scipy,gfyoung/scipy,surhudm/scipy,behzadnouri/scipy,Newman101/scipy,anntzer/scipy,jsilter/scipy,argriffing/scipy,zerothi/scipy,Srisai85/scipy,cpaulik/scipy,WillieMaddox/scipy,surhudm/scipy,kalvdans/scipy,aeklant/scipy,anielsen001/scipy,chatcannon/scipy,pyramania/scipy,felipebetancur/scipy,sonnyhu/scipy,larsmans/scipy,mingwpy/scipy,Kamp9/scipy,chatcannon/scipy,sauliusl/scipy,pbrod/scipy,vhaasteren/scipy,aarchiba/scipy,surhudm/scipy,sonnyhu/scipy,njwilson23/scipy,vanpact/scipy,ilayn/scipy,aeklant/scipy | tools/win32/build_scripts/pavement.py | tools/win32/build_scripts/pavement.py | options(
setup=Bunch(
name = "scipy-superpack",
)
)
@task
def setup():
    """Paver task that reports which package is being set up (Python 2)."""
    # NOTE(review): `options.name` is not assigned at top level; presumably
    # Paver resolves the lookup through the `setup` Bunch defined in
    # options() above -- confirm against Paver's options semantics.
    print "Setting up package %s" % options.name
| bsd-3-clause | Python | |
cef74f6d84f1d7fec54fd9a314888e7d0e84ac3f | Create telnet-cmdrunner.py | pynetscript/FromZeroToHero | telnet-cmdrunner.py | telnet-cmdrunner.py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
import netmiko
import json
import tools
import sys ### Capture and handle signals past from the Operating System.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL) ### IOERror: Broken pipe
signal.signal(signal.SIGINT, signal.SIG_DFL) ### KeyboardInterrupt: Ctrl-C
### If authentication fails, the script will continue to run.
### If connection times out, the script will continue to run.
netmiko_exceptions = (netmiko.ssh_exception.NetMikoTimeoutException,
                      netmiko.ssh_exception.NetMikoAuthenticationException)
### Same credentials are used for every device in the inventory below.
username, password = tools.get_credentials()
### Device inventory: one connection dict per device, loaded from disk.
with open('cisco_ios_telnet_devices.json') as dev_file:
    cisco_ios_telnet_devices = json.load(dev_file)
### Config snippets pushed to every device: domain name first, then the RSA
### key (slow -- see delay_factor below), then the SSH settings that use it.
domain_name = [ 'ip domain-name a-corp.com']
crypto_key_gen = [ 'crypto key generate rsa label SSH mod 2048']
ssh_commands = [ 'ip ssh rsa keypair-name SSH',
                 'ip ssh version 2',]
for device in cisco_ios_telnet_devices:
    device['username'] = username
    device['password'] = password
    try:
        print()
        print('='*79)
        print('Connecting to device:', device['ip'])
        print('-'*79)
        ### Establish session to each device in "cisco_ios_telnet_devices.json"
        ### ** is used to unpack the dictonary for Netmiko
        connection = netmiko.ConnectHandler(**device)
        print(connection.send_config_set(domain_name))
        print('-'*79)
        ### Key generation can take a while; delay_factor=10 stretches the
        ### Netmiko timeouts so the command is not cut off mid-run.
        print(connection.send_config_set(crypto_key_gen, delay_factor=10))
        print('-'*79)
        print(connection.send_config_set(ssh_commands))
        print('-'*79)
        ### Disconnect sessions.
        connection.disconnect()
    except netmiko_exceptions as e:
        ### Timeouts / auth failures are reported but do not stop the loop.
        print('Failed to:', device['ip'])
        print(e)
| mit | Python | |
ecb6390c800260cedddba655f253a8307e096d76 | Create setup.py | hagne/atm-py,lo-co/atm-py,hagne/atm-py,mtat76/atm-py,msrconsulting/atm-py | setup.py | setup.py | from distutils.core import setup
# Minimal distutils packaging: a single top-level package, no extras.
setup(name='atmPy',
      version='0.1',
      description='Python Distribution Utilities',
      author='Hagen Telg and Matt Richardson',
      author_email='matt.richardson@msrconsults.com',
      packages=['atmPy'],
      )
| mit | Python | |
93cb8184fe5fdbf294c1e8f36b45ed8b514b2ce5 | Allow setup file to enable pip installation | rlworkgroup/metaworld,rlworkgroup/metaworld,kschmeckpeper/multiworld | setup.py | setup.py | from distutils.core import setup
# Minimal distutils stub so the repository can be pip-installed.
setup(
    name='multiworld',
    packages=('multiworld', ),
)
| mit | Python | |
dad2024344f581aa042f767e4aa473d50a8f78bc | Create individual_dist_label.py | soligschlager/topography,soligschlager/topography,margulies/topography,soligschlager/topography,margulies/topography,soligschlager/topography,margulies/topography,margulies/topography | sandbox/individual_distance/individual_dist_label.py | sandbox/individual_distance/individual_dist_label.py | #!/usr/bin/python
import os, numpy as np, scipy as sp, nibabel.freesurfer as fs
from sklearn.utils.arpack import eigsh
# Set defaults:
base_dir = '/scr/liberia1/LEMON_LSD/LSD_rest_surf'
output_base_dir = '/scr/liberia1'
subjects = [26410]
# NOTE(review): the loop body below is an unfinished sketch and does NOT
# parse -- several right-hand sides are missing (`cort = # nodes`,
# `distmat = # read in`, ...), `label1`/`label2`/`etc` and `mean` are
# undefined.  The comments mark where data loading is still to be written.
for subject in subjects:
    for hemi in ['lh', 'rh']:
        # read in cortical mask
        cort = # nodes
        dataCorr = # load conn mat and mask out only cortex
        fullsize = # length of full cortex
        embedding = DoFiedler(dataCorr[cort,cort]) # see below for details
        del dataCorr
        # reinsert zeros:
        fiedler = np.zeros(fullsize)
        fiedler[cort] = embedding[1] # check if this reads the first eigenvector correctly
        # read in distance matrix
        distmat = # read in
        # get labels from freesurfer for anatomy
        fs_labels = # read in
        label_parietal = fs_labels == # XXX # grab parietal mask
        for i in [label1, label2, etc]:
            label_dist = np.mean(distmat(fs_labels == i))
        # mask fiedler by parietal to get peak of DMN in parietal
        masked_fiedler = fiedler * label_parietal
        if masked_fiedler > mean(fiedler):
            anat_dist = label_dist(max(masked_fiedler)) # does that compute elementwise product?
        else:
            anat_dist = label_dist(min(masked_fiedler)) # does that compute elementwise product?
        # save out anat_dist for subject / hemi / anat label
        # also create images for quality control: fiedler, masked_fiedler
def DoFiedler(conn):
    """Compute a 2-component diffusion-map embedding of a correlation matrix.

    conn -- square (n, n) array-like; assumed to hold correlations in
            [-1, 1] (the first line shifts them into [0, 1]) -- TODO confirm.
    Returns the (n, 2) diffusion-map coordinates; the caller treats the
    first returned column as the Fiedler-vector-like component.
    """
    # prep for embedding
    # Shift correlations from [-1, 1] into [0, 1] to form an affinity kernel.
    K = (conn + 1) / 2.
    v = np.sqrt(np.sum(K, axis=1))
    # Symmetric normalisation K_ij / sqrt(d_i * d_j).
    A = K/(v[:, None] * v[None, :])
    del K
    # Zero out negative entries; the list wrapper adds a leading axis that
    # squeeze() removes again (net effect: A * (A > 0)).
    A = np.squeeze(A * [A > 0])
    # diffusion embedding
    n_components_embedding = 2
    # Largest eigenpairs via ARPACK (sklearn.utils.arpack re-export of eigsh).
    lambdas, vectors = eigsh(A, k=n_components_embedding+1)
    del A
    # eigsh returns ascending eigenvalues; flip both to descending order.
    lambdas = lambdas[::-1]
    vectors = vectors[:, ::-1]
    # Divide each row by its stationary (first) component.
    psi = vectors/vectors[:, 0][:, None]
    # Diffusion-time rescaling lambda/(1-lambda), dropping the trivial eigenvalue.
    lambdas = lambdas[1:] / (1 - lambdas[1:])
    embedding = psi[:, 1:(n_components_embedding + 1 + 1)] * lambdas[:n_components_embedding+1][None, :]
    return embedding
| mit | Python | |
a2865b712d0a28e3a0b8943f67703a77b5d90894 | Add a stub for testing _utils | dask-image/dask-ndfourier | tests/test__utils.py | tests/test__utils.py | # -*- coding: utf-8 -*-
| bsd-3-clause | Python | |
181833870da1921e280d2439ae08ed74c7b137a5 | Add test for h5diag | UCBerkeleySETI/blimpy,UCBerkeleySETI/blimpy | tests/test_h5diag.py | tests/test_h5diag.py | from os.path import dirname
import numpy as np
import hdf5plugin
import h5py
from blimpy.h5diag import cmd_tool
from tests.data import voyager_h5, voyager_fil
import pytest
header = [
["fruit", "apple"],
["color", "red"],
["plant", "tree"]
]
DIR = dirname(voyager_fil)
TEST_H5 = DIR + "/test.h5"
TIME_INSTANCES = 8
FREQ_INSTANCES = 16
DATA_BYTESIZE = TIME_INSTANCES * FREQ_INSTANCES * 4
def my_writer(my_class):
    """Write a small synthetic filterbank-style HDF5 file to TEST_H5.

    Creates a (TIME_INSTANCES, 1, FREQ_INSTANCES) float "data" dataset
    filled with 42.0 plus an empty uint8 "mask" dataset, labels the three
    dimensions, sets the CLASS/VERSION attributes and copies the module
    `header` key/value pairs onto the "data" dataset.
    """
    payload = np.full((TIME_INSTANCES, 1, FREQ_INSTANCES), 42.0, dtype=float)
    print("data_out shape:", payload.shape)
    # Bitshuffle options are identical for both datasets; build them once.
    bitshuffle = hdf5plugin.Bitshuffle(nelems=0, lz4=True)
    compression = bitshuffle["compression"]
    compression_opts = bitshuffle["compression_opts"]
    with h5py.File(TEST_H5, "w") as h5:
        h5.attrs["CLASS"] = my_class
        h5.attrs["VERSION"] = "1.0"
        data_set = h5.create_dataset(
            "data",
            data=payload,
            compression=compression,
            compression_opts=compression_opts)
        mask_set = h5.create_dataset(
            "mask",
            shape=payload.shape,
            compression=compression,
            compression_opts=compression_opts,
            dtype="uint8")
        # Dimension labels follow the filterbank convention.
        for axis, label in ((0, b"time"), (1, b"feed_id"), (2, b"frequency")):
            data_set.dims[axis].label = label
            mask_set.dims[axis].label = label
        # Copy over header information as attributes
        for key, value in header:
            data_set.attrs[key] = value
def execute_command(args):
    """Log the argument list, then invoke blimpy's h5diag command-line tool."""
    print("\ntest_h5diag: args: " + str(args))
    cmd_tool(args)
def test_h5diag():
    """Exercise cmd_tool against good and deliberately broken input files.

    Sequence: a valid .h5 must succeed; a .fil file and HDF5 files with
    missing/wrong CLASS or VERSION attributes must SystemExit; a fully
    valid synthetic FILTERBANK file must succeed again.
    """
    # Known-good HDF5 input: must run cleanly.
    args = [voyager_h5]
    execute_command(args)
    # A filterbank (.fil) file is not HDF5 -> tool must exit.
    with pytest.raises(SystemExit):
        args = [voyager_fil]
        execute_command(args)
    # CLASS attribute present but not "FILTERBANK" -> exit.
    my_writer("FRUITY")
    with pytest.raises(SystemExit):
        args = [TEST_H5]
        execute_command(args)
    # VERSION attribute only, no CLASS -> exit.
    with h5py.File(TEST_H5, "w") as h5:
        h5.attrs["VERSION"] = "42.0"
    with pytest.raises(SystemExit):
        args = [TEST_H5]
        execute_command(args)
    # CLASS only, no VERSION -> exit.
    with h5py.File(TEST_H5, "w") as h5:
        h5.attrs["CLASS"] = "FILTERBANK"
    with pytest.raises(SystemExit):
        args = [TEST_H5]
        execute_command(args)
    # CLASS and VERSION present on an otherwise empty file -> exit
    # (presumably because the data/mask datasets are missing -- confirm).
    with h5py.File(TEST_H5, "w") as h5:
        h5.attrs["CLASS"] = "FILTERBANK"
        h5.attrs["VERSION"] = "42.0"
    with pytest.raises(SystemExit):
        args = [TEST_H5]
        execute_command(args)
    # Fully valid synthetic FILTERBANK file -> must run cleanly.
    my_writer("FILTERBANK")
    args = [TEST_H5]
    execute_command(args)
if __name__ == "__main__":
test_h5diag()
| bsd-3-clause | Python | |
ec9944bdb7945543c95ec43d627d213536d5735a | Add monitor for volume tags | blrm/openshift-tools,openshift/openshift-tools,blrm/openshift-tools,openshift/openshift-tools,drewandersonnz/openshift-tools,drewandersonnz/openshift-tools,blrm/openshift-tools,drewandersonnz/openshift-tools,drewandersonnz/openshift-tools,openshift/openshift-tools,drewandersonnz/openshift-tools,openshift/openshift-tools,drewandersonnz/openshift-tools,blrm/openshift-tools,blrm/openshift-tools,openshift/openshift-tools,blrm/openshift-tools,openshift/openshift-tools | scripts/monitoring/cron-send-snapshots-tags-check.py | scripts/monitoring/cron-send-snapshots-tags-check.py | #!/usr/bin/env python
""" Check Persistent Volumes Snapshot Tags """
# We just want to see any exception that happens
# don't want the script to die under any cicumstances
# script must try to clean itself up
# pylint: disable=broad-except
# main() function has a lot of setup and error handling
# pylint: disable=too-many-statements
# main() function raises a captured exception if there is one
# pylint: disable=raising-bad-type
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
import argparse
import datetime
import logging
import os
import re
import sys
import time
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
#pylint: disable=maybe-no-member
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
from openshift_tools.cloud.aws.ebs_util import EbsUtil
from openshift_tools.cloud.aws.ebs_snapshotter import SUPPORTED_SCHEDULES, EbsSnapshotter
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ocutil = OCUtil()
DAILY_SCHEDULE = "daily"
def runOCcmd(cmd, base_cmd='oc'):
    """Run *cmd* through ocutil, logging the full command line first."""
    logger.info("%s %s" % (base_cmd, cmd))
    return ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
def runOCcmd_yaml(cmd, base_cmd='oc'):
    """Run *cmd* through ocutil with YAML-parsed output, logging it first."""
    logger.info("%s %s" % (base_cmd, cmd))
    return ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
def parse_args():
    """Define and evaluate the command-line interface."""
    logger.debug("parse_args()")
    parser = argparse.ArgumentParser(description='Check Volume Snapshots ')
    # (flags, add_argument keyword options), registered in CLI order.
    argument_table = (
        (('-v', '--verbose'),
         dict(action='store_true', default=None, help='Verbose?')),
        (('--aws-creds-profile',),
         dict(required=False, help='The AWS credentials profile to use.')),
        (('--region',),
         dict(required=True,
              help='The region that we want to process snapshots in')),
    )
    for flags, options in argument_table:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def send_metrics(status):
    """Push the snapshot-tag status metric and log how long the send took."""
    logger.debug("send_metrics()")
    start = time.time()
    sender = MetricSender()
    logger.info("Send data to MetricSender")
    sender.add_metric({'openshift.master.pv.snapshots.tags.status': status})
    sender.send_metrics()
    elapsed = time.time() - start
    logger.info("Data sent to Zagg in %s seconds", str(elapsed))
def get_pv_volume_ids():
    """Map persistent-volume names to their backing EBS volume ids.

    Only PVs backed by awsElasticBlockStore are included; the id is the
    last path component of the volumeID field.
    """
    pv_info = runOCcmd_yaml(" get pv")
    return {
        pv['metadata']['name']:
            pv['spec']['awsElasticBlockStore']['volumeID'].split("/")[-1]
        for pv in pv_info['items']
        if "awsElasticBlockStore" in pv['spec']
    }
def validate_volume_tag(ebs_snapshotter, volume_id, snapshot_tag):
    """Check whether an EBS volume carries the expected snapshot schedule tag.

    :param ebs_snapshotter: object exposing an ec2 connection (EbsSnapshotter)
    :param volume_id: id of the EBS volume to inspect
    :param snapshot_tag: expected value of the volume's "snapshot" tag
    :return: True iff the volume's "snapshot" tag equals snapshot_tag
    """
    volume = ebs_snapshotter.ec2.get_all_volumes(volume_ids=[volume_id])[0]
    # Return the boolean directly instead of the redundant True/False branches.
    return "snapshot" in volume.tags and volume.tags["snapshot"] == snapshot_tag
def main():
    """Check every PV-backed EBS volume for the daily snapshot tag and
    report the number of untagged volumes as a metric."""
    logger.info('################################################################################')
    logger.info(' Starting Volume Snapshot Tag Checks - %s', datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    logger.info('################################################################################')
    logger.debug("main()")
    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    if args.aws_creds_profile:
        os.environ['AWS_PROFILE'] = args.aws_creds_profile
    ebs_snapshotter = EbsSnapshotter(args.region, verbose=True)
    if not ebs_snapshotter.is_region_valid(args.region):
        logger.info("Invalid region")
        # Requires the module-level `import sys` (it was missing originally,
        # so this branch raised NameError instead of exiting cleanly).
        sys.exit(1)
    else:
        logger.info("Region: %s:", args.region)
        # NOTE(review): ebs_util is never used afterwards and the snapshotter
        # is re-created identically; kept as-is because the constructors may
        # open AWS connections with side effects -- confirm before removing.
        ebs_util = EbsUtil(args.region, verbose=True)
        ebs_snapshotter = EbsSnapshotter(args.region, verbose=True)
    volumes = get_pv_volume_ids()
    # Count PVs whose EBS volume lacks the snapshot:daily tag.
    status = 0
    for volume in volumes:
        logger.info('Checking pv: %s, volume ID: %s', volume, volumes[volume])
        has_tag = validate_volume_tag(ebs_snapshotter, volumes[volume], DAILY_SCHEDULE)
        if not has_tag:
            # Logger.warning: .warn is a deprecated alias.
            logger.warning('pv :%s has no "snapshot:daily" tags', volume)
            status += 1
    send_metrics(status)
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
a986397ca1bdc3bdc8894fab8b336803c172b295 | add settings file for staging (has a database url but no Sentry) | texastribune/txlege84,texastribune/txlege84,texastribune/txlege84,texastribune/txlege84 | txlege84/txlege84/settings/staging.py | txlege84/txlege84/settings/staging.py | #######################
# STAGING SETTINGS    #
#######################
import dj_database_url
from .base import *
# Console-only logging: django.request messages at DEBUG and above are
# written to the process output stream via StreamHandler.
LOGGING = {
    'version': 1,
    'handlers': {
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers':['console'],
            'propagate': True,
            'level':'DEBUG',
        }
    },
}
######################
# HOST CONFIGURATION #
######################
# https://docs.djangoproject.com/en/1.7/ref/settings/#allowed-hosts
# https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['.texastribune.org'] #FIXME
##########################
# DATABASE CONFIGURATION #
##########################
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config()
}
#######################
# CACHE CONFIGURATION #
#######################
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
############################
# SECRET KEY CONFIGURATION #
############################
# https://docs.djangoproject.com/en/1.7/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
################################
# DJANGO STORAGE CONFIGURATION #
################################
# http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
| mit | Python | |
f5ba686196866c78dfeafb34a5f78f5cfc2c50bd | Add buildbot.py with required coverage | steinwurf/sak,steinwurf/sak | buildbot.py | buildbot.py | #!/usr/bin/env python
# encoding: utf-8
project_name = 'sak'
def configure(options):
    """Buildbot hook: configuration step (intentional no-op)."""
    pass
def build(options):
    """Buildbot hook: build step (intentional no-op)."""
    pass
def run_tests(options):
    """Buildbot hook: test step (intentional no-op placeholder)."""
    pass
def coverage_settings(options):
    """Buildbot hook: require at least 94.9% line coverage."""
    options["required_line_coverage"] = 94.9
| bsd-3-clause | Python | |
7bbf99a60526e1b15aaf7a7fc9f5b7d6889a9efc | Create getnotfound.py | bontchev/wlscrape | tools/getnotfound.py | tools/getnotfound.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import requests
import json
import wget
import sys
import os
__author__ = "Vesselin Bontchev <vbontchev@yahoo.com>"
__license__ = "GPL"
__VERSION__ = "1.00"
def error(e):
    """Report *e* on stderr and abort the program with exit status -1."""
    sys.stderr.write("Error: %s.\n" % e)
    sys.exit(-1)
def makeOutputDir(pageNum):
    """Create (if needed) and return a zero-padded page directory name.

    The directory is str(pageNum) left-padded to 3 digits (e.g. "007"),
    created in the current working directory.  Any failure other than the
    directory already existing aborts via error().
    """
    import errno
    outputDir = str(pageNum).zfill(3)
    try:
        # Attempt mkdir directly instead of exists()+mkdir: this closes the
        # race where the directory appears between the two calls.
        os.mkdir(outputDir)
    except OSError as e:
        # An already-existing path is fine; any other OS failure is fatal.
        if e.errno != errno.EEXIST:
            error(e)
    except Exception as e:
        error(e)
    return outputDir
def downloadTheFiles(jsonData, hashes, elementsPerDir):
    """Download every entry whose MD5 is in *hashes*, de-duplicated.

    jsonData       -- list of dicts with "url", "ext" and "md5" keys
    hashes         -- collection of upper-case MD5 strings to fetch
    elementsPerDir -- when > 0 and smaller than len(jsonData), spread the
                      downloads over numbered sub-directories of that size
    Files are saved as <MD5>.<ext>; progress messages go to stderr.
    """
    seen = set()
    i = 0
    paginate = False
    outputDir = ""
    # Pagination only kicks in when a positive page size was requested and
    # there is more than one page worth of entries.
    if ((elementsPerDir > 0) and (len(jsonData) > elementsPerDir)):
        paginate = True
        pageNum = 0
        elementNum = 0
        outputDir = makeOutputDir(pageNum)
    for element in jsonData:
        url = element["url"]
        ext = element["ext"]
        hash = element["md5"].upper()
        # Only wanted hashes, and each file only once.
        if (hash in hashes and not hash in seen):
            seen.add(hash)
            i += 1
            fileName = hash + "." + ext
            if (paginate):
                fileName = os.path.join(outputDir, fileName)
                elementNum += 1
                # Page is full: advance to the next numbered directory.
                if (elementNum >= elementsPerDir):
                    elementNum = 0
                    pageNum += 1
                    outputDir = makeOutputDir(pageNum)
            print("[" + str(i) + "] " + url + " -> " + fileName, file=sys.stderr)
            try:
                outputFile = wget.download(url, out=fileName)
            except Exception as e:
                error(e)
    print("")
if __name__ == "__main__":
    # NOTE(review): argparse's version= constructor argument exists only on
    # Python 2 (removed in Python 3); this script targets Python 2.
    parser = argparse.ArgumentParser(version="%(prog)s version " + __VERSION__,
                                     description="Downloads suspected malware from Wikileaks.")
    parser.add_argument("-e", "--elements", type=int, help="elements per page")
    parser.add_argument("jsonfile", help="JSON data file")
    parser.add_argument("notfoundhashes", help="file with MD5 hashes of unknown files")
    args = parser.parse_args()
    elements = args.elements
    # Normalise: anything below 1 disables pagination.
    # NOTE(review): when -e is omitted args.elements is None, and None < 1
    # only works on Python 2 (Python 3 would raise TypeError here).
    if (elements < 1):
        elements = 0
    try:
        # Load the scrape metadata, plus the not-found MD5 list (first
        # whitespace-separated token of each line, upper-cased).
        with open(args.jsonfile, "r") as contentFile:
            content = contentFile.read()
        jsonData = json.loads(content)
        with open(args.notfoundhashes, "r") as hashFile:
            hashes = [line.split()[0].upper() for line in hashFile]
    except Exception as e:
        error(e)
    downloadTheFiles(jsonData, hashes, elements)
    sys.exit(0)
| mit | Python | |
1a1e9123313fdedab14700ead90748d9e6182a42 | Add revision for new boardmoderator columns | Floens/uchan,Floens/uchan,Floens/uchan,Floens/uchan,Floens/uchan | migrations/versions/da8b38b5bdd5_add_board_moderator_roles.py | migrations/versions/da8b38b5bdd5_add_board_moderator_roles.py | """Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Rebuild boardmoderator with a composite PK and a roles array column."""
    # The old association table (plain board_id/moderator_id columns) is
    # dropped outright -- existing moderator assignments are NOT migrated.
    op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
    op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
    op.drop_table('boardmoderator')
    op.create_table('boardmoderator',
                    sa.Column('board_id', sa.Integer(), nullable=False),
                    sa.Column('moderator_id', sa.Integer(), nullable=False),
                    sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
                    sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
                    sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
                    sa.PrimaryKeyConstraint('board_id', 'moderator_id')
                    )
    op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
    """Restore the previous roles-less boardmoderator association table."""
    op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
    op.drop_table('boardmoderator')
    # Recreate the old shape: nullable FK columns, no primary key, separate
    # index on each FK column.  Row data removed by upgrade() is not restored.
    op.create_table('boardmoderator',
                    sa.Column('board_id', sa.Integer(), nullable=True),
                    sa.Column('moderator_id', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
                    sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
                    )
    op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
    op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
| mit | Python | |
4213e9756872cd3a64ca75f374b5bc292e08e3be | add scraping script | shunk031/ameblo-crawler | scrapingArticle.py | scrapingArticle.py | # -*- coding: utf-8 -*-
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
def scrapingArticleText(url):
    """Fetch a blog article from *url* and split its body into lines.

    Returns a dict with keys ``date`` (text of the <time> element) and
    ``article`` (list of non-empty body lines), or None when the page
    cannot be fetched or lacks the expected markup.
    """
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None
    try:
        soup = BeautifulSoup(html.read(), "lxml")
        date = soup.find('time').string
        # Concatenate every "articleText" block with join (the original
        # built the string with += in a loop), then split on the
        # non-breaking spaces used as soft line separators.
        raw = "".join(block.get_text()
                      for block in soup.findAll("", {"class": "articleText"}))
        lines = raw.replace(u"\xa0", "\n").split("\n")
        # Drop empty lines in one pass (the original used a quadratic
        # count()/remove() loop).
        articleText = [line for line in lines if line != ""]
    except AttributeError:
        # Raised when find('time') or the article container is missing.
        return None
    return dict(date=date, article=articleText)
def main():
    """Fetch one sample Ameblo article and print its date and body lines."""
    url = "http://ameblo.jp/ogurayui-0815/entry-12145717070.html"
    result = scrapingArticleText(url)
    if result is None:  # PEP 8: compare to None by identity, not ==
        print("Can't find the text")
    else:
        print(result['date'])
        for line in result['article']:
            print(line)
if __name__ == '__main__':
main()
| mit | Python | |
0c5f2c0003ceb1568aa4f6dccce5f6de42b5462e | Add a simple monitoring solution | torhve/Amatyr,torhve/Amatyr,torhve/Amatyr | scripts/monitor.py | scripts/monitor.py | #!/usr/bin/python
# -*- coding: UTF-8
# Copyright: 2014 Tor Hveem <thveem>
# License: GPL3
#
# Simple Python script for polling amatyr installation and check latest date
#
# Usage: python monitor.py <AMATYR BASEURL> <EMAIL RECIPIENT>
# Check every 5 minute in crontab:
# */5 * * * * <AMATYRPATH>/scripts/monitor.py
#
import urllib2
from datetime import datetime
import time
import sys
import simplejson
from email.mime.text import MIMEText
from subprocess import Popen, PIPE
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Remembers the last timestamp we alerted about, to avoid repeat mails.
SAVEFILENAME = os.path.join(BASE_DIR, '.last')

# Poll the amatyr instance named on the command line (argv[1] is the base
# URL, argv[2] the alert recipient) and parse the newest observation.
# NOTE: the name `json` shadows the would-be module name; kept as-is.
json = urllib2.urlopen('http://%s/api/now' % (sys.argv[1])).read()
json = simplejson.loads(json)
fmt = '%Y-%m-%d %H:%M:%S'
jsontimestamp = json[0]['datetime']
yrtime = datetime.strptime(jsontimestamp, fmt)
now = datetime.now()
time_d = now - yrtime
# Give it an hour
if time_d.total_seconds() > 3600:
    # Check if we alerted for this timestamp before
    try:
        oldalert = open(SAVEFILENAME, 'r').read()
        if oldalert == jsontimestamp:
            sys.exit(1)
    except IOError:
        # No .last file yet: first alert for this host.
        pass
    # Save timestamp
    open(SAVEFILENAME, 'w').write(jsontimestamp)
    # Alert.
    msg = MIMEText("Please help me.")
    # BUG FIX: the original referenced an undefined BASEURL name here,
    # raising NameError whenever an alert fired; the base URL is argv[1].
    msg["From"] = "amatyr@%s" % sys.argv[1]
    msg["To"] = sys.argv[2]
    msg["Subject"] = "AmatYr stopped %s ago" % time_d
    p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
    p.communicate(msg.as_string())
| bsd-3-clause | Python | |
23dab9c4a0220a7a35b4a88daeda79bd65bdeb3b | fix in range | OstapHEP/ostap,OstapHEP/ostap,OstapHEP/ostap,OstapHEP/ostap | ostap/fitting/tests/test_in_range_2d.py | ostap/fitting/tests/test_in_range_2d.py | import sys
from ostap.core.pyrouts import *
import ROOT, random, time
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID
from ostap.logger.utils import rooSilent
from builtins import range
from ostap.fitting.background import make_bkg
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'in_range' )
else : logger = getLogger ( __name__ )
## make simple test mass
m_x = ROOT.RooRealVar ( 'm_x' , 'Some test mass(X)' , 0 , 5 )
m_y = ROOT.RooRealVar ( 'm_y' , 'Some test mass(Y)' , 6 , 10 )
## book very simple data set
varset = ROOT.RooArgSet ( m_x , m_y)
dataset = ROOT.RooDataSet ( dsID() , 'Test Data set-1' , varset )
m1 = VE(3,0.10**2)
m2 = VE(7,0.10**2)
## fill it with three gausissians, 5k events each
N_ss = 5000
N_sb = 1000
N_bs = 500
N_bb = 100
random.seed(0)
## fill it : 5000 events Gauss * Gauss *Gauss
for i in range(0,N_ss) :
m_x.value = m1.gauss()
m_y.value = m2.gauss()
dataset.add ( varset )
## fill it : 500 events Gauss * const * Gauss
for i in range(0,N_sb) :
m_x.value = m1.gauss()
m_y.value = random.uniform ( *m_y.minmax() )
dataset.add ( varset )
## fill it : 500 events const * Gauss * Gauss
for i in range(0,N_bs) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = m2.gauss()
dataset.add ( varset )
## fill it : 1000 events const * const *Gauss
for i in range(0,N_bb) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = random.uniform ( *m_y.minmax() )
dataset.add ( varset )
logger.info ('Dataset: %s' % dataset )
## various fit components
signal_x1 = Models.Gauss_pdf ( 'G1x' , xvar = m_x , mean = m1.value() , sigma = m1.error() )
signal_y1 = Models.Gauss_pdf ( name='G1y' , xvar = m_y , mean = m2.value() , sigma = m2.error() )
bkg_x= make_bkg ( -1 , 'Bx' , m_x )
bkg_y= make_bkg ( -1 , name= 'By' , xvar =m_y )
model = Models.Fit2D (
name = 'fit_comp',
signal_x = signal_x1,
signal_y = signal_y1,
bkg_1x = bkg_x ,
bkg_1y = bkg_y ,
)
with rooSilent() :
## components
model.SS.setVal ( 5000 )
model.SB.setVal ( 1000 )
model.BS.setVal ( 500 )
model.BB.setVal ( 100 )
r = model.fitTo ( dataset , ncpu=8 )
def draw_x() :
    """Draw x-projections of the 2D model, restricted to windows in y."""
    # Define a named y-range so it can be referenced by name below.
    dataset.m_y.setRange ( 'fit' , 8,10. )
    model.yvar.setRange ( 'fit' , 8,10. )
    # Projection over an explicit (min,max) y-window ...
    model.draw1(dataset,nbins=200,in_range=(6,8))
    time.sleep (2)
    # ... and over the named 'fit' range defined above.
    model.draw1(dataset,nbins=200, in_range='fit')
    time.sleep (2)
def draw_y() :
    """Draw y-projections of the 2D model, restricted to windows in x."""
    # Define a named x-range so it can be referenced by name below.
    dataset.m_x.setRange ( 'fit2' , 0,2.5 )
    model.xvar.setRange ( 'fit2' , 0,2.5 )
    # Projection over an explicit (min,max) x-window ...
    model.draw2(dataset,nbins=200, in_range=(2.5,5))
    time.sleep (2)
    # ... and over the named 'fit2' range defined above.
    model.draw2(dataset,nbins=200, in_range='fit2')
    time.sleep (2)
if '__main__' == __name__ :
## draw x projections
draw_x()
## draw y projections
draw_y()
| bsd-3-clause | Python | |
5e20df222456fe17fa78290e8fa08b051a951b38 | Add events.py | irqed/octokit.py | octokit/resources/events.py | octokit/resources/events.py | # encoding: utf-8
"""Methods for the Events API
http://developer.github.com/v3/activity/events/
http://developer.github.com/v3/issues/events/
"""
| mit | Python | |
3b22994b26db1c224ef0076bf9a031f661953ada | create Feed of latest articles on the current site. | opps/opps,opps/opps,jeanmask/opps,opps/opps,opps/opps,williamroot/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,jeanmask/opps | opps/articles/views/feed.py | opps/articles/views/feed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import get_current_site
from opps.articles.models import Article
class ArticleFeed(Feed):
    """RSS feed of the newest articles, scoped to the requesting site."""
    link = "/RSS"
    def __call__(self, request, *args, **kwargs):
        # Resolve the current site once per request; title/description/items
        # below all read self.site.
        self.site = get_current_site(request)
        return super(ArticleFeed, self).__call__(request, *args, **kwargs)
    def title(self):
        return "{0}'s news".format(self.site.name)
    def description(self):
        return "Latest news on {0}'s".format(self.site.name)
    def items(self):
        # The 40 most recent articles for this site, newest first.
        return Article.objects.filter(site=self.site).order_by(
            '-date_available')[:40]
| mit | Python | |
111eb59d2390a008cad5edc8e18456d42b7f7117 | Add hearthPwnCrawler.py, for crawling deck strings from hearthPwn websize. | lanhin/deckAdvisor | hearthPwnCrawler.py | hearthPwnCrawler.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-07-17 08:52:38 by lanhin
# Project: Deckstring Crawler
#
# Use this file as a pyspider script
# To crawl deck from http://www.hearthpwn.com/decks
# Refer to http://docs.pyspider.org/en/latest/Quickstart/ for more details
from pyspider.libs.base_handler import *
import re
class Handler(BaseHandler):
    """pyspider crawler that scrapes deck strings from hearthpwn.com."""
    crawl_config = {
    }

    @every(minutes=24 * 60)
    def on_start(self):
        """Entry point, scheduled daily: seed the crawl at the deck listing."""
        self.crawl('http://www.hearthpwn.com/decks', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Follow deck detail pages and further listing pages."""
        for each in response.doc('a[href^="http"]').items():
            # Raw strings: "\?" inside a normal string literal is an invalid
            # escape sequence (DeprecationWarning on modern Pythons); the
            # regex patterns themselves are unchanged.
            if re.match(r"http://www.hearthpwn.com/decks/", each.attr.href):
                self.crawl(each.attr.href, callback=self.detail_page)
            if re.match(r"http://www.hearthpwn.com/decks\?page=", each.attr.href):
                self.crawl(each.attr.href, callback=self.index_page)

    @config(priority=2)
    def detail_page(self, response):
        """Extract url, title, deck string, date and format from a deck page."""
        new_dict = {"url": response.url,
                    "title": response.doc('title').text(),
                    "deckstring": [each.attr("data-clipboard-text") for each in response.doc('[data-ga-click-event-tracking-label="Top"]').items()][0],
                    "date": [each for each in response.doc('[class="deck-details"]')('li').items()][-1].text()
                    }
        if response.doc('[class="is-std"]').text():
            new_dict['type'] = 'Standard'
        else:
            new_dict['type'] = 'Wild'
        return new_dict
| mit | Python | |
4cfc07a275a473ed14f7c99150b2f233c680d7c0 | Add db dumping utility | ekollof/pymetrics | dbcat.py | dbcat.py | #!/usr/bin/env python
import sys
import anydbm as dbm
def main():
    # Dump every key/value pair of the DBM database named on the command
    # line (Python 2: anydbm, dict.iteritems and the print statement).
    for k,v in dbm.open(sys.argv[1]).iteritems():
        print "key: {0:s} value: {1:s}".format(k, v)
if __name__ == '__main__':
sys.exit(main()) | bsd-3-clause | Python | |
1631731657af28c275b35f9b084807e4f244c334 | debug module. initial code | albertz/music-player,albertz/music-player,albertz/music-player,albertz/music-player,albertz/music-player,albertz/music-player | debug.py | debug.py | # -*- coding: utf-8 -*-
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2013, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
# This is the debug module: tools to debug MusicPlayer.
# This is mostly for debugging at runtime.
# - memory profiling. searching for mem-leaks
# - runtime profiling. searching slow code paths
# - other bugs
# Use socketcontrol-interactiveclient.py for interactively control.
# After being connected, just run `import debug` and use the functions from here.
import sys, os
def getDevelPath():
    """Search a few well-known checkout locations for the MusicPlayer
    development directory; return the first one that looks like a git
    working copy, or None if none exists."""
    # send me a request to include your custom dir.
    # if it isn't too unusual, i might add it here.
    candidates = (
        "~/Programmierung/music-player",
        "~/Projects/music-player",
        "~/Coding/music-player",
    )
    for candidate in candidates:
        expanded = os.path.expanduser(candidate)
        # Accept only existing directories that contain a .git checkout.
        if os.path.isdir(expanded) and os.path.isdir(expanded + "/.git"):
            return expanded
    return None
def addDevelSysPath():
    """Prepend the MusicPlayer development checkout to sys.path so that
    its modules shadow any installed copies."""
    develPath = getDevelPath()
    assert develPath, "devel path not found"
    # Rebind sys.path (rather than mutate it) with the devel dir first.
    sys.path = [develPath] + sys.path
| bsd-2-clause | Python | |
ed43384ece07bf1a02529d2f79423e96c8283443 | Add mangling experimental sample | eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples | src_clang/experimental/show-mangle.py | src_clang/experimental/show-mangle.py | import pprint
import sys
import clang.cindex
def get_cursor(source, spelling):
    """Obtain a cursor from a source object.

    This provides a convenient search mechanism to find a cursor with specific
    spelling within a source. The first argument can be either a
    TranslationUnit or Cursor instance.

    If the cursor is not found, None is returned.
    """
    if isinstance(source, clang.cindex.Cursor):
        root_cursor = source
    else:
        # Convenience for calling on a TU.
        root_cursor = source.cursor
    matches = (cursor for cursor in root_cursor.walk_preorder()
               if cursor.spelling == spelling)
    return next(matches, None)
# Parse a tiny in-memory C++ snippet and print the mangled name of foo().
src = '''\
int foo(int, int);
'''

translation_unit = clang.cindex.TranslationUnit.from_source(
    't.cpp', ['-x', 'c++'], unsaved_files=[('t.cpp', src)])
foo_cursor = get_cursor(translation_unit, 'foo')
print(foo_cursor.mangled_name)
| unlicense | Python | |
472d23ec5706d081cbdbf32687884b133fdf6864 | Add benchmark for ogbg_molpcba example. | google/flax,google/flax | examples/ogbg_molpcba/ogbg_molpcba_benchmark.py | examples/ogbg_molpcba/ogbg_molpcba_benchmark.py | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark for the ogbg_molpcba example."""
import time
from absl import flags
from absl.testing import absltest
import main
from configs import default
from configs import test
from flax.testing import Benchmark
import jax
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Shared absl flags object; the benchmarks set FLAGS.workdir/FLAGS.config
# before invoking main.main().
FLAGS = flags.FLAGS
class OgbgMolpcbaBenchmark(Benchmark):
  """Benchmarks for the ogbg_molpcba Flax example."""

  def _run_and_collect_metrics(self, config):
    """Train with the given config and gather the regression metrics.

    Runs main.main() against a fresh temporary workdir, then extracts
    seconds-per-epoch and the final test/validation metrics from the
    summaries written during training.

    Args:
      config: ml_collections config passed to the example via FLAGS.

    Returns:
      A tuple (benchmark_time, metrics): total wall time in seconds and
      the metrics dict to report to the benchmark framework.
    """
    workdir = self.get_tmp_model_dir()
    FLAGS.workdir = workdir
    FLAGS.config = config

    start_time = time.time()
    main.main([])
    benchmark_time = time.time() - start_time

    # Summaries contain all the information necessary for
    # the regression metrics.
    summaries = self.read_summaries(workdir)

    wall_time, _, test_accuracy = zip(*summaries['test_accuracy'])
    wall_time = np.array(wall_time)
    # Mean wall-clock time between consecutive summary writes.
    sec_per_epoch = np.mean(wall_time[1:] - wall_time[:-1])

    _, _, test_aps = zip(*summaries['test_mean_average_precision'])
    _, _, validation_accuracy = zip(*summaries['validation_accuracy'])
    _, _, validation_aps = zip(*summaries['validation_mean_average_precision'])

    metrics = {
        'sec_per_epoch':
            sec_per_epoch,
        'test_accuracy':
            test_accuracy[-1],
        'test_mean_average_precision':
            test_aps[-1],
        'validation_accuracy':
            validation_accuracy[-1],
        'validation_mean_average_precision':
            validation_aps[-1],
    }
    return benchmark_time, metrics

  def test_1x_v100(self):
    """Run training with default config for ogbg_molpcba on a v100 GPU."""
    benchmark_time, metrics = self._run_and_collect_metrics(
        default.get_config())

    # Assertions are deferred until the test finishes, so the metrics are
    # always reported and benchmark success is determined based on *all*
    # assertions.
    self.assertGreaterEqual(metrics['test_mean_average_precision'], 0.24)
    self.assertGreaterEqual(
        metrics['validation_mean_average_precision'], 0.25)

    # Use the reporting API to report single or multiple metrics/extras.
    self.report_wall_time(benchmark_time)
    self.report_metrics(metrics)
    self.report_extras({
        'model_name': 'Graph Convolutional Network',
        'description': 'GPU (1x V100) test for ogbg_molpcba.',
        'implementation': 'linen',
    })

  def test_cpu(self):
    """Run training with test config for ogbg_molpcba on CPU."""
    benchmark_time, metrics = self._run_and_collect_metrics(
        test.get_config())

    # Use the reporting API to report single or multiple metrics/extras.
    self.report_wall_time(benchmark_time)
    self.report_metrics(metrics)
    self.report_extras({
        'model_name': 'Graph Convolutional Network',
        'description': 'CPU test for ogbg_molpcba.',
        'implementation': 'linen',
    })
# Entry point: let absltest discover and run the benchmark methods.
if __name__ == '__main__':
  absltest.main()
| apache-2.0 | Python | |
0caaf977096d5936747ad4931d14041675a9864a | create a paths utility to better work with ceph paths | trhoden/ceph-deploy,ddiss/ceph-deploy,ktdreyer/ceph-deploy,zhouyuan/ceph-deploy,SUSE/ceph-deploy,ceph/ceph-deploy,rtulke/ceph-deploy,isyippee/ceph-deploy,isyippee/ceph-deploy,osynge/ceph-deploy,zhouyuan/ceph-deploy,Vicente-Cheng/ceph-deploy,jumpstarter-io/ceph-deploy,alfredodeza/ceph-deploy,trhoden/ceph-deploy,rtulke/ceph-deploy,shenhequnying/ceph-deploy,Vicente-Cheng/ceph-deploy,branto1/ceph-deploy,branto1/ceph-deploy,alfredodeza/ceph-deploy,codenrhoden/ceph-deploy,shenhequnying/ceph-deploy,imzhulei/ceph-deploy,imzhulei/ceph-deploy,ghxandsky/ceph-deploy,ghxandsky/ceph-deploy,SUSE/ceph-deploy,codenrhoden/ceph-deploy,ktdreyer/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ceph/ceph-deploy,jumpstarter-io/ceph-deploy,osynge/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ddiss/ceph-deploy | ceph_deploy/util/paths.py | ceph_deploy/util/paths.py | from os.path import join
from ceph_deploy.util import constants
class mon(object):
    """Filesystem path helpers for per-host monitor daemon files."""

    # Common prefix of every per-host monitor data directory.
    _base = join(constants.mon_path, 'ceph-')

    @classmethod
    def path(cls, hostname):
        """Monitor data directory for the given host."""
        return cls._base + hostname

    @classmethod
    def done(cls, hostname):
        """Marker file inside the monitor directory ('done')."""
        return join(cls.path(hostname), 'done')

    @classmethod
    def init(cls, hostname, init):
        """File named after `init` inside the monitor directory.

        NOTE(review): `init` looks like an init-system marker name —
        confirm against callers.
        """
        return join(cls.path(hostname), init)

    @classmethod
    def keyring(cls, cluster, hostname):
        """Path of the '<cluster>-<host>.mon.keyring' file in tmp_path."""
        filename = '{0}-{1}.mon.keyring'.format(cluster, hostname)
        return join(constants.tmp_path, filename)
| mit | Python | |
1aeb34f003e5d437ac55c560ef062b22e9f02c0a | Define health blueprint. | soasme/rio,soasme/rio,soasme/rio | rio/blueprints/health.py | rio/blueprints/health.py | # -*- coding: utf-8 -*-
from flask import Blueprint
# Blueprint exposing the service health-check endpoint.
bp = Blueprint('health', __name__)
@bp.route('/')
def index():
    """Health check: respond with a plain 'OK' while the service is up."""
    return 'OK'
| mit | Python | |
a8b48d9174ce9c30166c0c2a8011c2c40624c4bd | Add a spider for Planned Parenthood | iandees/all-the-places,iandees/all-the-places,iandees/all-the-places | locations/spiders/planned_parenthood.py | locations/spiders/planned_parenthood.py | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
    """Crawl plannedparenthood.org: state listing -> venue listing -> venue."""

    name = "planned_parenthood"
    allowed_domains = ["www.plannedparenthood.org"]
    start_urls = (
        'https://www.plannedparenthood.org/health-center',
    )

    def parse(self, response):
        # The landing page links to one listing page per state.
        for href in response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_state)

    def parse_state(self, response):
        # Each state page links to the individual health-center pages.
        for href in response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_venue)

    def parse_venue(self, response):
        def first_value(xpath_expr):
            # Extract the first node matched by the given XPath expression.
            return response.xpath(xpath_expr)[0].extract()

        properties = {
            'addr:full': first_value('//*[@itemprop="streetAddress"]/text()'),
            'addr:city': first_value('//*[@itemprop="addressLocality"]/text()'),
            'addr:state': first_value('//*[@itemprop="addressRegion"]/text()'),
            'addr:postcode': first_value('//*[@itemprop="postalCode"]/text()'),
            'ref': response.url,
            'website': response.url,
        }

        # The static map image URL encodes the venue coordinates as
        # "center=<lat>,<lon>".
        map_image_url = first_value('//img[@class="address-map"]/@src')
        match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
        latitude, longitude = match.group(1), match.group(2)

        yield GeojsonPointItem(
            properties=properties,
            lon_lat=[float(longitude), float(latitude)],
        )
| mit | Python | |
397f31c8b43da123f2a55350a7d572b3a13431a6 | Add module to ease handling of CKAN filestores. | etalab/ckan-toolbox | ckantoolbox/filestores.py | ckantoolbox/filestores.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-Toolbox -- Various modules that handle CKAN API and data
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Emmanuel Raviart
# http://gitorious.org/etalab/ckan-toolbox
#
# This file is part of CKAN-Toolbox.
#
# CKAN-Toolbox is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Toolbox to use CKAN FileStore API"""
import datetime
import itertools
import json
import mimetools
import mimetypes
import os
import urllib2
import urlparse
from biryani1 import strings
class MultiPartForm(object):
    """Accumulate the data to be used when posting a form.

    Builds a multipart/form-data request body by hand (Python 2 era:
    mimetools/urllib2). str(form) yields the exact CRLF-separated body.
    """
    def __init__(self):
        self.form_fields = []  # (name, value) pairs of plain form fields
        self.files = []  # (field_name, filename, content_type, body) tuples
        self.boundary = mimetools.choose_boundary()  # MIME part separator
    def __str__(self):
        """Return a string representing the form data, including attached files."""
        # Build a list of lists, each containing "lines" of the request. Each part is separated by a boundary string.
        # Once the list is built, return a string where each line is separated by '\r\n'.
        parts = []
        part_boundary = '--' + self.boundary
        # Add the form fields.
        parts.extend(
            [
                part_boundary,
                'Content-Disposition: form-data; name="%s"' % name,
                '',
                value,
            ]
            for name, value in self.form_fields
            )
        # Add the files to upload.
        parts.extend(
            [
                part_boundary,
                'Content-Disposition: file; name="%s"; filename="%s"' % (field_name, filename),
                'Content-Type: %s' % content_type,
                '',
                body,
            ]
            for field_name, filename, content_type, body in self.files
            )
        # Flatten the list and add closing boundary marker,
        # then return CR+LF separated data
        flattened = list(itertools.chain(*parts))
        flattened.append('--' + self.boundary + '--')
        flattened.append('')
        return '\r\n'.join(flattened)
    def add_field(self, name, value):
        """Add a simple field to the form data."""
        self.form_fields.append((str(name), strings.deep_encode(value)))
    def add_file_bytes(self, fieldname, filename, file_bytes, mimetype = None):
        """Add a file to be uploaded."""
        if mimetype is None:
            # Guess from the filename extension; fall back to a generic type.
            mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        self.files.append((str(fieldname), strings.deep_encode(filename), str(mimetype), file_bytes))
    @property
    def content_type(self):
        """Value for the request's Content-Type header, with boundary."""
        return 'multipart/form-data; boundary=%s' % self.boundary
def upload_file(site_url, filename, file_data, headers):
    """Upload `file_data` to a CKAN FileStore and return the file metadata.

    Three requests are made against the CKAN storage API:
      1. GET  /api/storage/auth/form/<key>  -> form fields + POST action URL
      2. POST <action> with a multipart form carrying the file
      3. GET  /api/storage/metadata/<key>   -> metadata of the stored file

    `headers` must contain an 'Authorization' entry (presumably a CKAN
    API key) and is sent with every request.
    """
    assert 'Authorization' in headers, headers
    # See ckan/public/application.js:makeUploadKey for why the file_key is derived this way.
    timestamp = datetime.datetime.now().isoformat().replace(':', '').split('.')[0]
    normalized_name = os.path.basename(filename).replace(' ', '-')
    file_key = u'{}/{}'.format(timestamp, normalized_name)
    # 1. Ask CKAN which form fields and action URL to use for this key.
    request = urllib2.Request(urlparse.urljoin(site_url, u'/api/storage/auth/form/{}'.format(file_key)),
        headers = headers)
    response = urllib2.urlopen(request)
    file_upload_fields = json.loads(response.read())
    # 2. Build the multipart form from the server-supplied fields plus the
    # file itself, then POST it to the returned action URL.
    form = MultiPartForm()
    for field in file_upload_fields['fields']:
        form.add_field(field['name'], unicode(field['value']).encode('utf-8'))
    form.add_file_bytes('file', file_key.encode('utf-8'), file_data)
    form_bytes = str(form)
    form_headers = headers.copy()
    form_headers.update({
        'Content-Length': len(form_bytes),
        'Content-Type': form.content_type,
        })
    request = urllib2.Request(unicode(urlparse.urljoin(site_url, file_upload_fields['action'])).encode('utf-8'),
        headers = form_headers)
    request.add_data(form_bytes)
    response = urllib2.urlopen(request)
    response_text = response.read()
    # 3. Fetch and return the stored file's metadata.
    request = urllib2.Request(urlparse.urljoin(site_url, u'/api/storage/metadata/{}'.format(file_key)),
        headers = headers)
    response = urllib2.urlopen(request)
    response_text = response.read()
    file_metadata = json.loads(response_text)
    return file_metadata
| agpl-3.0 | Python | |
0331bffc755ad4234edcca3edaf1b9697b8ae8c3 | Create A.py | Pouf/CodingCompetition,Pouf/CodingCompetition | Google-Code-Jam/2010-Africa/A.py | Google-Code-Jam/2010-Africa/A.py | mit | Python | ||
c568256dac3c13f6740d2a2df5a8a848e2f7d68e | check in new stream settings file | RCOSDP/waterbutler,felliott/waterbutler,CenterForOpenScience/waterbutler | waterbutler/core/streams/settings.py | waterbutler/core/streams/settings.py | from waterbutler import settings
# Settings namespace for the streams package.
config = settings.child('STREAMS_CONFIG')
# Parsed from a space-separated config string; presumably the archive-style
# extensions that streams should treat as already compressed — confirm
# against the consumers of ZIP_EXTENSIONS.
ZIP_EXTENSIONS = config.get('ZIP_EXTENSIONS', '.zip .gz .bzip .bzip2 .rar .xz .bz2 .7z').split(' ')
| apache-2.0 | Python | |
5bea532c7651faacb163745fbbf28fa4f53ba438 | add predicting-office-space-price | zeyuanxy/hacker-rank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank | ai/machine-learning/predicting-office-space-price/predicting-office-space-price.py | ai/machine-learning/predicting-office-space-price/predicting-office-space-price.py | import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
if __name__ == "__main__":
(f, n) = map(int, raw_input().split())
x = []
y = []
poly = PolynomialFeatures(degree = 4)
for i in range(n):
v = map(float, raw_input().split())
x.append(poly.fit_transform(v[:-1])[0])
y.append(v[-1])
clf = linear_model.BayesianRidge()
clf.fit(x, y)
tc = int(raw_input())
for i in range(tc):
v = map(float, raw_input().split())
print clf.predict(poly.transform(v))[0]
| mit | Python | |
45fea3847e2800a920ccb06e102ebaf9a5f9a4ce | Add forgotten migration for newly introduced default ordering | GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa | tk/material/migrations/0002_auto_20170704_2155.py | tk/material/migrations/0002_auto_20170704_2155.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11).

    Adds default orderings to Approval, Goal, GroupFeature, Location and
    Subject, and re-declares the LocalizedField `name` fields.
    """

    dependencies = [
        ('material', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='approval',
            options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
        ),
        migrations.AlterModelOptions(
            name='goal',
            options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
        ),
        migrations.AlterModelOptions(
            name='groupfeature',
            options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
        ),
        migrations.AlterModelOptions(
            name='location',
            options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
        ),
        migrations.AlterModelOptions(
            name='subject',
            options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
        ),
        migrations.AlterField(
            model_name='goal',
            name='name',
            field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='groupfeature',
            name='name',
            field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='location',
            name='name',
            field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='subject',
            name='name',
            field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
        ),
    ]
| agpl-3.0 | Python | |
af0486cd767564cda7259aa30a0d7c90420e226e | Add get json example | designiot/code,phodal/iot-code,designiot/code,phodal/iot-code,designiot/code,designiot/code,phodal/iot-code,phodal/iot-code | chapter2/get.py | chapter2/get.py | import urllib2,json
# Fetch the device's JSON API document and read its 'led' field.
# NOTE(review): the parsed value is neither printed nor assigned — this
# looks like an interactive-session example; confirm the intent.
api_body = urllib2.urlopen('http://192.168.168.84/api.json').read()
json.loads(api_body)['led']
1acbad02071a4d1ef953bc2c0643525e5d681d54 | Add in a script to run the linter manually | Khan/khan-linter,Khan/khan-linter,Khan/khan-linter,Khan/khan-linter | runlint.py | runlint.py | #!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
    """Run gjslint over `filenames`; True iff no style errors were found."""
    lint_argv = [gjslint.__file__, '--nobeep'] + filenames
    return gjslint.main(argv=lint_argv) == 0
def fix_files(filenames):
    """Auto-fix style violations in the given files; always returns 0."""
    fixer = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
    for path in filenames:
        fixer.Check(path)
    return 0
def main():
    """Parse options and dispatch to the linter or the auto-fixer.

    Returns a process exit status: 0 on success, 1 on lint failure.
    """
    parser = optparse.OptionParser(USAGE)
    parser.add_option('--autofix',
                      dest='autofix',
                      action='store_true',
                      default=False,
                      help='Whether or not to autofix')
    options, args = parser.parse_args()
    if options.autofix:
        return fix_files(args)
    # check_files() returns True when the files are clean.  Convert the
    # boolean to a shell exit status: returning it directly would make
    # sys.exit() report failure (1) on success and success (0) on failure.
    return 0 if check_files(args) else 1


if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 | Python | |
3e5d6e5dd31193f42ebddaeff856bfe53703a19e | Add script to get evidence sources | sorgerlab/indra,sorgerlab/indra,bgyori/indra,pvtodorov/indra,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,johnbachman/belpy,bgyori/indra,johnbachman/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,pvtodorov/indra,johnbachman/indra,johnbachman/belpy | models/fallahi_eval/evidence_sources.py | models/fallahi_eval/evidence_sources.py | from util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
    # Load cached Statements just before going into the model
    stmts = pklload('pysb_stmts')

    # Count statements by the sorted, de-duplicated tuple of evidence
    # sources that support them.
    sources_count = defaultdict(int)
    for stmt in stmts:
        sources = tuple(sorted({ev.source_api for ev in stmt.evidence}))
        sources_count[sources] += 1

    # Known database sources vs. reading (NLP) sources.
    dbs = {'bel', 'biopax', 'phosphosite', 'signor'}
    readers = {'reach', 'trips', 'sparser', 'r3'}

    db_only = 0       # statements supported by databases only
    reading_only = 0  # statements supported by reading only
    mixture = 0       # statements supported by both
    for sources, count in sources_count.items():
        has_db = dbs.intersection(sources)
        has_reading = readers.intersection(sources)
        # The three cases are mutually exclusive.
        if has_db and has_reading:
            mixture += count
        elif has_db:
            db_only += count
        elif has_reading:
            reading_only += count

    # Print source combinations from least to most common.
    for sources, count in sorted(sources_count.items(), key=lambda x: x[1]):
        print(sources, count)
| bsd-2-clause | Python | |
276435cc3b4f77dc16dde4a73cd930e461e1ef47 | Implement LM in defn/lm.py | gchrupala/reimaginet,gchrupala/reimaginet | imaginet/defn/lm.py | imaginet/defn/lm.py | from funktional.layer import Layer, Dense, StackedGRU, StackedGRUH0, \
Embedding, OneHot, clipped_rectify, CrossEntropy, \
last, softmax3d, params
import funktional.context as context
from funktional.layer import params
import imaginet.task
from funktional.util import autoassign
import funktional.util as util
import theano.tensor as T
import theano
import zipfile
import numpy
import StringIO
import json
import cPickle as pickle
class Decoder(Layer):
    """Embedding layer followed by a stacked GRU.

    Maps matrices of symbol ids to GRU hidden-state sequences; used as
    the recurrent core of the LM task below.
    """
    def __init__(self, size_vocab, size_embed, size, depth):
        # autoassign stores all constructor arguments as attributes.
        autoassign(locals())
        self.Embed = Embedding(self.size_vocab, self.size_embed)
        self.GRU = StackedGRUH0(self.size_embed, self.size, self.depth, activation=clipped_rectify)
    def params(self):
        """Trainable parameters of the embedding and GRU layers."""
        return params(self.Embed, self.GRU)
    def __call__(self, out_prev):
        """Return GRU states for the embedded input symbols `out_prev`."""
        return self.GRU(self.Embed(out_prev))
class LM(imaginet.task.Task):
    """Language-model task: predict each symbol from the preceding ones."""
    def __init__(self, config):
        autoassign(locals())
        self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
        self.Decode = Decoder(config['size_vocab'], config['size_embed'], config['size'], config['depth'])
        self.ToTxt = Dense(config['size'], config['size_vocab'])
        # Symbolic int-matrix inputs of symbol ids; assumed (batch, time)
        # layout — TODO confirm against the data provider.
        self.inputs = [T.imatrix()]
        self.target = T.imatrix()
    def params(self):
        # Only decoder and output projection are trained here.
        return params(self.Decode, self.ToTxt)
    def __call__(self, out_prev):
        # Per-timestep softmax distribution over the vocabulary.
        return softmax3d(self.ToTxt(self.Decode(out_prev)))
    def cost(self, target, prediction):
        # Cross-entropy against the one-hot encoded target symbols.
        oh = OneHot(size_in=self.config['size_vocab'])
        return CrossEntropy(oh(target), prediction)
    def args(self, item):
        """Choose elements of item to be passed to .loss_test and .train functions."""
        inp, target_v, out_prev, target_t = item
        return (out_prev, target_t)
    def _make_representation(self):
        # Compile a theano function returning the decoder representation.
        with context.context(training=False):
            rep = self.Decode(*self.inputs)
        return theano.function(self.inputs, rep)
    def _make_pile(self):
        # Compile a theano function returning activations of all GRU layers.
        with context.context(training=False):
            rep = self.Decode.GRU.intermediate(self.Decode.Embed(*self.inputs))
        return theano.function(self.inputs, rep)
| mit | Python | |
bcb8615fb0d009ad4e7899b9e91701333dc56990 | Add abyss package (#4555) | matthiasdiener/spack,tmerrick1/spack,krafczyk/spack,skosukhin/spack,tmerrick1/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,EmreAtes/spack,tmerrick1/spack,skosukhin/spack,mfherbst/spack,TheTimmy/spack,iulian787/spack,tmerrick1/spack,iulian787/spack,lgarren/spack,TheTimmy/spack,lgarren/spack,mfherbst/spack,tmerrick1/spack,lgarren/spack,skosukhin/spack,TheTimmy/spack,EmreAtes/spack,LLNL/spack,matthiasdiener/spack,skosukhin/spack,lgarren/spack,skosukhin/spack,iulian787/spack,TheTimmy/spack,LLNL/spack,krafczyk/spack,lgarren/spack,matthiasdiener/spack,EmreAtes/spack,EmreAtes/spack,LLNL/spack,krafczyk/spack,EmreAtes/spack,krafczyk/spack,TheTimmy/spack,mfherbst/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,LLNL/spack,krafczyk/spack | var/spack/repos/builtin/packages/abyss/package.py | var/spack/repos/builtin/packages/abyss/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Abyss(AutotoolsPackage):
"""ABySS is a de novo, parallel, paired-end sequence assembler
that is designed for short reads. The single-processor version
is useful for assembling genomes up to 100 Mbases in size."""
homepage = "http://www.bcgsc.ca/platform/bioinfo/software/abyss"
url = "http://www.bcgsc.ca/platform/bioinfo/software/abyss/releases/2.0.2/abyss-2.0.2.tar.gz"
version('2.0.2', '1623f55ad7f4586e80f6e74b1f27c798')
depends_on('mpi')
depends_on('boost@:1.50.0,1.53.0:')
depends_on('sparsehash')
depends_on('sqlite')
conflicts('^intel-mpi')
conflicts('^intel-parallel-studio+mpi')
conflicts('^mvapich2')
conflicts('^spectrum-mpi')
def configure_args(self):
args = ['--with-boost=%s' % self.spec['boost'].prefix,
'--with-sqlite=%s' % self.spec['sqlite'].prefix,
'--with-mpi=%s' % self.spec['mpi'].prefix]
if self.spec['mpi'].name == 'mpich':
args.append('--enable-mpich')
return args
| lgpl-2.1 | Python | |
418e714e3d544abc7120c7252c51493cd59081a0 | Add custom CommentedObjectManager | jezdez-archive/django-comment-utils,paltman/django-comment-utils,clones/django-comment-utils | comment_utils/managers.py | comment_utils/managers.py | """
Custom manager which managers of objects which allow commenting can
inheit from.
"""
from django.db import models
class CommentedObjectManager(models.Manager):
"""
A custom manager class which provides useful methods for types of
objects which allow comments.
Models which allow comments but don't need the overhead of their
own fully-defined custom manager should use an instance of this
manager as their default manager.
Models which allow comments and which do have fully-defined custom
managers should have those managers subclass this one.
"""
def most_commented(self, num=5, free=True):
"""
Returns the ``num`` objects of a given model with the highest
comment counts, in order.
Pass ``free=False`` if you're using the registered comment
model (``Comment``) instead of the anonymous comment model
(``FreeComment``).
"""
from django.db import backend, connection
from django.contrib.comments import models as comment_models
from django.contrib.contenttypes.models import ContentType
if free:
comment_opts = comment_models.FreeComment._meta
else:
comment_opts = comment_models.Comment._meta
ctype = ContentType.objects.get_for_model(self.model)
query = """SELECT %s, COUNT(*) AS score
FROM %s
WHERE content_type_id = %%s
AND is_public = 1
GROUP BY %s
ORDER BY score DESC""" % (backend.quote_name('object_id'),
backend.quote_name(comment_opts.db_table),
backend.quote_name('object_id'),)
cursor = connection.cursor()
cursor.execute(query, [ctype.id])
entry_ids = [row[0] for row in cursor.fetchall()[:num]]
# Use ``in_bulk`` here instead of an ``id__in`` filter, because ``id__in``
# would clobber the ordering.
entry_dict = self.in_bulk(entry_ids)
return [entry_dict[entry_id] for entry_id in entry_ids]
| bsd-3-clause | Python | |
f189ed9401e82e55a7b3b73ce06a8f5c642344ac | Add functional test file | Soaring-Outliers/news_graph,Soaring-Outliers/news_graph,Soaring-Outliers/news_graph,Soaring-Outliers/news_graph | functional_tests.py | functional_tests.py | from selenium import webdriver
import unittest
class Test(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3) # Browser will eventually wait 3 secs
# for a thing to appear if needed
def tearDown(self):
self.browser.quit() | mit | Python | |
09a25009965d9951614ed0702185947f796c41a0 | Create scraper.py | shauryashahi/Indian-Schools-Database | scraper.py | scraper.py | from lxml.html import parse
def main():
baseurl = 'http://www.schoolcolleges.com/school.select.php?offset=%s&val=city=%270%27&select=%s'
states = [
'Andhra Pradesh',
'Arunachal Pradesh',
'Assam',
'BIHAR',
'Chhattisgarh',
'Goa',
'Gujarat',
'Haryana',
'Himachal Pradesh',
'Jammu & Kashmir',
'Jharkhand',
'Karnataka',
'Kerala',
'Madhya Pradesh',
'Maharashtra',
'Manipur',
'Meghalaya',
'Mizoram',
'Nagaland',
'Odisha',
'Punjab',
'Rajasthan',
'Sikkim',
'Tamil Nadu',
'Tripura',
'Uttarakhand',
'Uttar Pradesh',
'West Bengal']
n = 10
for state in states:
offset = str(n)
url = baseurl %(offset) %(state)
page = parse(url).getroot()
#tr21,25,29...
for i in xrange(21,102,4):
table = page.cssselect('tr')[i]
data = table.cssselect('td')
| apache-2.0 | Python | |
691543bb43b67dd9cc9ff6d6ee6a212badd4c61e | add valid unicode example | AlanCoding/Ansible-inventory-file-examples,AlanCoding/Ansible-inventory-file-examples | scripts/unicode_valid.py | scripts/unicode_valid.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
print(json.dumps({
"_meta": {
"hostvars": {
"not_unicode": {"host_var": "unicode here 日本語"}
}
},
"all": {
"vars": {
"inventory_var": "this is an inventory var 日本語"
}
},
"group_日本語": {
"hosts": ["not_unicode"],
"vars": {
"group_var": "this is group_var 日本語"
}
}
})) | mit | Python | |
2bc7acd167d6e18dfbc2bc2625957f2bd58fa1f5 | Create spacial_prototype.py | jbobotek/elcano,jbobotek/elcano,jbobotek/elcano,jbobotek/elcano,jbobotek/elcano,jbobotek/elcano | Vision/spacial_prototype.py | Vision/spacial_prototype.py | import numpy as np
import math as m
# Prototype code for the image-to-world location system. Requires numpy.
# TODO
def _inverse_perspective():
pass
# Convert a global coordinate to a relative coordinate
# (roll, pitch, yaw) = camera_angle
# (x, y, z) = camera_pos, cone_pos (global coordinates)
# (width, height) = img_size, sensor_size (sizes)
# Returns a 2x1 Matrix with the [X Y] pixel of the cone
def _global_to_relative(camera_angle,
camera_pos,
cone_pos,
focal_length,
img_size,
sensor_size):
(a, b, c) = camera_angle
(X1w, Y1w, Z1w) = camera_pos
(X2w, Y2w, Z2w) = cone_pos
(img_width, img_height) = img_size
(sensor_width, sensor_height) = sensor_size
wP = sensor_width / img_width; hP = sensor_height / img_height
d = m.sqrt((X1w - X2w)**2 + (Y1w - Y2w)**2 + (Z1w - Z2w)**2)
df = np.array([(X2w - X1w),
(Y2w - Y1w),
(Z2w - Z1w)])
ma = np.array([(1, 0 , 0 ),
(0, m.cos(a), m.sin(a)),
(0, -m.sin(a), m.cos(a))])
mb = np.array([(m.cos(b), 0, -m.sin(b)),
(0 , 1, 0 ),
(m.sin(b), 0, m.cos(b))])
mc = np.array([( m.cos(c), m.sin(c), 0),
(-m.sin(c), m.cos(c), 0),
(0 , 0 , 1)])
cc = np.array([(0, focal_length / (d * wP), 0),
(0, 0, focal_length / (d * hP))])
im = np.array([(img_width / 2),
(img_height / 2)])
converted_to_camera = mc.dot(mb).dot(ma).dot(df)
return cc.dot(converted_to_camera) + im
# Tiny test suite for the _global_to_relative function
def _test_global_to_relative():
    """Print projected pixel coordinates for a few camera poses/angles."""
    print(_global_to_relative((0, 0, 0), (0, 0, 0), (10, 2, 0), 0.01, (320, 240), (0.05, 0.05)))
    print(_global_to_relative((0, 0, 0), (0, 0, 1), (10, 2, 0), 0.01, (320, 240), (0.05, 0.05)))
    print(_global_to_relative((0, 0.3, 0), (0, 0, 0), (10, 2, 0), 0.01, (320, 240), (0.05, 0.05)))
    print(_global_to_relative((0, 0, 0.3), (0, 0, 0), (10, 2, 0), 0.01, (320, 240), (0.05, 0.05)))
if __name__ == "__main__":
    _test_global_to_relative()
| mit | Python | |
785f6a4f435c68bb6336b4e42da0964cf5cbfce4 | Add module that finds classifier training examples given a ground truth in the graph | chaubold/hytra,chaubold/hytra,chaubold/hytra | hytra/jst/classifiertrainingexampleextractor.py | hytra/jst/classifiertrainingexampleextractor.py | '''
Provide methods to find positive and negative training examples from a hypotheses graph and
a ground truth mapping, in the presence of multiple competing segmentation hypotheseses.
'''
import numpy as np
import logging
from hytra.core.random_forest_classifier import RandomForestClassifier
def getLogger():
    """Return the logger used throughout this module (named after it)."""
    module_logger = logging.getLogger(__name__)
    return module_logger
def trainDetectionClassifier(hypothesesGraph, gtFrameIdToGlobalIdsWithScoresMap, numSamples=100, selectedFeatures=None):
    """
    Finds the given number of training examples, half as positive and half as negative examples, from the
    given graph and mapping.
    Positive examples are those with the highest jaccard score, while negative examples can either
    just not be the best match for a GT label, or also be not matched at all.
    **Returns**: a trained random forest
    """
    # NOTE(review): gtFrameIdToGlobalIdsWithScoresMap is accepted but never
    # used in this body -- confirm whether it is still needed.
    # create a list of all elements, sort them by their jaccard score, then pick from both ends?
    getLogger().debug("Extracting candidates")
    candidates = []
    nodeTraxelMap = hypothesesGraph.getNodeTraxelMap()
    for node in hypothesesGraph.nodeIterator():
        # Only nodes with at least one Jaccard score are usable candidates.
        if 'JaccardScores' in nodeTraxelMap[node].Features and len(nodeTraxelMap[node].Features['JaccardScores']) > 0:
            globalIdsAndScores = nodeTraxelMap[node].Features['JaccardScores']
            globalIdsAndScores = sorted(globalIdsAndScores, key=lambda x: x[1])
            # Best (highest) score is last after the ascending sort.
            bestScore = globalIdsAndScores[-1][1]
            candidates.append( (node, bestScore) )
    assert(len(candidates) >= numSamples)
    candidates.sort(key=lambda x: x[1])
    # pick the first and last numSamples/2, and extract their features?
    # use RandomForestClassifier's method "extractFeatureVector"
    # NOTE(review): the slice [-numSamples//2-1:-1] drops the single best
    # candidate; [-numSamples//2:] may have been intended -- verify.
    selectedSamples = candidates[0:numSamples//2] + candidates[-numSamples//2-1:-1]
    labels = np.hstack([np.zeros(numSamples//2), np.ones(numSamples//2)])
    # TODO: make sure that the positive examples were all selected in the GT mapping
    getLogger().debug("construct feature matrix")
    node = selectedSamples[0][0]
    if selectedFeatures is None:
        # NOTE(review): relies on .keys() being a mutable list (Python 2);
        # under Python 3 the remove() calls below would fail.
        selectedFeatures = nodeTraxelMap[node].Features.keys()
        forbidden = ['JaccardScores', 'id', 'filename' , 'Polygon', 'detProb', 'divProb', 'com']
        forbidden += [f for f in selectedFeatures if f.count('_') > 0]
        for f in forbidden:
            if f in selectedFeatures:
                selectedFeatures.remove(f)
        getLogger().info("No list of selected features was specified, using {}".format(selectedFeatures))
    rf = RandomForestClassifier(selectedFeatures=selectedFeatures)
    # Use the first sample to discover the feature-vector width.
    features = rf.extractFeatureVector(nodeTraxelMap[node].Features, singleObject=True)
    featureMatrix = np.zeros( [len(selectedSamples), features.shape[1]] )
    featureMatrix[0, :] = features
    for idx, nodeAndScore in enumerate(selectedSamples[1:]):
        features = rf.extractFeatureVector(nodeTraxelMap[nodeAndScore[0]].Features, singleObject=True)
        featureMatrix[idx + 1, :] = features
    rf.train(featureMatrix, labels)
    return rf
23f6d87b94bf0340b70b9803f1b8c712f1d88726 | Add models in session module. | DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site | dataviva/apps/session/models.py | dataviva/apps/session/models.py | from dataviva.apps.session.login_providers import facebook, twitter, google
from dataviva.apps.account.models import User
from dataviva.utils.encode import sha512
from flask import Blueprint, request, render_template, session, redirect, Response
from flask.ext.login import login_user, logout_user
from forms import LoginForm
# Blueprint for session (login/logout) routes, mounted under a language code.
mod = Blueprint('session', __name__,
                template_folder='templates',
                url_prefix='/<lang_code>/session',
                static_folder='static')
@mod.route('/login', methods=["GET", "POST"])
def login():
    """Render the login form (GET) or authenticate the user (POST).

    POST matches on email plus the SHA-512 hash of the submitted password.
    Responses: redirect to "/" on success, 401 while the account is not
    yet confirmed, 400 on bad credentials.
    """
    form = LoginForm()
    if request.method == "POST":
        # NOTE(review): form fields are read without form.validate();
        # confirm CSRF/validation is handled elsewhere.
        user = User.query.filter_by(email=form.email.data, password=sha512(form.password.data)).first()
        if user:
            if user.confirmed:
                # Persistent "remember me" session.
                login_user(user, remember=True)
                return redirect("/")
            else:
                return Response("Confirm Pending", status=401, mimetype='application/json', )
        else:
            return Response("Email or Password Incorrect!", status=400, mimetype='application/json')
    else:
        return render_template('user/login.html', form=form)
@mod.route('/logout/')
def logout():
    """Log the current user out and drop cached OAuth provider tokens."""
    # Remove third-party login tokens from the session, if present.
    session.pop('twitter_token', None)
    session.pop('google_token', None)
    session.pop('facebook_token', None)
    logout_user()
    return redirect('/')
| mit | Python | |
ccd1822d65f5565d4881e5a6a32b535e55cc2b50 | Implement preview of entries for restricted users in EntryPreviewMixin | 1844144/django-blog-zinnia,ghachey/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,bywbilly/django-blog-zinnia,ghachey/django-blog-zinnia,Zopieux/django-blog-zinnia,ghachey/django-blog-zinnia,extertioner/django-blog-zinnia,1844144/django-blog-zinnia,ZuluPro/django-blog-zinnia,extertioner/django-blog-zinnia,dapeng0802/django-blog-zinnia,bywbilly/django-blog-zinnia,Maplecroft/django-blog-zinnia,ZuluPro/django-blog-zinnia,marctc/django-blog-zinnia,Maplecroft/django-blog-zinnia,1844144/django-blog-zinnia,aorzh/django-blog-zinnia,Zopieux/django-blog-zinnia,Fantomas42/django-blog-zinnia,Fantomas42/django-blog-zinnia,aorzh/django-blog-zinnia,Maplecroft/django-blog-zinnia,dapeng0802/django-blog-zinnia,ZuluPro/django-blog-zinnia,Zopieux/django-blog-zinnia,petecummings/django-blog-zinnia,extertioner/django-blog-zinnia,bywbilly/django-blog-zinnia,aorzh/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,dapeng0802/django-blog-zinnia,Fantomas42/django-blog-zinnia | zinnia/views/mixins/entry_preview.py | zinnia/views/mixins/entry_preview.py | """Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
    """
    Mixin implementing the preview of Entries.
    Lets privileged users and authors view entries before publication.
    """
    def get_object(self, queryset=None):
        """
        If the status of the entry is not PUBLISHED,
        a preview is requested, so we check if the user
        has the 'zinnia.can_view_all' permission or if
        it's an author of the entry.
        Raises Http404 otherwise, hiding the entry's existence.
        """
        obj = super(EntryPreviewMixin, self).get_object(queryset)
        if obj.status == PUBLISHED:
            return obj
        if (self.request.user.has_perm('zinnia.can_view_all') or
                self.request.user in obj.authors.all()):
            return obj
        raise Http404(_('No entry found matching the query'))
| bsd-3-clause | Python | |
120c93a2dd0022de5cb3a30ceffc027e69b23c3a | Add ProgressMonitor | jimfleming/recurrent-entity-networks,mikalyoung/recurrent-entity-networks,jimfleming/recurrent-entity-networks,mikalyoung/recurrent-entity-networks | entity_networks/monitors.py | entity_networks/monitors.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import tensorflow as tf
from tqdm import tqdm
class ProgressMonitor(tf.contrib.learn.monitors.EveryN):
    """tqdm progress bar reporting rolling means of watched tensors.

    NOTE: Python 2 only as written (uses dict.iteritems()).
    """
    def __init__(self, tensor_names, every_n_steps=100, first_n_steps=1):
        # tensor_names: dict {display tag -> tensor name}, or an iterable
        # of tensor names (each name then doubles as its tag).
        super(ProgressMonitor, self).__init__(every_n_steps, first_n_steps)
        if not isinstance(tensor_names, dict):
            tensor_names = {tensor_name: tensor_name for tensor_name in tensor_names}
        self._tensor_names = tensor_names
        # One ring buffer of the last `every_n_steps` values per tensor.
        self._tensor_history = [np.zeros(every_n_steps) for tensor_name in tensor_names]
        self._last_step = 0
    def begin(self, max_steps=None):
        super(ProgressMonitor, self).begin(max_steps)
        self._progress_bar = tqdm(total=max_steps, unit='batches')
    def end(self, session=None):
        super(ProgressMonitor, self).end(session)
        self._progress_bar.close()
    def every_n_step_begin(self, step):
        # Request that the watched tensors be fetched this step.
        super(ProgressMonitor, self).every_n_step_begin(step)
        return list(self._tensor_names.values())
    def every_n_step_end(self, step, outputs):
        super(ProgressMonitor, self).every_n_step_end(step, outputs)
        stats = []
        # Pairs dict iteration order with the history list built from the
        # same dict, so the two stay aligned within one run.
        for (tag, tensor_name), tensor_history in zip(self._tensor_names.iteritems(), self._tensor_history):
            # Store this step's value in the ring buffer slot.
            tensor_history[step%self._every_n_steps] = outputs[tensor_name]
            # Mean over values recorded so far (at most the window size).
            tensor_mean = np.mean(tensor_history[:min(step, self._every_n_steps)])
            stats.append("{}: {:.6f}".format(tag, tensor_mean))
        self._progress_bar.set_description(", ".join(stats))
        self._progress_bar.update(step - self._last_step)
        self._last_step = step
| mit | Python | |
770ed3ea3ec2ab8d76172b85bd8b37c22517139c | add initial define function | PrestigeDox/Watashi-SelfBot | cogs/define.py | cogs/define.py | import discord
from discord.ext import commands
from bs4 import BeautifulSoup
class Define:
    """Cog that scrapes Google's dictionary box to define a word."""
    def __init__(self, bot):
        self.bot = bot
        self.aiohttp_session = bot.aiohttp_session
        self.url = 'https://google.com/search'
        # Browser-like UA so Google serves the normal results page.
        self.headers = {'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/41.0.2228.0 Safari/537.36'}
        # Maps Google's part-of-speech labels to short abbreviations.
        self.parts_of_speech = {'noun': 'n.', 'verb': 'v.', 'adjective': 'adj.', 'adverb': 'adv.',
                                'interjection': 'interj.', 'conjunction': 'conj.', 'preposition': 'prep.',
                                'pronoun': 'pron.'}
        self.error_cmd = bot.get_command('error')
    @commands.command(aliases=['def'])
    async def define(self, ctx, word: str):
        """ Define a word """
        params = {'q': f'define+{word}', 'source': 'hp'}
        async with self.aiohttp_session.get(self.url, params=params, headers=self.headers) as r:
            html = await r.text()
        soup = BeautifulSoup(html, 'lxml')
        try:
            # The definition text lives in the div marked data-dobid="dfn".
            defn = soup.find('div', attrs={'data-dobid': 'dfn'}).span.text
            # NOTE(review): an unknown part-of-speech label raises KeyError,
            # which the except below does not catch.
            pos = self.parts_of_speech[soup.find('div', attrs={'class': 'lr_dct_sf_h'}).span.text]
        except AttributeError:
            # .find() returned None -- no definition box (or a captcha page).
            print('Unable to find definition. Ensure you do not have to do a Google captcha.')
            return await ctx.invoke(self.error_cmd, err=f'Unable to find a definition for `{word}`.')
        await ctx.send(f'{word} _{pos}_ {defn}')
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(Define(bot))
| mit | Python | |
fe7d8e23a6ab8d86c39ef8ede2ddafa40a7fc1fb | Add RIPE space lookup thread | job/irrexplorer,job/irrexplorer,job/irrexplorer,job/irrexplorer | irrexplorer/ripe.py | irrexplorer/ripe.py | #!/usr/bin/env python
# Copyright (C) 2015 Job Snijders <job@instituut.net>
#
# This file is part of IRR Explorer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import radix
import time
import threading
import multiprocessing
class RIPELookupWorker(threading.Thread):
    """
    A lookup thread specific to the BGP data, might be good
    to merge this into the IRR lookup worker at some point.
    Consumes ("is_covered", prefix) requests from lookup_queue and puts
    the least-specific covering RIPE-managed prefix on result_queue.
    """
    def __init__(self, tree, prefixes, lookup_queue, result_queue):
        threading.Thread.__init__(self)
        self.tree = tree
        self.prefixes = prefixes
        self.lookup_queue = lookup_queue
        self.result_queue = result_queue
        #FIXME shipping with hardcoded data is not the nicest approach
        # NOTE(review): the file handle is never closed explicitly.
        for prefix in open('data/ripe-managed-space.txt').readlines():
            self.tree.add(prefix.strip())
            self.prefixes.append(prefix.strip())
    def run(self):
        # Runs forever; exits with the process (thread is made a daemon).
        while True:
            lookup, target = self.lookup_queue.get()
            if not lookup:
                # NOTE(review): skipping without task_done() can make
                # lookup_queue.join() hang on falsy requests.
                continue
            if lookup == "is_covered":
                # NOTE(review): search_worst() returning None (no covering
                # prefix) would raise AttributeError here.
                result = self.tree.search_worst(target).prefix
                self.result_queue.put(result)
            self.lookup_queue.task_done()
class RIPEWorker(multiprocessing.Process):
    """
    FIXME: dynamically fetch & update the RIPE managed tree
    Process wrapper that owns the radix tree and spawns the daemon
    RIPELookupWorker thread that answers queued lookups.
    """
    def __init__(self, lookup_queue, result_queue):
        multiprocessing.Process.__init__(self)
        self.lookup_queue = lookup_queue
        self.result_queue = result_queue
        self.tree = radix.Radix()
        self.prefixes = []
        self.dbname = "RIPE-AUTH"
        # The lookup thread loads the prefix file and serves requests.
        # NOTE(review): it is created and started here, i.e. in the parent
        # process at construction time, not inside run().
        self.lookup = RIPELookupWorker(self.tree, self.prefixes,
                                       self.lookup_queue, self.result_queue)
        self.lookup.setDaemon(True)
        self.lookup.start()
    def run(self):
        # Python 2 print statement; the process body does nothing further.
        print "info: loaded the tree"
if __name__ == "__main__":
    # Smoke test: start the worker, give it a moment to load its data,
    # then ask which RIPE-managed block covers a sample prefix.
    lookup_queue = multiprocessing.JoinableQueue()
    result_queue = multiprocessing.JoinableQueue()
    a = RIPEWorker(lookup_queue, result_queue)
    a.start()
    time.sleep(1)
    lookup_queue.put(("is_covered", "194.33.96.0/24"))
    lookup_queue.join()
    print result_queue.get()
| bsd-2-clause | Python | |
a91386a802d3346c945e107aa3abd6aa5fcfe0d7 | Solve double base palindrome | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | project_euler/036.double_palindromes.py | project_euler/036.double_palindromes.py | '''
Problem 036
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in
base 10 and base 2.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
def is_palindrome(number):
    """Return True when the string form of *number* reads the same reversed."""
    text = str(number)
    return text == text[::-1]
def solve_problem(limit):
    """Sum every n in [1, limit) palindromic in both base 10 and base 2."""
    return sum(
        n
        for n in range(1, limit)
        if is_palindrome(n) and is_palindrome(format(n, 'b'))
    )
if __name__ == "__main__":
    # Problem statement asks for numbers below one million.
    limit = 1000000
    print(solve_problem(limit))
| mit | Python | |
892b6b6cb334ec3f932881f7e698e3ab6619cbf3 | add a script to get an API token | browniebroke/deezer-python,browniebroke/deezer-python,browniebroke/deezer-python | oauth.py | oauth.py | """Simple script to obtain an API token via OAuth."""
import webbrowser
from argparse import ArgumentParser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict
from urllib.parse import urlencode
import requests
HOST_NAME = "localhost"
SERVER_PORT = 8080
REDIRECT_PATH = "/oauth/return"
class OAuthDancer:
    """A class to help with completing the OAuth dance."""
    # Deezer API host; the endpoints below are relative to it.
    base_url: str = "https://connect.deezer.com"
    app_id: str
    app_secret: str
    # Must match the redirect URI registered with the Deezer application.
    redirect_url: str = f"http://{HOST_NAME}:{SERVER_PORT}{REDIRECT_PATH}"
    def __init__(self, app_id: str, app_secret: str) -> None:
        self.app_id = app_id
        self.app_secret = app_secret
    def get_auth_page(self) -> str:
        """Build the URL of the auth page where the process starts."""
        query = urlencode(
            {
                "app_id": self.app_id,
                "redirect_uri": self.redirect_url,
                "perms": "basic_access,email",
            }
        )
        return f"{self.base_url}/oauth/auth.php?{query}"
    def get_token(self, code: str) -> Dict[str, str]:
        """Make the API call to exchange *code* for an access token."""
        query = urlencode(
            {
                "app_id": self.app_id,
                "secret": self.app_secret,
                "code": code,
            }
        )
        url = f"{self.base_url}/oauth/access_token.php?{query}"
        response = requests.get(url)
        content = response.content.decode()
        # The body looks like this: 'access_token=blah&expires=1234'
        # -> parse this to a dictionary
        return dict(tuple(p.split("=", 1)) for p in content.split("&"))
class MyServer(BaseHTTPRequestHandler):
    """Simple HTTP request handler to perform the OAuth dance."""
    def do_GET(self) -> None:
        """Route GET requests to the right handler."""
        if self.path.startswith(REDIRECT_PATH):
            self.redirect_route()
        else:
            # Every other path shows the link that starts the flow.
            self._render_content(
                f'<a href="{self.oauth_dancer.get_auth_page()}">Start Oauth Flow</a>'
            )
    def redirect_route(self) -> None:
        """
        Handle the redirect route.
        Once the OAuth is approved, Deezer redirects to this route with `?code=blah`
        at the end of the URL.
        Grab this code, make the API call to obtain the token and display it.
        """
        route, params_str = self.path.split("?", 1)
        query_params = dict(p.split("=", 1) for p in params_str.split("&"))
        token_data = self.oauth_dancer.get_token(query_params["code"])
        self._render_content(
            f"Token = {token_data['access_token']}"
            f"<br>"
            f"Expires = {token_data['expires']}"
        )
        # Token shown -- unwind out of serve_forever() to shut down.
        raise SystemExit("All Done")
    def _render_content(self, content: str):
        """Render the provided content in a HTML page."""
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(
            bytes(
                "<html>"
                "<head><title>Deezer API OAuth dancer</title></head>"
                "<body>"
                f"<p>{content}</p>"
                "</body>"
                "</html>",
                "utf-8",
            )
        )
    @property
    def oauth_dancer(self):
        """Shortcut to access the `OAuthDancer` instance."""
        # The instance is attached to the HTTPServer before serving starts.
        return self.server.oauth_dancer
if __name__ == "__main__":
    # Parse command line options
    parser = ArgumentParser()
    parser.add_argument("--app-id", type=str, required=True)
    parser.add_argument("--app-secret", type=str, required=True)
    args = parser.parse_args()
    # Start local webserver
    webserver = HTTPServer((HOST_NAME, SERVER_PORT), MyServer)
    # Commence OAuth Dance
    webserver.oauth_dancer = OAuthDancer(app_id=args.app_id, app_secret=args.app_secret)
    start_url = webserver.oauth_dancer.get_auth_page()
    print(f"Opening {start_url} in web browser...")
    webbrowser.open(start_url)
    # Wait for user action and display token when finished
    # (the request handler raises SystemExit once the token is shown).
    try:
        webserver.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        webserver.server_close()
| mit | Python | |
3096af347f1cda453eb48f7002371a49b389c568 | use keep_lazy if available | haakenlid/django-extensions,haakenlid/django-extensions,django-extensions/django-extensions,linuxmaniac/django-extensions,linuxmaniac/django-extensions,django-extensions/django-extensions,linuxmaniac/django-extensions,haakenlid/django-extensions,django-extensions/django-extensions | django_extensions/utils/text.py | django_extensions/utils/text.py | # -*- coding: utf-8 -*-
import six
from django.utils.encoding import force_text
try:
from django.utils.functional import keep_lazy
KEEP_LAZY = True
except ImportError:
from django.utils.functional import allow_lazy
KEEP_LAZY = False
def truncate_letters(s, num):
    """
    truncates a string to a number of letters, similar to truncate_words
    Keeps at most ``num`` characters and appends '...' when truncation
    occurred (unless the cut text already ends with '...').
    """
    s = force_text(s)
    length = int(num)
    if len(s) > length:
        s = s[:length]
        if not s.endswith('...'):
            s += '...'
    return s
if KEEP_LAZY:
    # Newer Django: keep_lazy is a decorator factory taking the result types.
    truncate_letters = keep_lazy(six.text_type)(truncate_letters)
else:
    # Older Django: allow_lazy takes the function and the result types.
    truncate_letters = allow_lazy(truncate_letters, six.text_type)
| # -*- coding: utf-8 -*-
import six
from django.utils.encoding import force_text
from django.utils.functional import allow_lazy
def truncate_letters(s, num):
    """
    truncates a string to a number of letters, similar to truncate_words
    Appends '...' when the string was actually shortened (unless the cut
    already ends with '...').
    """
    s = force_text(s)
    length = int(num)
    if len(s) > length:
        s = s[:length]
        if not s.endswith('...'):
            s += '...'
    return s
# Make the function usable with lazily-evaluated (translation) strings.
truncate_letters = allow_lazy(truncate_letters, six.text_type)
| mit | Python |
005c9d1a51793fe76c798be2f546552bb2ee2088 | add word graph boilerplate code | parrt/msan501-starterkit | graphs/wordgraph.py | graphs/wordgraph.py | def gml2adjlist(G):
"""
Return a dict mapping word to adjacent nodes. G.node dict in memory
looks like:
{0: {'id': 0, 'value': 0, 'label': 'agreeable'},
1: {'id': 1, 'value': 1, 'label': 'man'}, ... }
and G.edge dict looks like:
{0: {1: {}, 2: {}, 3: {}}, 1: {0: {}, 19: {}, 2: {}, 102: {}, ...}, ...}
and we need:
{agreeable:['man', 'old', 'person'], man:[['agreeable', 'best', 'old', ...], ...}
"""
words = collections.OrderedDict() # keep stuff in order read from GML
...
return words
| bsd-2-clause | Python | |
7c10150d5e667921450e8663fa9440253a495160 | Add migration for moving recomended articles recomended section | praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem | gem/migrations/0014_convert_recomended_articles.py | gem/migrations/0014_convert_recomended_articles.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
    '''
    Creates recommended article objects from article_list
    and _prepends_ to existing recommended articles.
    Articles already present in article_list are not re-added from the
    previously existing recommendations (no duplicates).
    '''
    # Snapshot the current recommendations before wiping them.
    existing_recommended_articles = [
        ra.recommended_article.specific
        for ra in main_article.recommended_articles.all()]
    ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
    # New (hyperlinked) articles come first...
    for hyperlinked_article in article_list:
        ArticlePageRecommendedSections(
            page=main_article,
            recommended_article=hyperlinked_article).save()
    # re-create existing recommended articles
    for article in existing_recommended_articles:
        if article not in article_list:
            ArticlePageRecommendedSections(
                page=main_article,
                recommended_article=article).save()
def convert_articles(apps, schema_editor):
    '''
    Derived from https://github.com/wagtail/wagtail/issues/2110
    Moves inline `page` blocks out of each article body and turns them
    into recommended-article relations on the article instead.
    '''
    articles = ArticlePage.objects.all().exact_type(ArticlePage)
    for article in articles:
        stream_data = []
        linked_articles = []
        for block in article.body.stream_data:
            if block['type'] == 'page':
                # Keep only links that still resolve to an ArticlePage.
                if ArticlePage.objects.filter(id=block['value']):
                    linked_articles.append(ArticlePage.objects.get(
                        id=block['value']))
            else:
                # add block to new stream_data
                stream_data.append(block)
        # The body is only rewritten when at least one linked article was
        # found; otherwise the article stays untouched.
        if linked_articles:
            create_recomended_articles(article, linked_articles)
            stream_block = article.body.stream_block
            article.body = StreamValue(stream_block, stream_data, is_lazy=True)
            article.save()
            # Enable the related sections on the parent section page.
            section = article.get_parent().specific
            section.enable_recommended_section = True
            section.enable_next_section = True
            section.save()
class Migration(migrations.Migration):
    # Data migration: convert inline page links in article bodies into
    # recommended-article relations (no reverse operation provided).
    dependencies = [
        ('gem', '0013_gemsettings_moderator_name'),
    ]
    operations = [
        migrations.RunPython(convert_articles),
    ]
| bsd-2-clause | Python | |
909f2c9739429ea3e6954a829e0776d84714d4fd | Add migration | webkom/holonet,webkom/holonet,webkom/holonet | holonet/core/migrations/0007_auto_20150324_1049.py | holonet/core/migrations/0007_auto_20150324_1049.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema migration: make User.sasl_token a unique CharField(32).
    dependencies = [
        ('core', '0006_auto_20150324_0035'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='sasl_token',
            field=models.CharField(max_length=32, verbose_name='SASL Token', unique=True),
            preserve_default=True,
        ),
    ]
| mit | Python | |
abd05378eb6acf742f2deff4228a0bca4492521b | Add example showing scraping/parsing of an HTML table into a Python dict | pyparsing/pyparsing,pyparsing/pyparsing | examples/htmlTableParser.py | examples/htmlTableParser.py | #
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
    """Build an expression matching one <tr> whose cells use the given tag pair."""
    body = pp.SkipTo(end_tag)
    # Strip whitespace and embedded markup from each cell's text.
    body.addParseAction(pp.tokenMap(str.strip),
                        pp.tokenMap(strip_html))
    row = pp.Group(tr.suppress()
                   + pp.ZeroOrMore(start_tag.suppress()
                                   + body
                                   + end_tag.suppress())
                   + tr_end.suppress())
    return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
    page_html = page.read().decode()
# parse the first matching table found on the page
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
| mit | Python | |
5aeb0e41621eeb397ea16aff22d7f4deaf8fa7a2 | Add python play example | Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io,Drooids/sipgate.io | examples/python/play-url.py | examples/python/play-url.py | #!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urlparse
import logging
from xml.dom.minidom import Document
logging.basicConfig(level=logging.DEBUG)
class MegaAwesomePythonServer(BaseHTTPRequestHandler):
    """Python 2 sipgate.io example: answers every POST with a <Play> XML."""
    def do_POST(self):
        # Read the url-encoded callback body in full.
        length = int(self.headers.getheader('Content-Length'))
        data = urlparse.parse_qs(self.rfile.read(length))
        logging.debug("from: " + data.get("from")[0])
        logging.debug("to: " + data.get("to")[0])
        # Build the XML response instructing sipgate to play a sound file.
        doc = Document()
        response = doc.createElement('Response')
        play = doc.createElement('Play')
        url = doc.createElement('Url')
        urlString = doc.createTextNode('http://www.example.com/example.wav')
        url.appendChild(urlString)
        play.appendChild(url)
        response.appendChild(play)
        doc.appendChild(response)
        self.send_response(200)
        self.send_header('Content-Type', 'application/xml')
        self.end_headers()
        self.wfile.write(doc.toxml())
# Serve on all interfaces, port 3000, until interrupted.
server = HTTPServer(('', 3000), MegaAwesomePythonServer)
server.serve_forever()
| bsd-2-clause | Python | |
c1f3bb8b3bc3a6685cd839df92a035298ecea2b9 | Create compoundword.py | disasterisk/itc110 | compoundword.py | compoundword.py | import random
dic1 = ["life", "moon", "butter", "fire", "basket", "foot", "weather", "earth", "play", "super", "grand", "rattle", "skate", "grass", "eye", "honey", "dish", "pop", "book", "thunder", "head", "glass", "boot", "air", "baby", "ham", "common", "sea", "sand", "river", "tooth", "town", "sauce", "disk", "horse", "rain", "stone"] # - week, no, north, up, down, more, along, cross, some, back, home, every, what, long, school, watch, key, under, south, any, life, black, wide, rail
dic2 = ["guard", "walk", "time", "light", "body", "flies", "thing", "ball", "man", "quake", "stream", "day", "bone", "giant", "goat", "mother", "flower", "structure", "snake", "board", "house", "hopper", "made", "smith", "moon", "washer", "corn", "case", "fish", "storm", "town", "maker", "making", "plane", "sitter", "person", "ship", "dew", "drive", "paste", "keeper", "check", "woman","watch", "fighter"]#-ever, self, place, pan, back, down, way, shore, pick, noon, end, stone, ground, drive, road, strap
goodWords = set()
end = "Maybe another time then"
print ("Want to know my favorite word?")
def ask():
    # Prompt for a yes/no answer; anything other than 'y' counts as no.
    ans = input('y/n: ')
    if ans == 'y':
        return True
    else:
        return False
play = ask()
while play == True :
    # Pick a random prefix + suffix to form a compound word.
    corn = random.choice(dic1)
    dog = random.choice(dic2)
    cornDog=corn+dog
    if len(goodWords)<(len(dic1)*len(dic2)):
        # Re-roll until we get a compound word not shown before.
        while cornDog in goodWords:
            print ("Hold on...")
            corn = random.choice(dic1)
            dog = random.choice(dic2)
            cornDog=corn+dog
        goodWords.add(cornDog)
        print (("\'"+cornDog+".\' ")+(cornDog+" ")*random.randint(2,4)+cornDog+".")
        print ("Do you want to hear another excellent word?")
        play = ask()
    else:
        # Every prefix/suffix combination has already been used.
        end = "There are no more! Are you happy??"
        play = False
print (end)
| mit | Python | |
b48bd670084cd1b2e443eb284813b949edbff6ca | Add gunicorn config | angstwad/linky,angstwad/linky | linky/config/gunicorn.conf.py | linky/config/gunicorn.conf.py | import multiprocessing
appname = "linky"
procname = appname
bind = "unix:/tmp/%s" % appname
workers = multiprocessing.cpu_count() * 2 + 1
max_requests = 1000
preload_app = True
accesslog = "/home/webapp/apps/linky/logs/access.log"
errorlog = "/home/webapp/apps/linky/logs/error.log"
loglevel = "info"
| apache-2.0 | Python | |
19f8cf043437d3ed0feac6ce1619636189904277 | add get_partners.py | Mesitis/community | sample-code/Python/get_partners.py | sample-code/Python/get_partners.py | '''
- login and get token
- process 2FA if 2FA is setup for this account
- returns all user types if user is a partner admin (or above) - else error
'''
import requests
import json
# Python 2 script (uses print statements).
get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/"
validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication
get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json"
get_partners_url = "https://api.canopy.cloud:443/api/v1/admin/partners.json"
#please replace below with your username and password over here
username = 'login_name'
password = 'xxxxxxxxx'
#please enter the OTP token in case it is enabled
otp_code = '123456'
#first call for a fresh token
# Credentials are sent form-encoded as user[username] / user[password].
payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
headers = {
    'accept': "application/json",
    'content-type':"application/x-www-form-urlencoded"
    }
response = requests.request("POST", get_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
token = response.json()['token']
login_flow = response.json()['login_flow']
#in case 2FA is enabled use the OTP code to get the second level of authentication
if login_flow == '2fa_verification':
    headers['Authorization'] = token
    payload = 'otp_code=' + otp_code
    response = requests.request("POST", validate_otp_url, data=payload, headers=headers)
    print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text
    # The OTP-validated session returns a fresh token used from here on.
    token = response.json()['token']
# Fetch the partner list with the (possibly refreshed) token.
headers = {
    'authorization': token,
    'content-type': "application/x-www-form-urlencoded; charset=UTF-8"
    }
response = requests.request("GET", get_partners_url, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
| mit | Python | |
355094293afbe0836304be495307155aea6c26a8 | Create Brain_TTS.py | MaxMorgenstern/EmeraldAI,MaxMorgenstern/EmeraldAI,MaxMorgenstern/EmeraldAI,MaxMorgenstern/EmeraldAI,MaxMorgenstern/EmeraldAI | EmeraldAI/Application/Main/Brain_TTS.py | EmeraldAI/Application/Main/Brain_TTS.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import time
from os.path import dirname, abspath
sys.path.append(dirname(dirname(dirname(dirname(abspath(__file__))))))
reload(sys)
sys.setdefaultencoding('utf-8')
import rospy
from std_msgs.msg import String
from EmeraldAI.Logic.Modules import Pid
from EmeraldAI.Config.Config import *
from EmeraldAI.Logic.Audio.SoundMixer import *
from EmeraldAI.Logic.Memory.Brain import Brain as BrainMemory
class BrainTTS:
    """ROS node that plays audio files announced on the TTS topic."""
    def __init__(self):
        # Shell command template for the external audio player; the file
        # name is substituted for {0}.
        self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
        self.__usePygame = Config().GetBoolean("TextToSpeech", "UsePygame")
        rospy.init_node("emerald_brain_tts_node", anonymous=True)
        rospy.Subscriber("/emerald_ai/io/text_to_speech/file", String, self.playAudio)
    def playAudio(self, data):
        # Messages are pipe-delimited: "TTS|<path-to-audio-file>".
        dataParts = data.data.split("|")
        if dataParts[0] != "TTS":
            return
        # TODO: determine the real audio duration instead of assuming 0.
        audioDuration = 0
        BrainMemory().Set("TTS.Until", (rospy.Time.now().to_sec() + audioDuration))
        if self.__usePygame:
            SoundMixer().Play(dataParts[1])
            return
        # NOTE(review): the file name is interpolated into a shell command;
        # a malicious topic message could inject shell syntax.
        os.system(self.__audioPlayer.format(dataParts[1]))
##### MAIN #####
if __name__ == "__main__":
    # Single-instance guard backed by a pid file.
    if(Pid.HasPid("Brain.TTS")):
        print "Process is already runnung. Bye!"
        sys.exit()
    Pid.Create("Brain.TTS")
    try:
        # NOTE(review): there is no rospy.spin() here -- the constructor
        # returns immediately, so the node may exit right away; confirm
        # this is intended or that spinning happens elsewhere.
        BrainTTS()
    except KeyboardInterrupt:
        print "End"
    finally:
        # Always drop the pid file, even on error or interrupt.
        Pid.Remove("Brain.TTS")
| apache-2.0 | Python | |
99b0596f8bdef41e08ff04e53316ae8edaab29c4 | Add loggers helper | mina-asham/pictures-dedupe-and-rename | pictures/loggers.py | pictures/loggers.py | import logging
# Import-time side effect: configure the root logger once for the whole app.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)s %(levelname)s %(message)s')
def logger_from(name):
    """Return the shared logger registered under *name*.

    Thin wrapper so callers rely on this module's logging setup instead
    of importing ``logging`` themselves.
    """
    logger = logging.getLogger(name)
    return logger
| mit | Python | |
8c2305844c2c0ac501d72567c7f70f5cf784fc7c | Add script to apply a tilix colorscheme file. (#524) | alacritty/alacritty,jwilm/alacritty,jwilm/alacritty,alacritty/alacritty,jwilm/alacritty,jwilm/alacritty | scripts/apply-tilix-colorscheme.py | scripts/apply-tilix-colorscheme.py | #!/usr/bin/env python3
import collections
import logging
import shutil
import json
import sys
import os
import yaml
# Module-level logger (available to callers; not used below).
log = logging.getLogger(__name__)
# Alacritty config location, honoring $XDG_CONFIG_HOME when set.
XDG_CONFIG_HOME = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
ALACONF_FN = os.path.join(XDG_CONFIG_HOME, 'alacritty', 'alacritty.yml')
# The 8 ANSI color slots; 'Pallete' (sic) is only the tuple's display name.
Palette = collections.namedtuple('Pallete', ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'])
class AttrDict(dict):
    """Dict whose keys are also readable and writable as attributes.

    >>> m = AttrDict(omg=True, whoa='yes')
    """
    def __init__(self, *args, **kwargs):
        # Populate like a plain dict, then alias the attribute namespace to
        # the mapping itself so m.key and m['key'] always stay in sync.
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
def slurp_yaml(fn):
    """Parse the file at *fn* and return the resulting object.

    Tilix schemes are JSON, and JSON is a subset of YAML, so one YAML
    parser handles both.
    """
    with open(fn, 'r') as fh:
        # safe_load: yaml.load() without an explicit Loader is unsafe on
        # untrusted input and raises TypeError on PyYAML >= 6.0.
        contents = yaml.safe_load(fh)
    return contents
def fixup_hex_color(*args):
    """Yield each color string with any '#' prefix replaced by '0x'."""
    for color in args:
        yield '0x' + color.strip('#')
def convert(tilix_scheme):
    """Translate a Tilix colorscheme dict into Alacritty's colors mapping."""
    scheme = AttrDict(tilix_scheme)
    # 16-entry ANSI palette: the first 8 are normal, the last 8 bright.
    hex_palette = list(fixup_hex_color(*scheme.palette))
    normal = Palette(*hex_palette[:8])
    bright = Palette(*hex_palette[8:])
    background, foreground = fixup_hex_color(
        scheme['background-color'], scheme['foreground-color'])
    cursor_text, cursor_color = fixup_hex_color(
        scheme['cursor-background-color'], scheme['cursor-foreground-color'])
    return {
        'primary': {'background': background, 'foreground': foreground},
        'cursor': {'text': cursor_text, 'cursor': cursor_color},
        'normal': dict(normal._asdict()),
        'bright': dict(bright._asdict()),
    }
def patch_alaconf_colors(colors, alaconf_fn=ALACONF_FN):
    """Replace the top-level 'colors:' section of the Alacritty config.

    Rewrites alaconf_fn in place (via a .tmp file and os.rename), keeping
    every line outside the 'colors:' block -- comments included -- and
    leaving a .bak copy of the previous config.
    """
    with open(alaconf_fn, 'r') as fh:
        ac_raw = fh.read()
    # Write config file taking care to not remove delicious comments.
    # Sure, it's janky, but less so than losing comments.
    skipping = False
    lines = []
    for line in ac_raw.splitlines():
        if skipping:
            # The colors block ends at the next unindented (top-level) key.
            if line and line[0].isalpha():
                skipping = False
        elif line.startswith('colors:'):
            skipping = True
        if not skipping:
            # Collapse runs of blank lines left behind by the removal.
            if not line and lines and not lines[-1]:
                continue
            lines.append(line)
    temp_fn = '%s.tmp' % alaconf_fn
    backup_fn = '%s.bak' % alaconf_fn
    with open(temp_fn, 'w') as fh:
        fh.write('\n'.join(lines))
        fh.write('\n')
        # Append the new colors section as YAML at the end of the file.
        yaml.safe_dump(dict(colors=colors), fh)
    shutil.copyfile(alaconf_fn, backup_fn)
    # Swap in the new file only after it has been fully written.
    os.rename(temp_fn, alaconf_fn)
def main(argv=sys.argv):
    """Entry point: read the Tilix scheme named in argv[1] and apply it.

    Exits with status 1 (after printing usage to stderr) unless exactly
    one argument is given. The default binds sys.argv at import time,
    which is fine for a one-shot script.
    """
    if len(argv) != 2:
        # argv[0] is the script name; the previous sys.executable printed
        # the Python interpreter path, which is misleading in a usage line.
        print("Usage: %s TILIX_SCHEME_JSON_FILE" % argv[0], file=sys.stderr)
        sys.exit(1)
    fn = argv[1]
    tilix_scheme = slurp_yaml(fn)
    colors = convert(tilix_scheme)
    patch_alaconf_colors(colors)
# Run only when executed directly; importing exposes main() for reuse.
if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
62fe7541fd1c9272616f9e7021617f2fb766bd93 | add models placeholder for django | qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq | pillowtop/models.py | pillowtop/models.py | # placeholder for django | bsd-3-clause | Python | |
2ab86a15b956954f5de99db177a6a69b48677e2b | Add Webcam object | ptomato/Beams | src/Webcam.py | src/Webcam.py | import cv
class Webcam:
    """Context-manager wrapper around a legacy OpenCV (``cv``) camera capture.

    Usage::

        with Webcam() as cam:
            frame = cam.query_frame()
    """
    def __init__(self, cam=-1):
        # cam=-1 asks OpenCV for the first available camera device.
        self.capture = None
        self.camera_number = cam
    def __enter__(self):
        self.open()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # The context-manager protocol passes the exception triple; the old
        # __exit__(self) signature raised TypeError on leaving the block.
        self.close()
        return False  # never suppress exceptions
    def open(self):
        """Acquire the capture device.

        Fixes the original ``def open()`` which was missing both ``self``
        and the trailing colon (a syntax error).
        """
        self.capture = cv.CaptureFromCAM(self.camera_number)
    def close(self):
        """Release the capture device and forget the stale handle."""
        cv.ReleaseCapture(self.capture)
        self.capture = None
    def query_frame(self):
        """Grab and return the next frame (the original discarded it)."""
        return cv.QueryFrame(self.capture)
| mit | Python | |
231943a950b49e46b86467991ca6e4c7b3505be0 | update python learn - module | heysion/1ghl,heysion/1ghl,heysion/1ghl,heysion/1ghl | python/study/module-test.py | python/study/module-test.py | #module test
import sys
# Python 2 demo script: echo the command-line arguments one per line...
print 'the sys argv list:'
for i in sys.argv:
    print i
# ...then dump the module search path.
print sys.path
| bsd-3-clause | Python | |
3d3602faf4a47855be264f05d9d52253e8bd0f9d | Add RPC test for the p2p mempool command in conjunction with disabled bloomfilters | daliwangi/bitcoin,cannabiscoindev/cannabiscoin420,h4x3rotab/BTCGPU,sipsorcery/bitcoin,core-bitcoin/bitcoin,21E14/bitcoin,h4x3rotab/BTCGPU,instagibbs/bitcoin,sebrandon1/bitcoin,Rav3nPL/bitcoin,Sjors/bitcoin,mb300sd/bitcoin,mb300sd/bitcoin,HashUnlimited/Einsteinium-Unlimited,zcoinofficial/zcoin,sstone/bitcoin,myriadteam/myriadcoin,Michagogo/bitcoin,RHavar/bitcoin,Electronic-Gulden-Foundation/egulden,dpayne9000/Rubixz-Coin,daliwangi/bitcoin,randy-waterhouse/bitcoin,Rav3nPL/bitcoin,monacoinproject/monacoin,UASF/bitcoin,cculianu/bitcoin-abc,martindale/elements,tjps/bitcoin,goldcoin/Goldcoin-GLD,tjps/bitcoin,Christewart/bitcoin,ahmedbodi/temp_vert,droark/bitcoin,Exgibichi/statusquo,Mirobit/bitcoin,multicoins/marycoin,jiangyonghang/bitcoin,vmp32k/litecoin,laudaa/bitcoin,bitreserve/bitcoin,deeponion/deeponion,afk11/bitcoin,DigiByte-Team/digibyte,bdelzell/creditcoin-org-creditcoin,myriadteam/myriadcoin,jonasschnelli/bitcoin,MazaCoin/maza,particl/particl-core,ajtowns/bitcoin,particl/particl-core,peercoin/peercoin,cryptoprojects/ultimateonlinecash,starwels/starwels,trippysalmon/bitcoin,segwit/atbcoin-insight,rebroad/bitcoin,randy-waterhouse/bitcoin,kevcooper/bitcoin,emc2foundation/einsteinium,wangxinxi/litecoin,JeremyRubin/bitcoin,ryanofsky/bitcoin,rnicoll/bitcoin,Mirobit/bitcoin,chaincoin/chaincoin,peercoin/peercoin,BTCDDev/bitcoin,psionin/smartcoin,kazcw/bitcoin,core-bitcoin/bitcoin,andreaskern/bitcoin,digibyte/digibyte,namecoin/namecoin-core,bitbrazilcoin-project/bitbrazilcoin,nlgcoin/guldencoin-official,kevcooper/bitcoin,dpayne9000/Rubixz-Coin,mitchellcash/bitcoin,DigitalPandacoin/pandacoin,BitcoinPOW/BitcoinPOW,mm-s/bitcoin,lbrtcoin/albertcoin,Flowdalic/bitcoin,gmaxwell/bitcoin,shaolinfry/litecoin,jmcorgan/bitcoin,yenliangl/bitcoin,BigBlueCeiling/augmentacoin,Exgibichi/statusquo,matlongsi/micropay,isle2983/bitcoin,btc1/bitcoin,1185/starwels,djpne
wton/bitcoin,starwels/starwels,afk11/bitcoin,untrustbank/litecoin,gazbert/bitcoin,simonmulser/bitcoin,namecoin/namecoin-core,jonasschnelli/bitcoin,cculianu/bitcoin-abc,funkshelper/woodcore,wellenreiter01/Feathercoin,litecoin-project/litecore-litecoin,ShadowMyst/creativechain-core,BitzenyCoreDevelopers/bitzeny,daliwangi/bitcoin,pataquets/namecoin-core,ahmedbodi/vertcoin,MeshCollider/bitcoin,ElementsProject/elements,ctwiz/stardust,domob1812/huntercore,nomnombtc/bitcoin,AdrianaDinca/bitcoin,zetacoin/zetacoin,gmaxwell/bitcoin,sipsorcery/bitcoin,jl2012/litecoin,rnicoll/dogecoin,21E14/bitcoin,BTCDDev/bitcoin,nikkitan/bitcoin,tdudz/elements,Friedbaumer/litecoin,rawodb/bitcoin,globaltoken/globaltoken,1185/starwels,jtimon/bitcoin,Bitcoin-ABC/bitcoin-abc,thrasher-/litecoin,jnewbery/bitcoin,haobtc/bitcoin,bdelzell/creditcoin-org-creditcoin,DigiByte-Team/digibyte,donaloconnor/bitcoin,fsb4000/bitcoin,sdaftuar/bitcoin,isle2983/bitcoin,bitbrazilcoin-project/bitbrazilcoin,donaloconnor/bitcoin,Chancoin-core/CHANCOIN,spiritlinxl/BTCGPU,OmniLayer/omnicore,tjps/bitcoin,bitbrazilcoin-project/bitbrazilcoin,jimmysong/bitcoin,XertroV/bitcoin-nulldata,maaku/bitcoin,domob1812/bitcoin,rebroad/bitcoin,GlobalBoost/GlobalBoost,mm-s/bitcoin,andreaskern/bitcoin,TheBlueMatt/bitcoin,apoelstra/bitcoin,n1bor/bitcoin,pinheadmz/bitcoin,awemany/BitcoinUnlimited,bitcoin/bitcoin,segwit/atbcoin-insight,achow101/bitcoin,UASF/bitcoin,ShadowMyst/creativechain-core,jlopp/statoshi,emc2foundation/einsteinium,BTCDDev/bitcoin,peercoin/peercoin,Alonzo-Coeus/bitcoin,achow101/bitcoin,sipsorcery/bitcoin,dgarage/bc2,kallewoof/elements,untrustbank/litecoin,maaku/bitcoin,shouhuas/bitcoin,mb300sd/bitcoin,cannabiscoindev/cannabiscoin420,bitreserve/bitcoin,sarielsaz/sarielsaz,shaolinfry/litecoin,ftrader-bitcoinabc/bitcoin-abc,r8921039/bitcoin,sstone/bitcoin,gzuser01/zetacoin-bitcoin,plncoin/PLNcoin_Core,GlobalBoost/GlobalBoost,zcoinofficial/zcoin,untrustbank/litecoin,yenliangl/bitcoin,mitchellcash/bitcoin,myriadcoin/myriadco
in,jnewbery/bitcoin,btc1/bitcoin,NicolasDorier/bitcoin,daliwangi/bitcoin,domob1812/bitcoin,UASF/bitcoin,rawodb/bitcoin,myriadcoin/myriadcoin,ericshawlinux/bitcoin,RHavar/bitcoin,maaku/bitcoin,jlopp/statoshi,spiritlinxl/BTCGPU,AdrianaDinca/bitcoin,wiggi/huntercore,reorder/viacoin,btc1/bitcoin,jnewbery/bitcoin,afk11/bitcoin,lateminer/bitcoin,ahmedbodi/terracoin,dcousens/bitcoin,zcoinofficial/zcoin,dscotese/bitcoin,jambolo/bitcoin,experiencecoin/experiencecoin,sarielsaz/sarielsaz,MeshCollider/bitcoin,XertroV/bitcoin-nulldata,earonesty/bitcoin,lbrtcoin/albertcoin,shelvenzhou/BTCGPU,jamesob/bitcoin,lbryio/lbrycrd,litecoin-project/litecoin,Rav3nPL/PLNcoin,appop/bitcoin,earonesty/bitcoin,x-kalux/bitcoin_WiG-B,zcoinofficial/zcoin,itmanagerro/tresting,HashUnlimited/Einsteinium-Unlimited,destenson/bitcoin--bitcoin,EthanHeilman/bitcoin,magacoin/magacoin,dogecoin/dogecoin,viacoin/viacoin,fujicoin/fujicoin,apoelstra/bitcoin,BigBlueCeiling/augmentacoin,magacoin/magacoin,elecoin/elecoin,wellenreiter01/Feathercoin,earonesty/bitcoin,Bushstar/UFO-Project,DigiByte-Team/digibyte,okinc/bitcoin,litecoin-project/litecore-litecoin,BitzenyCoreDevelopers/bitzeny,djpnewton/bitcoin,bitcoinsSG/bitcoin,nathaniel-mahieu/bitcoin,prusnak/bitcoin,deeponion/deeponion,bitcoinknots/bitcoin,ajtowns/bitcoin,NicolasDorier/bitcoin,dcousens/bitcoin,JeremyRubin/bitcoin,midnightmagic/bitcoin,jambolo/bitcoin,MazaCoin/maza,lbrtcoin/albertcoin,jamesob/bitcoin,dgarage/bc3,svost/bitcoin,cryptoprojects/ultimateonlinecash,gjhiggins/vcoincore,gravio-net/graviocoin,nathaniel-mahieu/bitcoin,gameunits/gameunits,starwels/starwels,Rav3nPL/PLNcoin,oklink-dev/bitcoin,droark/bitcoin,sebrandon1/bitcoin,psionin/smartcoin,brandonrobertz/namecoin-core,qtumproject/qtum,mitchellcash/bitcoin,XertroV/bitcoin-nulldata,zcoinofficial/zcoin,CryptArc/bitcoin,lateminer/bitcoin,UFOCoins/ufo,dpayne9000/Rubixz-Coin,instagibbs/bitcoin,EntropyFactory/creativechain-core,ftrader-bitcoinabc/bitcoin-abc,bitcoinec/bitcoinec,Kogser/bitcoin,sdaftuar/
bitcoin,JeremyRubin/bitcoin,ftrader-bitcoinabc/bitcoin-abc,fujicoin/fujicoin,kevcooper/bitcoin,dgarage/bc2,jlopp/statoshi,sbaks0820/bitcoin,TheBlueMatt/bitcoin,uphold/bitcoin,joshrabinowitz/bitcoin,earonesty/bitcoin,lbrtcoin/albertcoin,NicolasDorier/bitcoin,shelvenzhou/BTCGPU,cdecker/bitcoin,1185/starwels,guncoin/guncoin,ElementsProject/elements,vmp32k/litecoin,svost/bitcoin,tdudz/elements,nlgcoin/guldencoin-official,ShadowMyst/creativechain-core,stamhe/bitcoin,randy-waterhouse/bitcoin,MarcoFalke/bitcoin,gazbert/bitcoin,jiangyonghang/bitcoin,Michagogo/bitcoin,Diapolo/bitcoin,bitcoinec/bitcoinec,GroestlCoin/GroestlCoin,sarielsaz/sarielsaz,DigitalPandacoin/pandacoin,sstone/bitcoin,GlobalBoost/GlobalBoost,StarbuckBG/BTCGPU,kallewoof/bitcoin,appop/bitcoin,gzuser01/zetacoin-bitcoin,fanquake/bitcoin,Rav3nPL/PLNcoin,UFOCoins/ufo,bitcoinplusorg/xbcwalletsource,patricklodder/dogecoin,qtumproject/qtum,BTCDDev/bitcoin,untrustbank/litecoin,oklink-dev/bitcoin,FeatherCoin/Feathercoin,1185/starwels,cculianu/bitcoin-abc,bitcoinknots/bitcoin,mincoin-project/mincoin,litecoin-project/litecore-litecoin,jamesob/bitcoin,Jcing95/iop-hd,Flowdalic/bitcoin,fanquake/bitcoin,ppcoin/ppcoin,ShadowMyst/creativechain-core,bespike/litecoin,nbenoit/bitcoin,domob1812/huntercore,bdelzell/creditcoin-org-creditcoin,domob1812/bitcoin,lbrtcoin/albertcoin,Kogser/bitcoin,romanornr/viacoin,jamesob/bitcoin,Gazer022/bitcoin,kallewoof/elements,GlobalBoost/GlobalBoost,sdaftuar/bitcoin,ryanofsky/bitcoin,bitcoinsSG/bitcoin,dgarage/bc3,patricklodder/dogecoin,digibyte/digibyte,Rav3nPL/bitcoin,rawodb/bitcoin,ftrader-bitcoinabc/bitcoin-abc,domob1812/namecore,maaku/bitcoin,rnicoll/dogecoin,senadmd/coinmarketwatch,ElementsProject/elements,lbrtcoin/albertcoin,dgarage/bc2,argentumproject/argentum,svost/bitcoin,uphold/bitcoin,shaolinfry/litecoin,bitcoinplusorg/xbcwalletsource,nikkitan/bitcoin,dogecoin/dogecoin,bitreserve/bitcoin,Chancoin-core/CHANCOIN,itmanagerro/tresting,destenson/bitcoin--bitcoin,senadmd/coinmarketwatch,
achow101/bitcoin,litecoin-project/litecoin,core-bitcoin/bitcoin,Electronic-Gulden-Foundation/egulden,EntropyFactory/creativechain-core,mruddy/bitcoin,qtumproject/qtum,sbaks0820/bitcoin,ericshawlinux/bitcoin,jimmysong/bitcoin,lbryio/lbrycrd,tecnovert/particl-core,Cocosoft/bitcoin,paveljanik/bitcoin,cculianu/bitcoin-abc,jonasschnelli/bitcoin,myriadteam/myriadcoin,ryanxcharles/bitcoin,namecoin/namecore,CryptArc/bitcoin,untrustbank/litecoin,metacoin/florincoin,yenliangl/bitcoin,r8921039/bitcoin,chaincoin/chaincoin,HashUnlimited/Einsteinium-Unlimited,StarbuckBG/BTCGPU,djpnewton/bitcoin,gravio-net/graviocoin,droark/bitcoin,destenson/bitcoin--bitcoin,ctwiz/stardust,dscotese/bitcoin,mincoin-project/mincoin,ryanxcharles/bitcoin,spiritlinxl/BTCGPU,haobtc/bitcoin,Exgibichi/statusquo,MazaCoin/maza,viacoin/viacoin,Chancoin-core/CHANCOIN,Bitcoin-ABC/bitcoin-abc,CryptArc/bitcoin,BigBlueCeiling/augmentacoin,deeponion/deeponion,deeponion/deeponion,funkshelper/woodcore,Diapolo/bitcoin,nlgcoin/guldencoin-official,ahmedbodi/terracoin,BTCDDev/bitcoin,BigBlueCeiling/augmentacoin,psionin/smartcoin,r8921039/bitcoin,lbrtcoin/albertcoin,metacoin/florincoin,guncoin/guncoin,okinc/bitcoin,AdrianaDinca/bitcoin,ericshawlinux/bitcoin,AkioNak/bitcoin,MazaCoin/maza,GroestlCoin/bitcoin,wangxinxi/litecoin,jnewbery/bitcoin,Exgibichi/statusquo,practicalswift/bitcoin,ctwiz/stardust,bespike/litecoin,stamhe/bitcoin,andreaskern/bitcoin,myriadcoin/myriadcoin,alecalve/bitcoin,Electronic-Gulden-Foundation/egulden,GroestlCoin/GroestlCoin,21E14/bitcoin,XertroV/bitcoin-nulldata,x-kalux/bitcoin_WiG-B,BitcoinHardfork/bitcoin,Rav3nPL/bitcoin,EthanHeilman/bitcoin,funkshelper/woodcore,Sjors/bitcoin,pstratem/bitcoin,AdrianaDinca/bitcoin,tjps/bitcoin,RHavar/bitcoin,Electronic-Gulden-Foundation/egulden,reorder/viacoin,segsignal/bitcoin,AkioNak/bitcoin,pstratem/bitcoin,namecoin/namecore,Xekyo/bitcoin,stamhe/bitcoin,Diapolo/bitcoin,experiencecoin/experiencecoin,svost/bitcoin,myriadcoin/myriadcoin,Chancoin-core/CHANCOIN,sba
ks0820/bitcoin,afk11/bitcoin,AkioNak/bitcoin,elecoin/elecoin,rnicoll/dogecoin,Alonzo-Coeus/bitcoin,shelvenzhou/BTCGPU,sstone/bitcoin,mm-s/bitcoin,droark/bitcoin,segwit/atbcoin-insight,mitchellcash/bitcoin,myriadteam/myriadcoin,jonasschnelli/bitcoin,Flowdalic/bitcoin,Jcing95/iop-hd,domob1812/bitcoin,dogecoin/dogecoin,argentumproject/argentum,psionin/smartcoin,btc1/bitcoin,gravio-net/graviocoin,qtumproject/qtum,viacoin/viacoin,vmp32k/litecoin,Electronic-Gulden-Foundation/egulden,untrustbank/litecoin,rnicoll/dogecoin,afk11/bitcoin,martindale/elements,ajtowns/bitcoin,trippysalmon/bitcoin,awemany/BitcoinUnlimited,Rav3nPL/bitcoin,vertcoin/vertcoin,lbrtcoin/albertcoin,donaloconnor/bitcoin,ryanofsky/bitcoin,appop/bitcoin,matlongsi/micropay,bespike/litecoin,Theshadow4all/ShadowCoin,awemany/BitcoinUnlimited,174high/bitcoin,andreaskern/bitcoin,bitcoinsSG/bitcoin,jiangyonghang/bitcoin,fsb4000/bitcoin,myriadteam/myriadcoin,ahmedbodi/vertcoin,digibyte/digibyte,sdaftuar/bitcoin,NicolasDorier/bitcoin,Kogser/bitcoin,plncoin/PLNcoin_Core,dpayne9000/Rubixz-Coin,XertroV/bitcoin-nulldata,ctwiz/stardust,ahmedbodi/vertcoin,plncoin/PLNcoin_Core,bitcoinplusorg/xbcwalletsource,UASF/bitcoin,trippysalmon/bitcoin,mm-s/bitcoin,dgarage/bc3,funkshelper/woodcore,Michagogo/bitcoin,MeshCollider/bitcoin,mm-s/bitcoin,gameunits/gameunits,namecoin/namecore,bitcoin/bitcoin,rawodb/bitcoin,dcousens/bitcoin,kazcw/bitcoin,s-matthew-english/bitcoin,bdelzell/creditcoin-org-creditcoin,r8921039/bitcoin,mincoin-project/mincoin,Alonzo-Coeus/bitcoin,sbaks0820/bitcoin,Kogser/bitcoin,Cocosoft/bitcoin,destenson/bitcoin--bitcoin,randy-waterhouse/bitcoin,Kogser/bitcoin,awemany/BitcoinUnlimited,senadmd/coinmarketwatch,cculianu/bitcoin-abc,Gazer022/bitcoin,Friedbaumer/litecoin,Kogser/bitcoin,djpnewton/bitcoin,mitchellcash/bitcoin,gzuser01/zetacoin-bitcoin,spiritlinxl/BTCGPU,kallewoof/bitcoin,ppcoin/ppcoin,Flowdalic/bitcoin,appop/bitcoin,goldcoin/goldcoin,EthanHeilman/bitcoin,ahmedbodi/temp_vert,gazbert/bitcoin,cannabiscoin
dev/cannabiscoin420,wellenreiter01/Feathercoin,trippysalmon/bitcoin,itmanagerro/tresting,pataquets/namecoin-core,sstone/bitcoin,Bushstar/UFO-Project,GroestlCoin/bitcoin,jl2012/litecoin,isle2983/bitcoin,gameunits/gameunits,monacoinproject/monacoin,sarielsaz/sarielsaz,joshrabinowitz/bitcoin,fujicoin/fujicoin,CryptArc/bitcoin,gjhiggins/vcoincore,FeatherCoin/Feathercoin,pinheadmz/bitcoin,shouhuas/bitcoin,TheBlueMatt/bitcoin,argentumproject/argentum,dgarage/bc3,Gazer022/bitcoin,Anfauglith/iop-hd,MeshCollider/bitcoin,core-bitcoin/bitcoin,Anfauglith/iop-hd,AdrianaDinca/bitcoin,senadmd/coinmarketwatch,isle2983/bitcoin,BTCGPU/BTCGPU,romanornr/viacoin,practicalswift/bitcoin,goldcoin/Goldcoin-GLD,jiangyonghang/bitcoin,mm-s/bitcoin,yenliangl/bitcoin,monacoinproject/monacoin,174high/bitcoin,domob1812/huntercore,Christewart/bitcoin,trippysalmon/bitcoin,paveljanik/bitcoin,isle2983/bitcoin,h4x3rotab/BTCGPU,pataquets/namecoin-core,thrasher-/litecoin,MarcoFalke/bitcoin,Mirobit/bitcoin,segsignal/bitcoin,experiencecoin/experiencecoin,wangxinxi/litecoin,kazcw/bitcoin,fsb4000/bitcoin,BigBlueCeiling/augmentacoin,uphold/bitcoin,ahmedbodi/terracoin,emc2foundation/einsteinium,ixcoinofficialpage/master,bitcoinplusorg/xbcwalletsource,dogecoin/dogecoin,gravio-net/graviocoin,vertcoin/vertcoin,BitcoinHardfork/bitcoin,shouhuas/bitcoin,RHavar/bitcoin,multicoins/marycoin,droark/bitcoin,chaincoin/chaincoin,simonmulser/bitcoin,jtimon/bitcoin,pstratem/bitcoin,anditto/bitcoin,argentumproject/argentum,reorder/viacoin,nikkitan/bitcoin,ericshawlinux/bitcoin,plncoin/PLNcoin_Core,martindale/elements,globaltoken/globaltoken,bitbrazilcoin-project/bitbrazilcoin,elecoin/elecoin,1185/starwels,oklink-dev/bitcoin,segsignal/bitcoin,mruddy/bitcoin,martindale/elements,plncoin/PLNcoin_Core,tjps/bitcoin,brandonrobertz/namecoin-core,bitcoin/bitcoin,Friedbaumer/litecoin,domob1812/namecore,segsignal/bitcoin,brandonrobertz/namecoin-core,s-matthew-english/bitcoin,Jcing95/iop-hd,n1bor/bitcoin,BigBlueCeiling/augmentacoin,kalle
woof/bitcoin,goldcoin/goldcoin,segsignal/bitcoin,sbaks0820/bitcoin,paveljanik/bitcoin,Kogser/bitcoin,dscotese/bitcoin,bdelzell/creditcoin-org-creditcoin,cculianu/bitcoin-abc,lbrtcoin/albertcoin,BitzenyCoreDevelopers/bitzeny,DigitalPandacoin/pandacoin,Electronic-Gulden-Foundation/egulden,ftrader-bitcoinabc/bitcoin-abc,argentumproject/argentum,ryanxcharles/bitcoin,JeremyRubin/bitcoin,spiritlinxl/BTCGPU,ryanxcharles/bitcoin,Kogser/bitcoin,MazaCoin/maza,dgarage/bc2,practicalswift/bitcoin,destenson/bitcoin--bitcoin,bitbrazilcoin-project/bitbrazilcoin,Bushstar/UFO-Project,EntropyFactory/creativechain-core,gjhiggins/vcoincore,EntropyFactory/creativechain-core,cryptoprojects/ultimateonlinecash,argentumproject/argentum,globaltoken/globaltoken,ixcoinofficialpage/master,prusnak/bitcoin,Xekyo/bitcoin,HashUnlimited/Einsteinium-Unlimited,ixcoinofficialpage/master,wellenreiter01/Feathercoin,sebrandon1/bitcoin,GroestlCoin/GroestlCoin,goldcoin/goldcoin,litecoin-project/litecoin,gmaxwell/bitcoin,anditto/bitcoin,goldcoin/goldcoin,gravio-net/graviocoin,Theshadow4all/ShadowCoin,HashUnlimited/Einsteinium-Unlimited,gameunits/gameunits,dcousens/bitcoin,shouhuas/bitcoin,experiencecoin/experiencecoin,donaloconnor/bitcoin,174high/bitcoin,segsignal/bitcoin,nlgcoin/guldencoin-official,Bushstar/UFO-Project,jambolo/bitcoin,cryptoprojects/ultimateonlinecash,Theshadow4all/ShadowCoin,dgarage/bc2,joshrabinowitz/bitcoin,goldcoin/Goldcoin-GLD,metacoin/florincoin,chaincoin/chaincoin,dpayne9000/Rubixz-Coin,StarbuckBG/BTCGPU,reorder/viacoin,Gazer022/bitcoin,wiggi/huntercore,ixcoinofficialpage/master,magacoin/magacoin,jambolo/bitcoin,UFOCoins/ufo,TheBlueMatt/bitcoin,midnightmagic/bitcoin,Bitcoin-ABC/bitcoin-abc,zetacoin/zetacoin,Xekyo/bitcoin,kallewoof/bitcoin,xieta/mincoin,shaolinfry/litecoin,lbryio/lbrycrd,fujicoin/fujicoin,kazcw/bitcoin,wiggi/huntercore,XertroV/bitcoin-nulldata,EthanHeilman/bitcoin,mincoin-project/mincoin,ElementsProject/elements,ftrader-bitcoinabc/bitcoin-abc,cculianu/bitcoin-abc,jtimo
n/bitcoin,BTCGPU/BTCGPU,patricklodder/dogecoin,uphold/bitcoin,particl/particl-core,stamhe/bitcoin,rnicoll/dogecoin,shaolinfry/litecoin,litecoin-project/litecore-litecoin,jimmysong/bitcoin,anditto/bitcoin,multicoins/marycoin,BitcoinHardfork/bitcoin,nlgcoin/guldencoin-official,sebrandon1/bitcoin,Bushstar/UFO-Project,fanquake/bitcoin,nomnombtc/bitcoin,martindale/elements,FeatherCoin/Feathercoin,jtimon/bitcoin,wiggi/huntercore,Christewart/bitcoin,Chancoin-core/CHANCOIN,bitcoinsSG/bitcoin,bitreserve/bitcoin,jamesob/bitcoin,instagibbs/bitcoin,uphold/bitcoin,OmniLayer/omnicore,Friedbaumer/litecoin,ahmedbodi/terracoin,alecalve/bitcoin,alecalve/bitcoin,deeponion/deeponion,rebroad/bitcoin,n1bor/bitcoin,lbryio/lbrycrd,dscotese/bitcoin,xieta/mincoin,guncoin/guncoin,bitcoinsSG/bitcoin,domob1812/huntercore,rebroad/bitcoin,cryptoprojects/ultimateonlinecash,cdecker/bitcoin,fujicoin/fujicoin,OmniLayer/omnicore,bitbrazilcoin-project/bitbrazilcoin,domob1812/huntercore,haobtc/bitcoin,jmcorgan/bitcoin,achow101/bitcoin,jimmysong/bitcoin,sebrandon1/bitcoin,kallewoof/elements,CryptArc/bitcoin,UFOCoins/ufo,Cocosoft/bitcoin,thrasher-/litecoin,MarcoFalke/bitcoin,kazcw/bitcoin,MeshCollider/bitcoin,ElementsProject/elements,kevcooper/bitcoin,x-kalux/bitcoin_WiG-B,patricklodder/dogecoin,Xekyo/bitcoin,paveljanik/bitcoin,magacoin/magacoin,tecnovert/particl-core,s-matthew-english/bitcoin,elecoin/elecoin,viacoin/viacoin,nbenoit/bitcoin,namecoin/namecoin-core,jamesob/bitcoin,TheBlueMatt/bitcoin,kallewoof/elements,aspanta/bitcoin,shaolinfry/litecoin,GlobalBoost/GlobalBoost,achow101/bitcoin,mincoin-project/mincoin,zcoinofficial/zcoin,jmcorgan/bitcoin,okinc/bitcoin,bespike/litecoin,DigiByte-Team/digibyte,ryanxcharles/bitcoin,bitreserve/bitcoin,shouhuas/bitcoin,laudaa/bitcoin,monacoinproject/monacoin,dcousens/bitcoin,shelvenzhou/BTCGPU,jlopp/statoshi,Anfauglith/iop-hd,ryanofsky/bitcoin,core-bitcoin/bitcoin,Bitcoin-ABC/bitcoin-abc,Kogser/bitcoin,shelvenzhou/BTCGPU,pinheadmz/bitcoin,paveljanik/bitcoin,multi
coins/marycoin,lateminer/bitcoin,sstone/bitcoin,apoelstra/bitcoin,ixcoinofficialpage/master,senadmd/coinmarketwatch,rnicoll/bitcoin,appop/bitcoin,litecoin-project/litecoin,BitcoinHardfork/bitcoin,tdudz/elements,instagibbs/bitcoin,globaltoken/globaltoken,ftrader-bitcoinabc/bitcoin-abc,ahmedbodi/terracoin,sbaks0820/bitcoin,practicalswift/bitcoin,bitcoinplusorg/xbcwalletsource,donaloconnor/bitcoin,awemany/BitcoinUnlimited,xieta/mincoin,instagibbs/bitcoin,jonasschnelli/bitcoin,peercoin/peercoin,ftrader-bitcoinabc/bitcoin-abc,Gazer022/bitcoin,wangxinxi/litecoin,s-matthew-english/bitcoin,chaincoin/chaincoin,Sjors/bitcoin,goldcoin/goldcoin,gazbert/bitcoin,randy-waterhouse/bitcoin,guncoin/guncoin,OmniLayer/omnicore,dpayne9000/Rubixz-Coin,kallewoof/elements,reorder/viacoin,BTCGPU/BTCGPU,DigiByte-Team/digibyte,domob1812/namecore,oklink-dev/bitcoin,guncoin/guncoin,aspanta/bitcoin,pinheadmz/bitcoin,jl2012/litecoin,magacoin/magacoin,aspanta/bitcoin,s-matthew-english/bitcoin,MarcoFalke/bitcoin,MarcoFalke/bitcoin,cdecker/bitcoin,BTCGPU/BTCGPU,Mirobit/bitcoin,multicoins/marycoin,laudaa/bitcoin,vmp32k/litecoin,kallewoof/elements,Bitcoin-ABC/bitcoin-abc,jl2012/litecoin,Anfauglith/iop-hd,viacoin/viacoin,jambolo/bitcoin,Diapolo/bitcoin,tdudz/elements,domob1812/bitcoin,andreaskern/bitcoin,okinc/bitcoin,apoelstra/bitcoin,lateminer/bitcoin,litecoin-project/litecore-litecoin,n1bor/bitcoin,jtimon/bitcoin,bdelzell/creditcoin-org-creditcoin,tecnovert/particl-core,Diapolo/bitcoin,jambolo/bitcoin,ahmedbodi/vertcoin,tdudz/elements,btc1/bitcoin,digibyte/digibyte,psionin/smartcoin,nomnombtc/bitcoin,donaloconnor/bitcoin,ftrader-bitcoinabc/bitcoin-abc,EntropyFactory/creativechain-core,DigitalPandacoin/pandacoin,jiangyonghang/bitcoin,bitreserve/bitcoin,metacoin/florincoin,mb300sd/bitcoin,ahmedbodi/temp_vert,pataquets/namecoin-core,tecnovert/particl-core,wangxinxi/litecoin,emc2foundation/einsteinium,wiggi/huntercore,ajtowns/bitcoin,argentumproject/argentum,randy-waterhouse/bitcoin,itmanagerro/tresting
,Sjors/bitcoin,GroestlCoin/bitcoin,pstratem/bitcoin,GlobalBoost/GlobalBoost,yenliangl/bitcoin,qtumproject/qtum,cdecker/bitcoin,metacoin/florincoin,pstratem/bitcoin,experiencecoin/experiencecoin,svost/bitcoin,rnicoll/bitcoin,ShadowMyst/creativechain-core,nbenoit/bitcoin,x-kalux/bitcoin_WiG-B,GroestlCoin/GroestlCoin,UASF/bitcoin,Theshadow4all/ShadowCoin,paveljanik/bitcoin,namecoin/namecore,Bitcoin-ABC/bitcoin-abc,domob1812/namecore,viacoin/viacoin,metacoin/florincoin,fanquake/bitcoin,dscotese/bitcoin,Alonzo-Coeus/bitcoin,vertcoin/vertcoin,sipsorcery/bitcoin,r8921039/bitcoin,trippysalmon/bitcoin,Michagogo/bitcoin,midnightmagic/bitcoin,goldcoin/goldcoin,Christewart/bitcoin,StarbuckBG/BTCGPU,ppcoin/ppcoin,emc2foundation/einsteinium,mruddy/bitcoin,droark/bitcoin,BitcoinHardfork/bitcoin,Theshadow4all/ShadowCoin,kazcw/bitcoin,elecoin/elecoin,maaku/bitcoin,stamhe/bitcoin,joshrabinowitz/bitcoin,RHavar/bitcoin,jmcorgan/bitcoin,monacoinproject/monacoin,RHavar/bitcoin,Exgibichi/statusquo,StarbuckBG/BTCGPU,dgarage/bc2,destenson/bitcoin--bitcoin,aspanta/bitcoin,FeatherCoin/Feathercoin,bitcoinsSG/bitcoin,Rav3nPL/PLNcoin,alecalve/bitcoin,Chancoin-core/CHANCOIN,rawodb/bitcoin,Alonzo-Coeus/bitcoin,rnicoll/bitcoin,AkioNak/bitcoin,particl/particl-core,fujicoin/fujicoin,lbryio/lbrycrd,GroestlCoin/GroestlCoin,mruddy/bitcoin,sdaftuar/bitcoin,cdecker/bitcoin,daliwangi/bitcoin,bitcoinec/bitcoinec,pstratem/bitcoin,isle2983/bitcoin,matlongsi/micropay,haobtc/bitcoin,ahmedbodi/terracoin,bitcoinec/bitcoinec,kallewoof/bitcoin,Jcing95/iop-hd,haobtc/bitcoin,21E14/bitcoin,pinheadmz/bitcoin,BitzenyCoreDevelopers/bitzeny,mincoin-project/mincoin,fanquake/bitcoin,nomnombtc/bitcoin,digibyte/digibyte,BitcoinPOW/BitcoinPOW,earonesty/bitcoin,experiencecoin/experiencecoin,jmcorgan/bitcoin,EthanHeilman/bitcoin,ElementsProject/elements,Bushstar/UFO-Project,jiangyonghang/bitcoin,Anfauglith/iop-hd,Theshadow4all/ShadowCoin,gzuser01/zetacoin-bitcoin,AkioNak/bitcoin,bitcoinknots/bitcoin,romanornr/viacoin,midnightmag
ic/bitcoin,starwels/starwels,UFOCoins/ufo,zcoinofficial/zcoin,earonesty/bitcoin,HashUnlimited/Einsteinium-Unlimited,core-bitcoin/bitcoin,Friedbaumer/litecoin,zcoinofficial/zcoin,mb300sd/bitcoin,Friedbaumer/litecoin,xieta/mincoin,dscotese/bitcoin,nbenoit/bitcoin,Kogser/bitcoin,thrasher-/litecoin,joshrabinowitz/bitcoin,ppcoin/ppcoin,litecoin-project/litecoin,zetacoin/zetacoin,ajtowns/bitcoin,namecoin/namecoin-core,goldcoin/Goldcoin-GLD,deeponion/deeponion,DigitalPandacoin/pandacoin,sebrandon1/bitcoin,gameunits/gameunits,Flowdalic/bitcoin,Diapolo/bitcoin,djpnewton/bitcoin,laudaa/bitcoin,Rav3nPL/PLNcoin,segwit/atbcoin-insight,Bitcoin-ABC/bitcoin-abc,aspanta/bitcoin,prusnak/bitcoin,gjhiggins/vcoincore,174high/bitcoin,magacoin/magacoin,jtimon/bitcoin,bitcoin/bitcoin,NicolasDorier/bitcoin,nbenoit/bitcoin,brandonrobertz/namecoin-core,ahmedbodi/vertcoin,fsb4000/bitcoin,nomnombtc/bitcoin,starwels/starwels,OmniLayer/omnicore,djpnewton/bitcoin,matlongsi/micropay,Mirobit/bitcoin,BitcoinPOW/BitcoinPOW,wellenreiter01/Feathercoin,Cocosoft/bitcoin,Alonzo-Coeus/bitcoin,goldcoin/Goldcoin-GLD,simonmulser/bitcoin,segwit/atbcoin-insight,Sjors/bitcoin,jl2012/litecoin,gzuser01/zetacoin-bitcoin,namecoin/namecoin-core,gazbert/bitcoin,nathaniel-mahieu/bitcoin,globaltoken/globaltoken,midnightmagic/bitcoin,dgarage/bc3,multicoins/marycoin,bespike/litecoin,kevcooper/bitcoin,svost/bitcoin,BitcoinHardfork/bitcoin,21E14/bitcoin,gazbert/bitcoin,prusnak/bitcoin,1185/starwels,h4x3rotab/BTCGPU,Bitcoin-ABC/bitcoin-abc,cannabiscoindev/cannabiscoin420,goldcoin/Goldcoin-GLD,Xekyo/bitcoin,Christewart/bitcoin,xieta/mincoin,lateminer/bitcoin,reorder/viacoin,haobtc/bitcoin,gzuser01/zetacoin-bitcoin,globaltoken/globaltoken,simonmulser/bitcoin,ericshawlinux/bitcoin,ahmedbodi/temp_vert,jmcorgan/bitcoin,oklink-dev/bitcoin,Cocosoft/bitcoin,rebroad/bitcoin,AkioNak/bitcoin,r8921039/bitcoin,UFOCoins/ufo,particl/particl-core,cculianu/bitcoin-abc,BTCGPU/BTCGPU,cannabiscoindev/cannabiscoin420,lbrtcoin/albertcoin,psionin/s
martcoin,ahmedbodi/vertcoin,Michagogo/bitcoin,romanornr/viacoin,domob1812/namecore,romanornr/viacoin,nikkitan/bitcoin,appop/bitcoin,practicalswift/bitcoin,sdaftuar/bitcoin,lbryio/lbrycrd,dogecoin/dogecoin,domob1812/bitcoin,myriadcoin/myriadcoin,FeatherCoin/Feathercoin,namecoin/namecoin-core,ahmedbodi/temp_vert,fsb4000/bitcoin,ericshawlinux/bitcoin,zetacoin/zetacoin,dogecoin/dogecoin,bitcoin/bitcoin,anditto/bitcoin,sarielsaz/sarielsaz,MazaCoin/maza,AdrianaDinca/bitcoin,Kogser/bitcoin,plncoin/PLNcoin_Core,dgarage/bc3,mitchellcash/bitcoin,matlongsi/micropay,anditto/bitcoin,sipsorcery/bitcoin,lbrtcoin/albertcoin,tdudz/elements,174high/bitcoin,GroestlCoin/bitcoin,ctwiz/stardust,ctwiz/stardust,shouhuas/bitcoin,21E14/bitcoin,jlopp/statoshi,romanornr/viacoin,elecoin/elecoin,ahmedbodi/temp_vert,nathaniel-mahieu/bitcoin,qtumproject/qtum,vertcoin/vertcoin,uphold/bitcoin,laudaa/bitcoin,JeremyRubin/bitcoin,Bitcoin-ABC/bitcoin-abc,gmaxwell/bitcoin,bitcoinplusorg/xbcwalletsource,tjps/bitcoin,Gazer022/bitcoin,Cocosoft/bitcoin,vmp32k/litecoin,anditto/bitcoin,apoelstra/bitcoin,instagibbs/bitcoin,Anfauglith/iop-hd,laudaa/bitcoin,JeremyRubin/bitcoin,gameunits/gameunits,174high/bitcoin,gravio-net/graviocoin,CryptArc/bitcoin,nlgcoin/guldencoin-official,MarcoFalke/bitcoin,nbenoit/bitcoin,afk11/bitcoin,Jcing95/iop-hd,alecalve/bitcoin,dcousens/bitcoin,alecalve/bitcoin,DigiByte-Team/digibyte,guncoin/guncoin,ixcoinofficialpage/master,x-kalux/bitcoin_WiG-B,zcoinofficial/zcoin,gjhiggins/vcoincore,rawodb/bitcoin,btc1/bitcoin,wellenreiter01/Feathercoin,awemany/BitcoinUnlimited,vertcoin/vertcoin,kevcooper/bitcoin,namecoin/namecore,particl/particl-core,bitcoinknots/bitcoin,Bitcoin-ABC/bitcoin-abc,namecoin/namecore,spiritlinxl/BTCGPU,Bitcoin-ABC/bitcoin-abc,simonmulser/bitcoin,vmp32k/litecoin,gjhiggins/vcoincore,mb300sd/bitcoin,qtumproject/qtum,maaku/bitcoin,Flowdalic/bitcoin,gmaxwell/bitcoin,litecoin-project/litecoin,ajtowns/bitcoin,Rav3nPL/PLNcoin,cannabiscoindev/cannabiscoin420,myriadcoin/myriadc
oin,gmaxwell/bitcoin,lbrtcoin/albertcoin,BitcoinPOW/BitcoinPOW,UASF/bitcoin,matlongsi/micropay,Rav3nPL/bitcoin,sipsorcery/bitcoin,BitcoinPOW/BitcoinPOW,okinc/bitcoin,mruddy/bitcoin,senadmd/coinmarketwatch,vertcoin/vertcoin,pinheadmz/bitcoin,BTCGPU/BTCGPU,ryanofsky/bitcoin,midnightmagic/bitcoin,domob1812/huntercore,myriadteam/myriadcoin,prusnak/bitcoin,cryptoprojects/ultimateonlinecash,bitcoin/bitcoin,fanquake/bitcoin,prusnak/bitcoin,nathaniel-mahieu/bitcoin,OmniLayer/omnicore,h4x3rotab/BTCGPU,Christewart/bitcoin,wangxinxi/litecoin,xieta/mincoin,okinc/bitcoin,bespike/litecoin,jnewbery/bitcoin,GroestlCoin/GroestlCoin,bitcoinec/bitcoinec,x-kalux/bitcoin_WiG-B,BitzenyCoreDevelopers/bitzeny,monacoinproject/monacoin,EntropyFactory/creativechain-core,cdecker/bitcoin,emc2foundation/einsteinium,achow101/bitcoin,StarbuckBG/BTCGPU,EthanHeilman/bitcoin,itmanagerro/tresting,apoelstra/bitcoin,stamhe/bitcoin,s-matthew-english/bitcoin,zetacoin/zetacoin,DigitalPandacoin/pandacoin,thrasher-/litecoin,sarielsaz/sarielsaz,bitcoinknots/bitcoin,ftrader-bitcoinabc/bitcoin-abc,andreaskern/bitcoin,digibyte/digibyte,TheBlueMatt/bitcoin,joshrabinowitz/bitcoin,h4x3rotab/BTCGPU,rebroad/bitcoin,martindale/elements,ShadowMyst/creativechain-core,thrasher-/litecoin,zetacoin/zetacoin,GlobalBoost/GlobalBoost,pataquets/namecoin-core,Bitcoin-ABC/bitcoin-abc,wiggi/huntercore,n1bor/bitcoin,chaincoin/chaincoin,domob1812/namecore,brandonrobertz/namecoin-core,rnicoll/bitcoin,pataquets/namecoin-core,ryanofsky/bitcoin,brandonrobertz/namecoin-core,fsb4000/bitcoin,funkshelper/woodcore,tecnovert/particl-core,patricklodder/dogecoin,kallewoof/bitcoin,yenliangl/bitcoin,mruddy/bitcoin,jimmysong/bitcoin,peercoin/peercoin,segwit/atbcoin-insight,nikkitan/bitcoin,GroestlCoin/bitcoin,ryanxcharles/bitcoin,MeshCollider/bitcoin,simonmulser/bitcoin,lbryio/lbrycrd,GroestlCoin/bitcoin,jl2012/litecoin,Xekyo/bitcoin,tecnovert/particl-core,NicolasDorier/bitcoin,Michagogo/bitcoin,bitcoinec/bitcoinec,rnicoll/bitcoin,nomnombtc/bitcoi
n,lateminer/bitcoin,zcoinofficial/zcoin,peercoin/peercoin,FeatherCoin/Feathercoin,jimmysong/bitcoin,shelvenzhou/BTCGPU,oklink-dev/bitcoin,itmanagerro/tresting,Exgibichi/statusquo,Mirobit/bitcoin,practicalswift/bitcoin,daliwangi/bitcoin,jlopp/statoshi,BTCDDev/bitcoin,litecoin-project/litecore-litecoin,n1bor/bitcoin,BitcoinPOW/BitcoinPOW,Jcing95/iop-hd,nathaniel-mahieu/bitcoin,BitzenyCoreDevelopers/bitzeny,aspanta/bitcoin,Kogser/bitcoin,starwels/starwels,Bitcoin-ABC/bitcoin-abc,nikkitan/bitcoin | qa/rpc-tests/p2p-mempool.py | qa/rpc-tests/p2p-mempool.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
def send_mempool(self):
self.lastInv = []
self.send_message(msg_mempool())
class P2PMempoolTests(BitcoinTestFramework):
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-peerbloomfilters=0"]))
def run_test(self):
#connect a mininode
aTestNode = TestNode()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_mempool()
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| mit | Python | |
db9b756dbf68fde9930da8ab6b4594fa3f1d361e | Fix cascades for RecurringEventOverride table | gale320/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,closeio/nylas,ErinCall/sync-engine,wakermahmud/sync-engine,Eagles2F/sync-engine,jobscore/sync-engine,gale320/sync-engine,jobscore/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,gale320/sync-engine,jobscore/sync-engine,jobscore/sync-engine,PriviPK/privipk-sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,ErinCall/sync-engine,closeio/nylas,nylas/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,closeio/nylas,PriviPK/privipk-sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,PriviPK/privipk-sync-engine,nylas/sync-engine,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,closeio/nylas | migrations/versions/175_fix_recurring_override_cascade.py | migrations/versions/175_fix_recurring_override_cascade.py | """fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
| agpl-3.0 | Python | |
730548fe74dda462d7aac1e3c5ee8e8ba47f4371 | Add script that extracts clips from HDF5 file. | HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper | scripts/extract_clips_from_hdf5_file.py | scripts/extract_clips_from_hdf5_file.py | from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
| mit | Python | |
d5aa5aa96aad03b1bd32504b1c9d0a87c1a1c796 | Create y=Wx+b.py | bayvictor/distributed-polling-system,bayvictor/distributed-polling-system,bayvictor/distributed-polling-system,bayvictor/distributed-polling-system,bayvictor/distributed-polling-system | y=Wx+b.py | y=Wx+b.py | import tensorflow as tf
import numpy as np
x_data = np.random.rand(100).astype("float32")
y_data = x_data * .1 +.3
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0 ))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
loss = tf.reduce_mean(tf.square(y - y_data ))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for step in xrange(201):
sess.run(train)
if step % 20 == 0:
print (step, sess.run(W), sess.run(b))
| apache-2.0 | Python | |
a570730af71e3263af2f265a1730db3f808cd201 | Add ex_add_noise.py | waynegm/OpendTect-External-Attributes | Python_3/Miscellaneous/ex_addnoise.py | Python_3/Miscellaneous/ex_addnoise.py | # Add gaussian noise to an input
#
# Copyright (C) 2016 Wayne Mogg All rights reserved.
#
# This file may be used under the terms of the MIT License
# (https://github.com/waynegm/OpendTect-External-Attributes/blob/master/LICENSE)
#
# Author: Wayne Mogg
# Date: September, 2016
# Homepage: http://waynegm.github.io/OpendTect-Plugin-Docs/External_Attributes/ExternalAttributes/
#
# Input: Single trace seismic data
# Output: Seismic data with added gaussian noise
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# The attribute parameters
#
xa.params = {
'Inputs': ['Input'],
'ZSampMargin' : {'Value': [-5,5], 'Hidden': True, 'Symmetric': True},
'Par_0' : {'Name': 'S/N Ratio', 'Value': 1},
'Parallel' : True,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
#
# Define the compute function
#
def doCompute():
#
# Initialise some constants from the attribute parameters
#
zw = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
#
# This is the trace processing loop
#
while True:
xa.doInput()
data = xa.Input['Input'][0,0,:]
#
# Compute noise
#
vardata = np.var(data)
noise = np.random.randn(data.shape[-1])
varnoise = np.var(noise)
scale = vardata/(varnoise*xa.params['Par_0']['Value'])
#
# Output
#
xa.Output = data + scale*noise
xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
| mit | Python | |
b72a4bb06fda18ebca91649808cd2f2c531b392e | Set all events to show banner text | NewAcropolis/api,NewAcropolis/api,NewAcropolis/api | migrations/versions/0060.py | migrations/versions/0060.py | """empty message
Revision ID: 0060 set all show_banner_text
Revises: 0059 add show_banner_text
Create Date: 2021-10-03 00:31:22.285217
"""
# revision identifiers, used by Alembic.
revision = '0060 set all show_banner_text'
down_revision = '0059 add show_banner_text'
from alembic import op
def upgrade():
op.execute("UPDATE events SET show_banner_text = True")
def downgrade():
pass
| mit | Python | |
9132678df072e0c11685aea21c04410fe699ce4f | Create Majority_Element.py | UmassJin/Leetcode | Array/Majority_Element.py | Array/Majority_Element.py | '''
Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
You may assume that the array is non-empty and the majority element always exist in the array.
'''
class Solution:
# @param {integer[]} nums
# @return {integer}
def majorityElement(self, nums):
count = 0
result = 0
for num in nums:
if count == 0:
result = num
count += 1
elif result == num:
count += 1
else:
count -= 1
return result
| mit | Python | |
4b1ac6217d054bd2fe8e5e6b4cfe036e2a4d0360 | Add a template of setup.py. | FGtatsuro/flask-boilerplate,FGtatsuro/flask-boilerplate,FGtatsuro/flask-boilerplate | setup.py | setup.py | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='flask-boilerplate',
version=version,
description='',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
classifiers=[
], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='Flask PasteScript',
author='Tatsuro Fujii',
author_email='fujiistorage@gmail.com',
url='https://github.com/FGtatsuro/flask-boilerplate',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| mit | Python | |
b1f689f82bbb6d26511b6a310be798dad1791fc5 | add setup.py | vanatteveldt/saf | setup.py | setup.py | from distutils.core import setup
setup(
version='0.10',
name="saf",
description="Python toolkit for handling Simple Annotation Framework files",
author="Wouter van Atteveldt",
author_email="wouter@vanatteveldt.com",
packages=["saf"],
classifiers=[
"License :: OSI Approved :: MIT License",
],
)
| mit | Python | |
e37dae306f2dcf17e95a988b332c064fde11fb1a | Create setup.py | chrisdpa/rakolighting | setup.py | setup.py | from setuptools import setup
setup(name='rakolighting',
version='0.1',
description='rakolighting library',
url='https://github.com/chrisdpa/rakolighting',
author='chrisdpa',
author_email='unknown',
license='MIT',
packages=['rakolighting'],
zip_safe=False)
| mit | Python | |
e1c35ee11d281692f916ebf57b38390b90501304 | Create texted.py | introprogramming/exercises,introprogramming/exercises,introprogramming/exercises | exercises/text-editor/texted.py | exercises/text-editor/texted.py | import Tkinter as Tk
import tkFileDialog
# Text Editor Skeleton
def on_new():
# reset path and delete all text in the text box
print "Not implemented"
def on_open():
# let user choose what file to open from a dialog (tkFileDialog)
# replace text in text box with text from file
# handle cancelling of the dialog responsibely
print "Not implemented"
def on_save():
# mimic common "save" behavior
# if the path is already set, save the file using save_file(), otherwise:
# let user choose a file to save the content in the text box to (tkFileDialog)
# make sure the path is valid (not empty), save the file using save_file()
print "Not implemented"
def on_save_as():
# mimic common "save as" behavior
# almost the same as on_save(), difference: this always opens a file dialog
print "Not implemented"
def get_all_text():
# returns all text in the text box
# should be one line of code
# not neccessary but may make the code in other places nicer
print "Not implemented"
def delete_all_text():
# deletes all text in the text box
# should be one line of code
# not neccessary but may make the code in other places nicer
print "Not implemented"
def save_file(save_path, text):
# open file in save_path in write mode
# write the text to the file
# close the file
print "Not implemented"
def read_file(file_path):
# open file in file_path
# return the text
print "Not implemented"
# Initialize application
app = Tk.Tk()
app.title("Your Title Here")
# Sets the geometry on the form widthxheight+x_pos+y_pos
app.geometry("200x300+300+300")
# Save path, empty until file is opened or saved
# Used to mimic common file saving/opening behavior
path = ''
######################################################
# IMPLEMENT UI HERE
######################################################
# MENU BAR EXAMPLE
menu_bar = Tk.Menu()
# Set menu bar as menu for the app
app.config(menu=menu_bar)
# Fill menubar with "File" menu
filemenu = Tk.Menu(menu_bar, tearoff=0)
filemenu.add_command(label="Exit", command=quit)
menu_bar.add_cascade(label="File", menu=filemenu)
# BUTTON EXAMPLE
button = Tk.Button(app, text="Exit", command=quit)
button.pack(side=Tk.BOTTOM, fill=Tk.X)
######################################################
# Start the main event loop (i.e. run the tkinter program)
app.mainloop()
| mit | Python | |
d2cbe26e14e23a4482e54a74da1412c5c0c28500 | Update package info | napuzba/zagoload,napuzba/FileLoader | setup.py | setup.py | from distutils.core import setup
setup(
name = 'fileloader',
packages = ['fileloader'],
version = '0.1',
description = 'Downloading files (http,ftp). Supports cachinhg and allows uniform access to remote and local files',
author = 'napuzba',
author_email = 'kobi@napuzba.com',
url = 'https://github.com/napuzba/FileLoader.git',
download_url = 'https://github.com/napuzba/fileloader/archive/0.1.zip',
keywords = ['download','ftp','http'],
classifiers = [],
) | from distutils.core import setup
setup(
name = 'fileloader',
packages = ['fileloader'],
version = '0.1',
description = 'Downloading files (support http and ftp protocols, cachinhg, allows accessing remote and local files in uniform way',
author = 'napuzba',
author_email = 'kobi@napuzba.com',
url = 'https://github.com/napuzba/FileLoader.git',
download_url = 'https://github.com/napuzba/FileLoader/archive/0.1.zip',
keywords = ['download','ftp','http'],
classifiers = [],
) | mit | Python |
d6ccfdf365b8df4eefcbe1131dd8b19d184b0fa4 | add monkey patch test for convert command. | cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento | bento/commands/tests/test_convert.py | bento/commands/tests/test_convert.py | import sys
from bento.misc.testing \
import \
SubprocessTestCase
from bento.commands.convert \
import \
monkey_patch
class TestMonkeyPath(SubprocessTestCase):
def test_distutils(self):
monkey_patch("distutils", "setup.py")
self.assertTrue("setuptools" not in sys.modules)
def test_setuptools(self):
monkey_patch("setuptools", "setup.py")
self.assertTrue("setuptools" in sys.modules)
| bsd-3-clause | Python | |
082c48bcd747c096abd0cd2970edb8cbb0f3d20b | Add contribution admin | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | features/contributions/admin.py | features/contributions/admin.py | from django.contrib import admin
from . import models
admin.site.register(models.Contribution)
| agpl-3.0 | Python | |
65f903a1de88cee2fdd6fe16cf86aceee3545d7b | Add example | JohnLunzer/flexx,JohnLunzer/flexx,jrversteegh/flexx,jrversteegh/flexx,JohnLunzer/flexx,zoofIO/flexx,zoofIO/flexx | flexx/ui/examples/serve_data.py | flexx/ui/examples/serve_data.py | """
This example demonstrates how data can be provided to the client with the
Flexx asset management system.
There are two ways to provide data: via the asset store (``app.assets``),
and via the session (``some_model.session``). In the former, the data
is shared between sessions. In the latter, the data is specific for the
session (the link to the data includes the session id).
Note that ``add_shared_data()`` and ``add_data()`` both return the link
to the data for convenience. Shared data is always served at
'/_data/shared/filename.ext', so we just use that explicitly here.
Similarly, the data provided by the server can be obtained using Ajax
(i.e. XMLHttpRequest).
"""
import random
import imageio
from flexx import app, event, ui
from flexx.util import png
# Define names of standard images
imageio_standard_images = ['clock.png', 'page.png', 'camera.png', 'coins.png',
'hubble_deep_field.png', 'text.png', 'chelsea.png',
'coffee.png', 'horse.png', 'wikkie.png', 'moon.png',
'astronaut.png', 'immunohistochemistry.png']
# Randomly select a shared image
fname = random.choice(imageio_standard_images)
image_data = png.write_png(imageio.imread(fname))
app.assets.add_shared_data('image.png', image_data)
class Example(ui.Widget):
def init(self):
with ui.VBox():
# Randomly select image - different between sessions
fname = random.choice(imageio_standard_images)
image_data = png.write_png(imageio.imread(fname))
link = self.session.add_data('image.png', image_data)
# Create HTML with the two images
html = '<p>Hit F5 to reload the page (i.e. create a new session)</p>'
html += '<p>This is session %s</p>' % self.session.id
html += '<img src="%s" />' % "/_data/shared/image.png"
html += '<img src="%s" />' % link
ui.Label(text=html)
if __name__ == '__main__':
# Launch the app twice to show how different sessions have different data
m1 = app.launch(Example, 'browser')
m2 = app.launch(Example, 'browser')
app.run()
| bsd-2-clause | Python | |
17018750ac3ea39c4fe5a96c05db2375ecd4973e | Add regression test for #717 | recognai/spaCy,recognai/spaCy,raphael0202/spaCy,honnibal/spaCy,aikramer2/spaCy,raphael0202/spaCy,explosion/spaCy,spacy-io/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,spacy-io/spaCy,raphael0202/spaCy,recognai/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,raphael0202/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,Gregory-Howard/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,recognai/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,raphael0202/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy | spacy/tests/regression/test_issue717.py | spacy/tests/regression/test_issue717.py | # coding: utf8
from __future__ import unicode_literals
import pytest
@pytest.mark.xfail
@pytest.mark.models
@pytest.mark.parametrize('text1,text2', [("You're happy", "You are happy")])
def test_issue717(EN, text1, text2):
"""Test that contractions are assigned the correct lemma."""
doc1 = EN(text1)
doc2 = EN(text2)
assert doc1[1].lemma_ == doc2[1].lemma_
assert doc1[1].lemma == doc2[1].lemma
| mit | Python | |
b7efac523bab70532dd2e703f8d4175ec22b3044 | Add output.base unit test. | amorphic/braubuddy,amorphic/braubuddy,amorphic/braubuddy | braubuddy/tests/outputs/test_base.py | braubuddy/tests/outputs/test_base.py | # -*- coding: utf-8 -*-
"""
Braubuddy Base unit tests
"""
import unittest
from braubuddy.output import base
class IOutput(unittest.TestCase):
def test_map_c_to_symbol(self):
"""c is mapped to °C"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('c'), '°C')
def test_map_C_to_symbol(self):
"""C is mapped to °C"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('C'), '°C')
def test_map_celsius_to_symbol(self):
"""celsius is mapped to °C"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('celsius'), '°C')
def test_map_c_to_symbol(self):
"""Celsius is mapped to °C"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('Celsius'), '°C')
def test_map_f_to_symbol(self):
"""f is mapped to °F"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('f'), '°F')
def test_map_F_to_symbol(self):
"""F is mapped to °F"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('F'), '°F')
def test_map_fahrenheit_to_symbol(self):
"""fahrenheit is mapped to °F"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('fahrenheit'), '°F')
def test_map_Fahrenheit_to_symbol(self):
"""Fahrenheit is mapped to °F"""
self.assertEqual(
base.IOutput.map_temp_units_to_symbol('Fahrenheit'), '°F')
| bsd-3-clause | Python | |
437c45509bb2f6387b83cf7d47e51ce46d1c2776 | Add unit test | WoLpH/py-trello,sarumont/py-trello,Wooble/py-trello,mehdy/py-trello,gchp/py-trello,portante/py-trello,ntrepid8/py-trello,merlinpatt/py-trello,nMustaki/py-trello | tests.py | tests.py | from models import AuthenticationError,AuthenticationRequired
import trello
import unittest
import os
class TestTrello(unittest.TestCase):
def test_login(self):
username = os.environ['TRELLO_TEST_USER']
password = os.environ['TRELLO_TEST_PASS']
try:
trello.login(username, password)
except AuthenticationError:
self.fail("Could not authenticate")
except Exception as e:
self.fail("Unknown error: "+str(e))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | Python | |
2324be51d7ded00ad9b92ededff93b57f8b656c0 | add labeltile program | lunkwill42/homebrewtools | labeltile.py | labeltile.py | #!/usr/bin/env python3
import argparse
from collections import Counter
from math import ceil, floor
import colorsys
import logging
from PIL import Image, ImageDraw
__author__ = 'Morten Brekkevold <morten@snabel.org>'
__copyright__ = '(C) 2015 Morten Brekkevold'
__license__ = 'MIT'
_logger = logging.getLogger('beerlabeltile')
INCH = 25.4 # mm
A4 = (210, 297) # mm
DEFAULT_DPI = 300
DEFAULT_COLOR = (0xff, 0xff, 0xff, 0x00) # transparent white
LIGHTNESS_FACTOR = 1.1 # how much to raise the lightness for the grid color
GUIDE_WIDTH = 3 # pixels
def main():
args = parse_args()
logging.basicConfig(level=logging.getLevelName(args.loglevel.upper()))
label = Image.open(args.inputfile)
dpi_x, dpi_y = label.info.get('dpi', (DEFAULT_DPI, DEFAULT_DPI))
ppmm = (dpi_x/INCH, dpi_y/INCH)
_logger.debug("Image size: %sx%s (%3.1fx%3.1f mm)",
label.size[0], label.size[1],
label.size[0]/ppmm[0], label.size[1]/ppmm[1])
sheet_size = (round(A4[0]*ppmm[0]), round(A4[1]*ppmm[1]))
_logger.debug("Sheet size in pixels: %s", sheet_size)
sheet = Image.new(mode="RGBA", size=sheet_size, color=DEFAULT_COLOR)
fit_x = floor(sheet.size[0] / label.size[0])
fit_y = floor(sheet.size[1] / label.size[1])
_logger.debug("Can fit at most (%sx%s) = %s labels on one A4 sheet",
fit_x, fit_y, fit_x*fit_y)
# Calculate placement of labels on sheet
sum_width = fit_x * label.size[0] + GUIDE_WIDTH * (fit_x+1)
sum_height = fit_y * label.size[1] + GUIDE_WIDTH * (fit_y+1)
start_x = floor(sheet.size[0] / 2 - sum_width / 2)
start_y = floor(sheet.size[1] / 2 - sum_height / 2)
# Draw grid guides
draw = ImageDraw.Draw(sheet)
grid_color = calculate_grid_color(label)
for y in range(start_y+1, start_y+sum_height+1, label.size[1]+GUIDE_WIDTH):
draw.line([(0, y), (sheet.size[0]-1, y)], fill=grid_color,
width=GUIDE_WIDTH)
for x in range(start_x+1, start_x+sum_width+1, label.size[0]+GUIDE_WIDTH):
draw.line([(x, 0), (x, sheet.size[1]-1)], fill=grid_color,
width=GUIDE_WIDTH)
# Place labels on sheet
for x in range(start_x+GUIDE_WIDTH, start_x+sum_width, label.size[0]+GUIDE_WIDTH):
for y in range(start_y+GUIDE_WIDTH, start_y+sum_height, label.size[1]+GUIDE_WIDTH):
sheet.paste(label, box=(x,y))
sheet.save(args.outputfile)
def parse_args():
parser = argparse.ArgumentParser(description='Tile beer labels on A4 pages')
parser.add_argument('-l', metavar='loglevel', type=str,
dest='loglevel', choices=logging._levelToName.values(),
default='WARNING',
help='The log level to use')
parser.add_argument('inputfile', metavar='LABELFILE', type=str,
help='The image file containing the beer label')
parser.add_argument('outputfile', metavar='OUTPUTFILE', type=str,
help='The file to write the resulting sheet to')
return parser.parse_args()
def all_pixels(image):
"""Yields all RGB triplets from an image (alpha channel is excluded)"""
pixels = image.load()
width, height = image.size
for x in range(0, width):
for y in range(0, height):
yield pixels[x,y][:3]
def most_common_color(image):
counter = Counter(all_pixels(image))
color, count = counter.most_common(1)[0]
return color
def calculate_grid_color(image):
"""
Calculates a grid color by getting the most common color in the image and
lightening it slightly
"""
common = most_common_color(image)
_logger.debug("grid: Most common color in label: %s", common)
hls = colorsys.rgb_to_hls(*[c/255 for c in common])
_logger.debug("grid: HLS conversion: %s", hls)
lightness = hls[1] * LIGHTNESS_FACTOR
hls = (hls[0], lightness, hls[2])
_logger.debug("grid: Modified HLS: %s", hls)
grid_color = tuple(round(i*255) for i in colorsys.hls_to_rgb(*hls))
_logger.debug("grid: Resulting RGB: %s", grid_color)
return grid_color
if __name__ == '__main__':
main()
| mit | Python | |
fdcdfb6f710be10cdead865b09d98b4bd0c0cebd | Create tests.py | word-killers/mark2down,word-killers/mark2down,word-killers/mark2down | tests.py | tests.py | pass
| mit | Python | |
6200bce410eb966b97a5edf2ea8efdcd94e736db | test script which creates a tun tunnel and prints what it received. | Gawen/pytun | tests.py | tests.py | import pytun
import logging
import select
def pprint_buf(buf):
""" Dirty & convenient function to display the hexademical
repr. of a buffer.
"""
DEFAULT_SIZE = 4
def hex2(i, l = None):
l = l if l is not None else DEFAULT_SIZE
h = hex(i).upper()[2:]
if len(h) != l:
h = "0" * (l - len(h)) + h
return h
def displayable_char(c):
if ord(c) < 0x20:
c = "."
return c
print " " * DEFAULT_SIZE,
for i in range(16): print hex2(i, 2),
print
raws = []
for i, c in enumerate(buf):
if i % 16 == 0:
if i:
print "\t" + "".join(raws)
raws = []
print hex2(i),
raws.append(displayable_char(c))
print hex2(ord(c), 2),
print " " * (15 - (i % 16)) + "\t" + "".join(raws)
def main():
pytun.logger.setLevel(logging.DEBUG)
logging.basicConfig()
try:
tun = pytun.TunTunnel()
except pytun.Tunnel.NotPermitted:
print
print "*" * 80
print "You do have the rights to access the file %s." % (pytun.TUN_KO_PATH, )
print "Give the access of this file to pytun, or if you trust me,"
print "elevate this current script to root level."
print "*" * 80
print
raise
print "*" * 80
print
print "OK. The tunnel '%s' had been created." % (tun.name, )
print
print "If you want to play with it, first configure it."
print
print "1. Set up the network and set an IP"
print " $ ifconfig %s 192.168.42.1" % (tun.name, )
print
print "2. Add the network route"
print " $ route add -net 192.168.42.0/24 dev %s" % (tun.name, )
print
print "Then, try to ping some IP in this network ..."
print " $ ping 192.168.42.42"
print
print "Or do some UDP netcat magic."
print " $ nc 192.168.42.42 4242 -u"
print
print "Enjoy !"
print
print "*" * 80
try:
while True:
buf = tun.recv()
pytun.logger.info("Packet received !")
pprint_buf(buf)
print
except KeyboardInterrupt:
print "Keyboard interrupt. Closing."
finally:
tun.close()
if __name__ == "__main__":
main()
| mit | Python | |
cc967aa97954be1614ca49489e1b97a940b2ef2b | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/sorting/easy/correctness_and_the_loop_invariant/py/solution.py | hackerrank/algorithms/sorting/easy/correctness_and_the_loop_invariant/py/solution.py | #!/bin/python
def insertion_sort(L):
for i in xrange(1, len(L)):
j = i - 1
key = L[i]
while (j >= 0) and (L[j] > key):
L[j+1], L[j] = L[j], L[j + 1]
j -= 1
m = input()
ar = [int(i) for i in raw_input().strip().split()]
insertion_sort(ar)
print " ".join(map(str,ar))
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.