commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
ce3a5186c8522cb0e8a2f3aa5e843846bb7f4e27 | Remove whitespace from the beginning and the end of the string | kumarisneha/practice_repo | techgig_strip.py | techgig_strip.py | def main():
a=raw_input()
print a.strip()
main()
| mit | Python | |
ac85219bec0eea5619ebec802e74382399b0f87c | Add a VERY simple redis returner | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/returners/redis.py | salt/returners/redis.py | '''
Return data to a redis server
This is a VERY simple example for pushing data to a redis server and is not
nessisarily intended as a usable interface.
'''
import redis
__opts__ = {
'redis.host': 'mcp',
'redis.port': 6379,
'redis.db': '0',
}
def returner(ret):
'''
Return data to a redis data store
'''
serv = redis.Redis(
host=__opts__['redis.host'],
port=__opts__['redis.port'],
db=__opts__['redis.db'])
serv.set(ret['id'] + ':' + red['jid'], str(ret['return']))
| apache-2.0 | Python | |
dfdaac63df7e4d8b381215fafd1f88c2af4781f2 | Update __openerp__.py | slevenhagen/odoomrp-wip-npg,diagramsoftware/odoomrp-wip,esthermm/odoomrp-wip,oihane/odoomrp-wip,ddico/odoomrp-wip,windedge/odoomrp-wip,michaeljohn32/odoomrp-wip,agaldona/odoomrp-wip-1,maljac/odoomrp-wip,diagramsoftware/odoomrp-wip,sergiocorato/odoomrp-wip,Endika/odoomrp-wip,dvitme/odoomrp-wip,StefanRijnhart/odoomrp-wip,jorsea/odoomrp-wip,odoomrp/odoomrp-wip,oihane/odoomrp-wip,Daniel-CA/odoomrp-wip-public,InakiZabala/odoomrp-wip,numerigraphe/odoomrp-wip,esthermm/odoomrp-wip,xpansa/odoomrp-wip,jobiols/odoomrp-wip,jobiols/odoomrp-wip,Eficent/odoomrp-wip,alhashash/odoomrp-wip,Eficent/odoomrp-wip,factorlibre/odoomrp-wip,factorlibre/odoomrp-wip,odoomrp/odoomrp-wip,invitu/odoomrp-wip,odoocn/odoomrp-wip,sergiocorato/odoomrp-wip,raycarnes/odoomrp-wip,alfredoavanzosc/odoomrp-wip-1,agaldona/odoomrp-wip-1,Daniel-CA/odoomrp-wip-public,Antiun/odoomrp-wip | sale_product_variants_types/__openerp__.py | sale_product_variants_types/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Sale product variants types",
"version": "1.0",
"depends": [
"sale_product_variants",
"product_attribute_types",
],
"author": "OdooMRP team",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
],
"category": "Hidden/Dependency",
"website": "http://www.odoomrp.com",
"summary": "",
"description": """
This module extends product variants on sales. It adds the possibility of
defining a custom value when the attribute is of range type.
""",
"data": [
"views/sale_order_view.xml",
],
"installable": True,
"auto_install": True,
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Sale product variants types",
"version": "1.0",
"depends": [
"sale_product_variants",
"product_attribute_types",
],
"author": "OdooMRP team",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
],
"category": "Hidden/Dependency",
"website": "http://www.odoomrp.com",
"summary": "",
"description": """
This module extends product variants on sales. It adds the possibility of
defining a custom value when the attribute is range type.
""",
"data": [
"views/sale_order_view.xml",
],
"installable": True,
"auto_install": True,
}
| agpl-3.0 | Python |
8275a7ccecfcb100b1575737944bde35f64949e9 | Add test for search in varaints query | mociepka/saleor,mociepka/saleor,mociepka/saleor | saleor/graphql/product/tests/test_variant_with_filtering.py | saleor/graphql/product/tests/test_variant_with_filtering.py | from decimal import Decimal
import pytest
from ....product.models import Product, ProductVariant
from ...tests.utils import get_graphql_content
QUERY_VARIANTS_FILTER = """
query variants($filter: ProductVariantFilterInput){
productVariants(first:10, filter: $filter){
edges{
node{
name
sku
}
}
}
}
"""
@pytest.fixture
def products_for_variant_filtering(product_type, category):
products = Product.objects.bulk_create(
[
Product(
name="Product1",
slug="prod1",
category=category,
product_type=product_type,
),
Product(
name="ProductProduct1",
slug="prod_prod1",
category=category,
product_type=product_type,
),
Product(
name="ProductProduct2",
slug="prod_prod2",
category=category,
product_type=product_type,
),
Product(
name="Product2",
slug="prod2",
category=category,
product_type=product_type,
),
Product(
name="Product3",
slug="prod3",
category=category,
product_type=product_type,
),
]
)
ProductVariant.objects.bulk_create(
[
ProductVariant(product=products[0], sku="P1-V1", price_amount=Decimal(15),),
ProductVariant(product=products[0], sku="P1-V2", price_amount=Decimal(15),),
ProductVariant(
product=products[1], sku="PP1-V1", price_amount=Decimal(15), name="XL"
),
ProductVariant(
product=products[2], sku="PP2-V1", price_amount=Decimal(15), name="XXL"
),
ProductVariant(product=products[3], sku="P2-V1", price_amount=Decimal(15),),
ProductVariant(product=products[4], sku="P3-V1", price_amount=Decimal(15),),
]
)
return products
@pytest.mark.parametrize(
"filter_by, variants",
[
({"search": "Product1"}, ["P1-V1", "P1-V2", "PP1-V1"]),
({"search": "Product3"}, ["P3-V1"]),
({"search": "XL"}, ["PP1-V1", "PP2-V1"]),
({"search": "XXL"}, ["PP2-V1"]),
({"search": "PP2-V1"}, ["PP2-V1"]),
({"search": "P1"}, ["P1-V1", "P1-V2", "PP1-V1"]),
],
)
def test_products_pagination_with_filtering(
filter_by,
variants,
staff_api_client,
permission_manage_products,
products_for_variant_filtering,
):
variables = {"filter": filter_by}
response = staff_api_client.post_graphql(
QUERY_VARIANTS_FILTER,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
products_nodes = content["data"]["productVariants"]["edges"]
for index, variant_sku in enumerate(variants):
assert variant_sku == products_nodes[index]["node"]["sku"]
assert len(variants) == len(products_nodes)
| bsd-3-clause | Python | |
96bbf25be25482a7edfd92ec9b956b0bbeab39c4 | Add a basic summary query implementation | agdsn/traffic-service-client,agdsn/traffic-service-client | src/traffic/__init__.py | src/traffic/__init__.py | from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
| bsd-3-clause | Python | |
e9f88f1c43189fe429730c488f4514bf78edea4e | Add python -m mistune cli | lepture/mistune | mistune/__main__.py | mistune/__main__.py | import sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
help='the markdown message to conver',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
help='specifiy a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
| bsd-3-clause | Python | |
4af4d5d293d057bd12454200e7a1a72679c218a5 | Create zipatoconnection.py | johnbrannstrom/zipato-extension,johnbrannstrom/zipato-extension,johnbrannstrom/zipato-extension | src/zipatoconnection.py | src/zipatoconnection.py | import requests
class ZipatoConnection(Settings):
__init__(self, serial):
"""
Initializes a ZipatoConnection.
:param str serial: Zipato Box serial.
"""
self.serial = serial
def set_sensor_status(self, ep, apikey, status):
"""
Set status of a sensor.
:param str ep: Target ep.
:param str ep: Target apikey.
:param bool ep: Status value to set the sensor to.
"""
| mit | Python | |
3e345bc4a17cf53c40ef51cd2ae1732744be7e60 | Add custom form for editing and updating of decks | DummyDivision/Tsune,DummyDivision/Tsune,DummyDivision/Tsune | cardbox/deck_forms.py | cardbox/deck_forms.py | from django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
} | mit | Python | |
95d93518d664c9d8b095061bc854907c29f05623 | Add dummy keygen | ONSdigital/edcdi | tests/__init__.py | tests/__init__.py | from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, PublicFormat, BestAvailableEncryption
import os
f4 = 65537
os.environ['EQ_PUBLIC_KEY'] = './jwt-test-keys/sr-public.pem'
os.environ['EQ_PRIVATE_KEY'] = './jwt-test-keys/sr-private.pem'
os.environ['PUBLIC_KEY'] = './jwt-test-keys/sdx-public.pem'
os.environ['PRIVATE_KEY'] = './jwt-test-keys/sdx-private.pem'
os.environ['PRIVATE_KEY_PASSWORD'] = "digitaleq"
backend = default_backend()
eq_private_key = rsa.generate_private_key(
public_exponent=f4,
key_size=3072,
backend=default_backend()
)
eq_private_bytes = eq_private_key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=BestAvailableEncryption(b'digitaleq')
)
eq_public_key = eq_private_key.public_key().public_bytes(
encoding=Encoding.PEM,
format=PublicFormat.SubjectPublicKeyInfo
)
if not os.path.exists('./jwt-test-keys'):
os.mkdir('./jwt-test-keys')
f = open('./jwt-test-keys/sr-public.pem', 'w')
f.write(eq_public_key.decode('UTF8'))
f.close()
f = open('./jwt-test-keys/sr-private.pem', 'w')
f.write(eq_private_bytes.decode('UTF8'))
f.close()
sde_private_key = rsa.generate_private_key(
public_exponent=f4,
key_size=3072,
backend=default_backend()
)
sde_private_bytes = sde_private_key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=BestAvailableEncryption(b'digitaleq')
)
sde_public_key = sde_private_key.public_key().public_bytes(
encoding=Encoding.PEM,
format=PublicFormat.SubjectPublicKeyInfo
)
f = open('./jwt-test-keys/sdx-public.pem', 'w')
f.write(sde_public_key.decode('UTF8'))
f.close()
f = open('./jwt-test-keys/sdx-private.pem', 'w')
f.write(sde_private_bytes.decode('UTF8'))
f.close() | mit | Python | |
fa8b40b8ebc088f087ff76c36068fea67dae0824 | Add management command for updating genome coordinate names using Ensembl-INSDC mapping | RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode | rnacentral/portal/management/commands/update_coordinate_names.py | rnacentral/portal/management/commands/update_coordinate_names.py | """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
| apache-2.0 | Python | |
6418807dbba9fb946ffeb05aee525c51c2e71f75 | Fix fixture, add doc string | MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug | tests/fixtures.py | tests/fixtures.py | """Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
| mit | Python | |
6f9d04b3d894b4dc3178285f665342a249bbc17c | support script in python for bootstrapping erlang on a new erts | asceth/sinan,asceth/sinan | support/build.py | support/build.py | #! /bin/python
"""Support for building sinan, bootstraping it on a new version of erlang"""
import sys
import os
import commands
from optparse import OptionParser
class BuildError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
ERTS_VERSION = "5.6.3"
BUILD_PATH = "_build/development/apps/%s/ebin"
ERLWARE_PATH = "/usr/local/erlware"
ERLC = "erlc +debug_info "
LOCAL_APPS = [("etask", "0.5.0"),
("sinan", "0.10.0.14"),
("sinan_web_api", "0.1.0.4")]
ERLWARE_APPS = ["fconf-0.3.0.0",
"ktuo-0.4.0.1",
"crary-0.2.3",
"eunit-2.0",
"cryptographic-0.2.1",
"ewlib-0.8.2.0",
"ewrepo-0.18.6.0",
"gas-6.1.1",
"kernel-2.12.3",
"ibrowse-1.4",
"uri-0.2.0"]
def generate_local_path(app):
ebin = "_build/development/apps/%s-%s/ebin" % (app[0], app[1])
include = "_build/development/apps/%s-%s/include" % (app[0], app[1])
if not os.path.isdir(ebin):
raise BuildError(ebin + " is not a directory")
return " -pa %s -I %s " % (ebin, include)
def generate_erlware_path(path):
ebin = "%s/packages/%s/lib/%s/ebin" % (ERLWARE_PATH, ERTS_VERSION, path)
include = "%s/packages/%s/lib/%s/include" % (ERLWARE_PATH, ERTS_VERSION, path)
if not os.path.isdir(ebin):
raise BuildError(ebin + " is not a directory")
return " -pa %s -I %s " % (ebin, include)
def compile_app(app):
ebin = "_build/development/apps/%s-%s/ebin" % (app[0], app[1])
compile_command = ("erlc +debug_info %s %s -o %s/ ./server/%s/src/*.erl" %
(' '.join(map(generate_local_path, LOCAL_APPS)),
' '.join(map(generate_erlware_path, ERLWARE_APPS)),
ebin,
app[0]))
(status, out) = commands.getstatusoutput(compile_command)
if 0 != status:
raise BuildError(out)
def compile_apps():
for app in LOCAL_APPS:
compile_app(app)
def main():
parser = OptionParser()
parser.add_option("-e", "--erlware",
dest="erlware",
type="string",
default="/usr/local/erlware",
help="The location of Erlware")
(options, args) = parser.parse_args()
ERLWARE_PATH = options.erlware
compile_apps()
if __name__ == "__main__":
main()
| mit | Python | |
47c1dfd602281c56973de0d8afe64b923eb29592 | Add unit tests for env module. | dmotles/ebcf-alexa | test/test_env.py | test/test_env.py | from _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
| mit | Python | |
c727cee4dc579f5fe09b54877118a681a2597c47 | add tests for log module | ungarj/mapchete,ungarj/mapchete | test/test_log.py | test/test_log.py | """Test for custom logging functions."""
import logging
from mapchete.log import user_process_logger, driver_logger
def test_user_process_logger():
logger = user_process_logger(__name__)
assert isinstance(logger, logging.Logger)
assert logger.name == "mapchete.user_process.test_log"
def test_driver_logger():
logger = driver_logger(__name__)
assert isinstance(logger, logging.Logger)
assert logger.name == "mapchete.formats.drivers.test_log"
| mit | Python | |
fc9e9b4b9bdee1bd1f6b112c90772702cf60ad2d | Add a unittest-based test suite for scenarios | zaneb/heat-convergence-prototype | test_converge.py | test_converge.py | #!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
| apache-2.0 | Python | |
f65c6f3939c50326eea14bd0dadc77b7c9364dd2 | Add a module to deal with credentials | sigmaris/python-gssapi,sigmaris/python-gssapi,sigmaris/python-gssapi,sigmaris/python-gssapi | gssapi/creds.py | gssapi/creds.py | from __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
| mit | Python | |
0c64ad7f93fc1183ac51be7f1e311659fa070594 | Add som tests for the DB module | ivoire/ReactOBus,ivoire/ReactOBus | tests/test_db.py | tests/test_db.py | # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
import datetime
import json
import pytest
import uuid
import zmq
from lib.db import DB, Message
class ZMQMockSocket(object):
def __init__(self):
self.connected = False
self.opts = {}
self.url = None
self.msgs = []
def setsockopt(self, key, value):
self.opts[key] = value
def connect(self, url):
self.connected = True
self.url = url
def recv_multipart(self):
return self.msgs.pop(0)
class ZMQMock(object):
def __init__(self):
self.sock = ZMQMockSocket()
def socket(self, sock_type):
return self.sock
def test_run(monkeypatch, tmpdir):
zmq_mock = ZMQMock()
def mock_zmq_context():
nonlocal zmq_mock
return zmq_mock
monkeypatch.setattr(zmq.Context, "instance", mock_zmq_context)
dbname = tmpdir.join('testing.sqlite3')
db_url = "sqlite:///%s" % dbname
db = DB({'url': db_url}, "inproc://test_run")
with pytest.raises(IndexError):
db.run()
assert zmq_mock.sock.connected is True
assert zmq_mock.sock.opts == {zmq.SUBSCRIBE: b''}
# Test that wrong message will not make the process crash
zmq_mock.sock.msgs = [[]]
with pytest.raises(IndexError):
db.run()
# Check that the db is empty
session = db.sessions()
assert session.query(Message).count() == 0
# Test that wrong message will not make the process crash
zmq_mock.sock.msgs = [
["org.reactobus.1", str(uuid.uuid1()),
datetime.datetime.utcnow().isoformat(),
"lavaserver", json.dumps({})],
["org.reactobus.2", str(uuid.uuid1()),
datetime.datetime.utcnow().isoformat(),
"lavaserver", json.dumps({})],
["org.reactobus.3", str(uuid.uuid1()),
datetime.datetime.utcnow().isoformat(),
"lavaserver", json.dumps({})],
["org.reactobus.4", str(uuid.uuid1()),
"2016/01/01",
"lavaserver", json.dumps({})],
["org.reactobus.5", str(uuid.uuid1()),
datetime.datetime.utcnow().isoformat(),
"lavaserver", json.dumps({})]
]
with pytest.raises(IndexError):
db.run()
# Check that the db is empty
session = db.sessions()
assert session.query(Message).count() == 4
assert session.query(Message).get(1).topic == "org.reactobus.1"
assert session.query(Message).get(2).topic == "org.reactobus.2"
assert session.query(Message).get(3).topic == "org.reactobus.3"
assert session.query(Message).get(4).topic == "org.reactobus.5"
| agpl-3.0 | Python | |
c88b95bd28b1ece65fc4631f73e95dac5b48f038 | Add new py-fixtures package (#14026) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/py-fixtures/package.py | var/spack/repos/builtin/packages/py-fixtures/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFixtures(PythonPackage):
"""Fixtures, reusable state for writing clean tests and more."""
homepage = "https://launchpad.net/python-fixtures"
url = "https://pypi.io/packages/source/f/fixtures/fixtures-3.0.0.tar.gz"
version('3.0.0', sha256='fcf0d60234f1544da717a9738325812de1f42c2fa085e2d9252d8fff5712b2ef')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
9692c1494e52238fdbc388ef5aba4ae551b46a88 | Create ohmycoins.py | Lavande/ohmycoins,Lavande/ohmycoins | ohmycoins.py | ohmycoins.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
#Put your Ether addresses here in the list
addresses = []
#Etherscan
def get_ether(address):
url = 'https://etherscan.io/address/' + address
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
eth = soup.find_all('table')[0].find_all('td')[1].text.replace('\n','').split(' ')[0]
eth = float(eth.replace(',', ''))
assets = {'ETH': eth}
balancelist = soup.find(id='balancelist')
for i in balancelist.find_all('li')[:-1]:
br = i.a.br.text.split('@')[0]
token = br.split(' ')[1]
amount = float(br.split(' ')[0].replace(',', ''))
if token in assets.keys():
print('Warning: Duplicated token symbol {0}. Using the first one.'.format(token))
continue
assets[token] = amount
return assets
def dict_add(a, b):
for k2, v2 in b.items():
if k2 in a.keys():
a[k2] += v2
else:
a[k2] = v2
return a
mycoins = {}
for address in addresses:
assets = get_ether(address)
mycoins = dict_add(mycoins, assets)
| mit | Python | |
2ad504a1a40e08aea3105642821190f9b928fab7 | create tags package | avelino/django-tags | tags/__init__.py | tags/__init__.py | VERSION = (0, 1, 0, 'dev', 1)
| mit | Python | |
a317e86e0faab308421588f649f6dd7ba65cd03b | Add rscommon/pickle_.py | rstebbing/common,rstebbing/common | rscommon/pickle_.py | rscommon/pickle_.py | ##########################################
# File: pickle_.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import cPickle
# dump
def dump(path, obj, raise_on_failure=True):
own_fid = False
if isinstance(path, basestring):
path = open(path,'w+b')
own_fid = True
try:
cPickle.dump(obj, path, cPickle.HIGHEST_PROTOCOL)
except IOError:
if raise_on_failure:
raise
finally:
if own_fid:
try:
path.close()
except IOError:
pass
# load
def load(file_):
own_fid = False
if isinstance(file_, basestring):
file_ = open(file_,'rb')
own_fid = True
obj = cPickle.load(file_)
if own_fid:
file_.close()
return obj
| mit | Python | |
2a832e8a9a0881200756db5aa99650745c0ecc16 | rename to | beagleterm/beagle-term,beagleterm/beagle-term,RandomlyKnighted/beagle-term,RandomlyKnighted/beagle-term | tools/packing.py | tools/packing.py | #!/usr/bin/env python
import os
import shutil
import subprocess
from contextlib import contextmanager
@contextmanager
def pushd(path):
currentDir = os.getcwd()
os.chdir(path)
yield
os.chdir(currentDir)
def printInfo(message):
print os.path.basename(__file__) + ' >> ' + message
def installDependencies():
printInfo('Start bower install')
with pushd('app'):
subprocess.check_call('bower install', shell=True)
def copy(src, des):
printInfo('Copying ' + src + ' to ' + des)
if os.path.isdir(src):
shutil.copytree(src, des)
else:
if not os.path.exists(os.path.dirname(des)):
os.makedirs(os.path.dirname(des))
shutil.copy2(src, des)
def zip(destFile):
printInfo('Start zipping build directory')
shutil.make_archive(destFile, 'zip', 'build')
printInfo('Created ' + destFile + '.zip')
def copyDeployFiles():
os.makedirs('build')
source_dir = os.path.join(os.getcwd(), 'app')
target_dir = os.path.join(os.getcwd(), 'build')
copy_list = ['index.html', 'manifest.json', 'js/index.js', 'js/background.js', 'js/lib/hterm_all.min.js',
'css/main.css', 'images', 'bower_components/jquery/dist/jquery.min.js',
'bower_components/bootstrap/dist/js/bootstrap.min.js',
'bower_components/bootstrap/dist/css/bootstrap.min.css']
for single_file in copy_list:
source_path = os.path.join(source_dir, single_file)
target_path = os.path.join(target_dir, single_file)
copy(source_path, target_path)
def main():
installDependencies()
copyDeployFiles()
# TODO: Minify js/index.js, js/background.js
zip('archive')
if __name__ == '__main__':
main()
#!/usr/bin/env python
import os
import shutil
import subprocess
from contextlib import contextmanager


@contextmanager
def pushd(path):
    """Temporarily chdir into *path*, restoring the previous cwd on exit.

    Fix: the old version skipped the restoring chdir whenever the body
    raised; the try/finally now guarantees the cwd is always restored.
    """
    currentDir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(currentDir)


def printInfo(message):
    """Print a progress message prefixed with this script's file name."""
    # Parenthesized single-argument print behaves identically on Python 2
    # and 3, so the script is importable by either interpreter.
    print(os.path.basename(__file__) + ' >> ' + message)


def installDependencies():
    """Run ``bower install`` inside the app directory."""
    printInfo('Start bower install')
    with pushd('app'):
        subprocess.check_call('bower install', shell=True)


def copy(src, des):
    """Copy a file or directory tree, creating the destination's parents."""
    printInfo('Copying ' + src + ' to ' + des)
    if os.path.isdir(src):
        shutil.copytree(src, des)
    else:
        if not os.path.exists(os.path.dirname(des)):
            os.makedirs(os.path.dirname(des))
        shutil.copy2(src, des)


def zip(destFile):
    """Archive the deploy directory as <destFile>.zip (shadows builtin zip)."""
    printInfo('Start zipping deploy directory')
    shutil.make_archive(destFile, 'zip', 'deploy')
    printInfo('Created ' + destFile + '.zip')


def copyDeployFiles():
    """Copy the deployable subset of app/ into a fresh deploy/ directory."""
    os.makedirs('deploy')
    source_dir = os.path.join(os.getcwd(), 'app')
    target_dir = os.path.join(os.getcwd(), 'deploy')
    copy_list = ['index.html', 'manifest.json', 'js/index.js', 'js/background.js', 'js/lib/hterm_all.min.js',
                 'css/main.css', 'images', 'bower_components/jquery/dist/jquery.min.js',
                 'bower_components/bootstrap/dist/js/bootstrap.min.js',
                 'bower_components/bootstrap/dist/css/bootstrap.min.css']
    for single_file in copy_list:
        source_path = os.path.join(source_dir, single_file)
        target_path = os.path.join(target_dir, single_file)
        copy(source_path, target_path)


def main():
    installDependencies()
    copyDeployFiles()
    # TODO: Minify js/index.js, js/background.js
    zip('archive')


if __name__ == '__main__':
    main()
| bsd-3-clause | Python |
5bea29f6590adad3479a994dd141dd459350063c | add draft cryptogram analysis tool | amattheisen/crypto | cryptogram.py | cryptogram.py | #!/usr/bin/env python
"""Cryptogram.
Description:
This script statistically analyses a line of text to help solve a cryptogram
Author:
Andrew Mattheisen
Usage:
cryptogram.py <cyphertext>...
cryptogram.py (-h | --help)
cryptogram.py --version
Options:
-h --help Show this screen.
--version Show version.
Additional features to implement:
Look for prefixes
{ex-, over-, un-, or up-}
Look for suffexes
{-ed, -er, -man or -men, or -ful}
Look for 1 letter words (If spaces are provided)
These are most likely {I, a}
Identify the vowels
1 letter words
double vowels are usually {ee, aa}
the most common vowell is 'e' and the least common is 'u'
cyphertext of interest:
tpfccdlfdtte pcaccplircdt dklpcfrp?qeiq lhpqlipqeodf gpwafopwprti izxndkiqpkii krirrifcapnc dxkdciqcafmd vkfpcadf.
cyphertext source:
tweet from NSA https://twitter.com/NSACareers/status/463321993878994945
source of strategies:
http://www.bigfishgames.com/blog/cryptogram-tips-tricks-and-strategies/
"""
from __future__ import print_function
from docopt import docopt
import string
VERSION='1.0'
DATE='2014-05-06'
NAME='Cryptogram'
def main(args):
    """Run every analysis pass over the cyphertext given on the command line."""
    # BUILD CYPHERTEXT: concatenate all words, upper-cased.
    cyphertext = "".join(cypher.upper() for cypher in args['<cyphertext>'])
    print("")
    prettyprint_text('INPUT TEXT', cyphertext)
    # COUNTS
    prettyprint_counts(count_chars(cyphertext))
    # DOUBLES
    highlight_doubles(cyphertext)
    return
def highlight_doubles(text):
print("== DOUBLES ==")
last_c = ""
doubles = ""
for ii,c in enumerate(text):
if ii !=0 and ii%80 == 0:
print('\n', doubles)
doubles = ""
if c == last_c:
doubles = doubles[:-2] + '^^ '
else:
doubles += ' '
print(c, sep='', end='')
last_c = c
if len(doubles)>0:
print('\n', doubles)
print("The most common English doubles are {ll, tt, ss, ee, pp, oo, rr, ff, cc, dd, nn}", '\n')
return
def prettyprint_counts(counts):
    """Print per-letter counts, most frequent first, ten per row."""
    print("== COUNTS ==")
    # Fix: the old `counts.iteritems()` with a tuple-parameter lambda is
    # Python-2-only syntax, although the file already opts into Python 3
    # printing via `from __future__ import print_function`. items() with a
    # subscripting key behaves identically on Python 2 and parses on 3.
    sorted_keys = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    for ii, (key, value) in enumerate(sorted_keys):
        if ii != 0 and ii % 10 == 0:
            print("")
        print(" %1s:%3d" % (key, value), sep='', end='')
    print("\nThe most common English letters are {e, t, s, d, n, r, y}", '\n')
    return
def prettyprint_text(title, text):
    """Print *text* under a '== title ==' banner, wrapped at 80 columns."""
    print("== %s ==" % title)
    for index, char in enumerate(text):
        if index != 0 and index % 80 == 0:
            print("")
        print(char, sep='', end='')
    print("\n")
    return
def count_chars(cyphertext):
    """Return {letter: occurrences} for the ASCII letters in *cyphertext*."""
    counts = {}
    for char in cyphertext:
        # Numbers, symbols, spaces, and punctuation are excluded.
        if char in string.ascii_letters:
            counts[char] = counts.get(char, 0) + 1
    return counts
if __name__ == '__main__':
    # Parse command-line usage from the module docstring via docopt.
    arguments = docopt(__doc__, version='%s %s:%s' % (NAME, VERSION, DATE))
    main(arguments)
| apache-2.0 | Python | |
0590adbbd9325c0d9a9595dfac62caae05dd43e0 | Add leetcode 061 solution | aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets | leetcode/061_rotate_list.py | leetcode/061_rotate_list.py | """ Rotate List
Given a linked list, rotate the list to the right by k places,
where k is non-negative.
Example 1:
Input: 1 -> 2 -> 3 -> 4 -> 5 -> NULL, k = 2
Output: 4 -> 5 -> 1 -> 2 -> 3 -> NULL
Explation:
rotate 1 steps to the right: 5 -> 1 -> 2 -> 3 -> 4 -> NULL
rotate 2 steps to the right: 4 -> 5 -> 1 -> 2 -> 3 -> NULL
Example 2:
Input: 0 -> 1 -> 2 -> NULL, k = 4
Output: 2 -> 0 -> 1 -> NULL
Explanation:
rotate 1 steps to the right: 2 -> 0-> 1-> NULL
rotate 2 steps to the right: 1 -> 2-> 0-> NULL
rotate 3 steps to the right: 0 -> 1-> 2-> NULL
rotate 4 steps to the right: 2 -> 0-> 1-> NULL
"""
from typing import List
class ListNode:
    """Minimal singly linked list node with the helpers used by the tests."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def toString(self):
        """Render the list reachable from this node as 'v1=>v2=>...'."""
        values = [self.val]
        node = self.next
        while node is not None:
            values.append(node.val)
            node = node.next
        return '=>'.join(str(value) for value in values)

    @classmethod
    def createList(cls, ints: List[int]) -> 'ListNode':
        """Build a linked list from a non-empty list of ints; return its head."""
        head = cls(ints[0])
        tail = head
        for value in ints[1:]:
            tail.next = ListNode(value)
            tail = tail.next
        return head
class Solution:
    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """Rotate the list right by k places and return the new head."""
        if head is None:
            return None
        if head.next is None:
            return head
        # Find the tail and the length in a single pass.
        length = 1
        tail = head
        while tail.next is not None:
            tail = tail.next
            length += 1
        # Temporarily join tail to head so the list forms a ring.
        tail.next = head
        # Walk forward to the node that will precede the new head.
        steps = length - k % length
        new_head = head
        while steps > 0:
            new_head = new_head.next
            tail = tail.next
            steps -= 1
        # Break the ring just before the new head.
        tail.next = None
        return new_head
if __name__ == '__main__':
    cases = [
        ((ListNode.createList([1, 2, 3, 4, 5]), 1), ListNode.createList([5, 1, 2, 3, 4])),
        ((ListNode.createList([1, 2, 3, 4, 5]), 2), ListNode.createList([4, 5, 1, 2, 3])),
        ((ListNode.createList([0, 1, 2]), 4), ListNode.createList([2, 0, 1])),
        ((ListNode.createList([0, 1, 2]), 3), ListNode.createList([0, 1, 2])),
        ((ListNode.createList([0, 1, 2]), 2), ListNode.createList([1, 2, 0])),
        ((ListNode.createList([0, 1, 2]), 1), ListNode.createList([2, 0, 1])),
    ]  # yapf: disable
    for (head, k), expected in cases:
        for S in [Solution]:
            assert S().rotateRight(head, k).toString() == expected.toString()
| mit | Python | |
8e8c14446a0089ee7fa57cfd5520c7d6d6e2711e | Add Python user customization file. | fossilet/dotfiles,fossilet/dotfiles,fossilet/dotfiles | usercustomize.py | usercustomize.py | """ Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
# Route uncaught-exception reports through the stdlib cgitb hook, rendered
# as plain text rather than HTML (cgitb was removed from the stdlib in 3.13).
import cgitb

cgitb.enable(format='text')
| mit | Python | |
13be4749aef2415ab84ffbd090c5b24d8ed98af5 | Add test case of BloArticle class | 10nin/blo,10nin/blo | tests/TestBloArticle.py | tests/TestBloArticle.py | import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
    """Unit tests for the BloArticle loader."""

    def setUp(self):
        self.blo_article = BloArticle()

    def test_failed_load_from_file(self):
        # An empty path must raise instead of silently loading nothing.
        with self.assertRaises(FileNotFoundError):
            self.blo_article.load_from_file("")

    def test_success_load_from_file(self):
        file_path = "./test_article_1.md"
        self.assertIsNone(self.blo_article.load_from_file(file_path))
        self.assertFalse(self.blo_article._raw_text == "")

    def test_convert_to_html(self):
        pass

    def test_get_digest(self):
        pass
| mit | Python | |
4442fabf9292efa44a82f420e2d3e807d7d15b04 | Add more tests to cli | eiginn/passpie,eiginn/passpie,scorphus/passpie,scorphus/passpie,marcwebbie/passpie,marcwebbie/passpie | tests/test_cli.py | tests/test_cli.py | from click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
    """Patch passpie's Database with an in-memory TinyDB holding 3 credentials."""
    credentials = [
        {'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
         'password': '', 'comment': ''},
        {'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
         'password': '', 'comment': ''},
        {'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
         'password': '', 'comment': ''},
    ]
    database = TinyDB(storage=MemoryStorage)
    database.insert_multiple(credentials)
    mocker.patch('passpie.cli.Database', mock.MagicMock(return_value=database))
    return database


def test_cli_search_find_results_by_login_regex(mock_db):
    result = CliRunner().invoke(cli.search, ['fo[oa]'])
    assert result.exit_code == 0
    assert 'foo' in result.output
    assert 'foa' in result.output
    assert 'spam' not in result.output


def test_cli_remove_delete_credential_found_by_database(mock_db):
    runner = CliRunner()
    result = runner.invoke(cli.remove, ['foo@bar'], input='y')
    result_print = runner.invoke(cli.cli)
    assert result.exit_code == 0
    assert 'foo' not in result_print.output


def test_cli_add_credential_to_database(mock_db):
    fullname = 'test_user@example'
    result = CliRunner().invoke(cli.add, [fullname, '--random'])
    assert result.exit_code == 0
    assert mock_db.get(where('fullname') == fullname)


def test_cli_copy_credential_password_to_database(mocker, mock_db):
    fullname = 'foo@bar'
    password = 's3cr3t'
    mocker.patch('passpie.cli.ensure_passphrase')
    mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
    mocker.patch('passpie.cli.Cryptor.decrypt',
                 mock.Mock(return_value=password))
    result = CliRunner().invoke(cli.copy, [fullname], input='passphrase')
    assert result.exit_code == 0
    assert mock_pyperclip.copy.called
    mock_pyperclip.copy.assert_called_once_with(password)
| mit | Python | |
b2e10a344a940ae2cce9656c435c7a6f4919a53b | add cli invoke tests | NLNOG/bgpfu,grizz/bgpfu,bgpfu/bgpfu | tests/test_cli.py | tests/test_cli.py |
import pytest
from click.testing import CliRunner
import bgpfu.cli
def test_cli_invoke():
    """Smoke-test that each top-level subcommand can be invoked."""
    runner = CliRunner()
    for subcommand in ('as_set', 'prefixlist', 'raw'):
        res = runner.invoke(bgpfu.cli.cli, [subcommand])
| apache-2.0 | Python | |
c659f31cfb3eadd66838036ea285070f564fdced | Add rendering test | kmike/psd-tools,psd-tools/psd-tools,kmike/psd-tools | tests/test_rendering.py | tests/test_rendering.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from PIL.Image import Image
from psd_tools.user_api.psd_image import PSDImage, merge_layers
from tests.utils import decode_psd, full_name
CLIP_FILES = [
    ('clipping-mask.psd',),
    ('clipping-mask2.psd',),
]


@pytest.mark.parametrize(("filename",), CLIP_FILES)
def test_render_clip_layers(filename):
    """Plain and merged rendering of clipped PSDs both yield PIL images."""
    psd = PSDImage.load(full_name(filename))
    rendered = psd.as_PIL()
    merged = psd.as_PIL_merged()
    assert isinstance(rendered, Image)
    assert isinstance(merged, Image)
| mit | Python | |
436719050ada475d840004a49c693d08c3f92034 | Add a widget for line editors. | pnomolos/greatbigcrane,pnomolos/greatbigcrane | greatbigcrane/project/widgets.py | greatbigcrane/project/widgets.py | from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
    """Textarea that upgrades itself into a jQuery line editor on page load."""

    class Media:
        js = ('js/jquery-1.4.2.min.js', 'js/jquery.lineeditor.js')

    def render(self, name, value, attrs=None):
        # The editor works on newline-separated text, so flatten list values.
        if isinstance(value, list):
            value = "\n".join(value)
        rendered = super(LineEditorWidget, self).render(name, value, attrs)
        return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
| apache-2.0 | Python | |
52d03e19bd61dcba56d1d7fc3944afcc6d9b6a8d | make nautilus use backspace for back | keithieopia/dotfiles,keithieopia/dotfiles | .local/share/nautilus-python/extensions/BackspaceBack.py | .local/share/nautilus-python/extensions/BackspaceBack.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# by Ricardo Lenz, 2016-jun
# riclc@hotmail.com
#
import os, gi
gi.require_version('Nautilus', '3.0')
from gi.repository import GObject, Nautilus, Gtk, Gio, GLib
def ok():
    """Bind BackSpace to the window's 'up' action (go to the parent folder)."""
    app = Gtk.Application.get_default()
    app.set_accels_for_action("win.up", ["BackSpace"])


class BackspaceBack(GObject.GObject, Nautilus.LocationWidgetProvider):
    """Nautilus extension hook; (re)installs the accelerator per location."""

    def __init__(self):
        pass

    def get_widget(self, uri, window):
        ok()
        return None
| mit | Python | |
e778f67101a9ba8e38e249263d49738d3239f557 | test select prefix cursor | hardc0d3/sppy,hardc0d3/sppy,hardc0d3/sppy | test_p_cursor.py | test_p_cursor.py |
import sys
sys.path.append('./build/lib.linux-x86_64-2.7/')
from voidptr import VoidPtr as vp
import spapi as sp
env = vp("env")
ctl = vp("ctl")
db = vp("db")
o = vp("o")
t = vp("t")
print "env", sp.env(env)
print "env,ctl", sp.ctl(env,ctl)
print "ctl_set", sp.ctl_set(ctl,"sophia.path","./test_data")
print "ctl_set", sp.ctl_set(ctl,"db","test_p")
print "open env", sp.open(env)
print "db", sp.db(ctl,"db.test_p",db)
print "transaction begin", sp.begin(env,t)
for j in xrange (6)
for k in xrange(6):
print "prepare obj" , sp.obj(db, o)
key = "a%db%dc%d" % (k,k,k)
print "key -> ",key
print "set field",sp.set_field(o,"key",key )
print "set fieled",sp.set_field(o,"value","abcd%d" % k)
print "db set obj",sp.db_set( t, o )
print "transaction commit", sp.commit(t)
cursor = vp("cursor")
#cursor2 = vp("cursor2")
#o2 = vp("o2")
#print "obj db o2" , sp.obj(db, o2)
print "obj db o" , sp.obj(db, o)
print "set key ", sp.set_field(o,"key","a5")
print "set order > to o",sp.set_field(o,"order",">")
#print "set order > to o2",sp.set_field(o2,"order",">")
print "inti cursors"
print cursor.tag, sp.cursor(db,o,cursor)
#print cursor2.tag, sp.cursor(db,o2,cursor2)
while sp.cursor_get(cursor,o):
print sp.get_field(o,"key")
#if sp.cursor_get(cursor2,o2):
# print sp.get_field(o2,"key")
| bsd-2-clause | Python | |
2a1777a74d6f2cba61485f281f0c048cbbdca727 | Add valgrind tests file. | CWSL/access-om | test_valgrind.py | test_valgrind.py |
from __future__ import print_function
import shutil
import os
from model_test_helper import ModelTestHelper
# Fix: the values were written as ('om') / ('cm'), which are plain strings,
# so tests[key][0] yielded 'o' / 'c'; one-element tuples restore the intent.
tests = {'om_360x300-valgrind': ('om',),
         'cm_360x300-valgrind': ('cm',)}


class TestValgrind(ModelTestHelper):
    """
    Run the model in valgrind.
    """

    def __init__(self):
        # Fix: super() previously named the nonexistent class TestBasicRun,
        # which raised NameError as soon as the class was instantiated.
        super(TestValgrind, self).__init__()

    def pre_run_cleanup(self, exp):
        paths = self.make_paths(exp)
        try:
            shutil.rmtree(paths['archive'])
            os.remove(paths['archive_link'])
        # 'as' form is valid on Python 2.6+ and required on Python 3.
        except OSError as e:
            if not e.strerror == 'No such file or directory':
                raise e

    def check_run(self, key):
        print('############ Running {} ############'.format(key))
        self.pre_run_cleanup(key)
        self.do_basic_access_run(key, model=tests[key][0])
        # FIXME: check that valgrind does not find any problems.

    def test_runs(self):
        for k in tests.keys():
            yield self.check_run, k
| apache-2.0 | Python | |
f9273e7b905bdc94f3e161b17225a11120810b26 | handle core serice by self-defined-class | leVirve/GooTool | google_service.py | google_service.py | import httplib2
import os
import oauth2client
from apiclient import discovery
from oauth2client import client, tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
class Gooooogle():
    """Wire OAuth2 credentials to a Google API service object.

    NOTE(review): subclasses appear expected to provide CREDENTIALS_NAME,
    CLIENT_SECRET_FILE, SCOPES, APPLICATION_NAME, API_NAME and API_VERSION
    as class attributes — confirm against callers.
    """

    def __init__(self):
        self.credentials = self._get_credentials()
        self.service = self._new_service()

    def _get_credentials(self):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns:
            Credentials, the obtained credential.
        """
        credential_dir = os.path.join('./', '.credentials')
        if not os.path.exists(credential_dir):
            os.makedirs(credential_dir)
        credential_path = os.path.join(credential_dir, self.CREDENTIALS_NAME)
        store = oauth2client.file.Storage(credential_path)
        credentials = store.get()
        if credentials and not credentials.invalid:
            return credentials
        flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)
        flow.user_agent = self.APPLICATION_NAME
        if flags:
            return tools.run_flow(flow, store, flags)
        # Needed only for compatability with Python 2.6
        return tools.run(flow, store)

    def _new_service(self):
        """Creates a Google Drive API service object."""
        authorized_http = self.credentials.authorize(httplib2.Http())
        return discovery.build(self.API_NAME, self.API_VERSION, http=authorized_http)
| mit | Python | |
556530f4933b1323ef8e4414c324a0aa2d0b81bd | Add the example bundles. | juju/juju-gui-charm,juju/juju-gui-charm | tests/example.py | tests/example.py | # This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Example data used in tests."""
# Two example juju bundles (YAML) used as test fixtures.
BUNDLE1 = """
bundle1:
  series: precise
  services:
    wordpress:
      charm: "cs:precise/wordpress-15"
      num_units: 1
      options:
        debug: "no"
        engine: nginx
        tuning: single
        "wp-content": ""
      annotations:
        "gui-x": 313
        "gui-y": 51
    mysql:
      charm: "cs:precise/mysql-26"
      num_units: 1
      options:
        "binlog-format": MIXED
        "block-size": "5"
        "dataset-size": "80%"
        flavor: distro
        "ha-bindiface": eth0
        "ha-mcastport": "5411"
        "max-connections": "-1"
        "preferred-storage-engine": InnoDB
        "query-cache-size": "-1"
        "query-cache-type": "OFF"
        "rbd-name": mysql1
        "tuning-level": safest
        vip: ""
        vip_cidr: "24"
        vip_iface: eth0
      annotations:
        "gui-x": 669.5
        "gui-y": -33.5
  relations:
    - - "wordpress:db"
      - "mysql:db"
"""

BUNDLE2 = """
bundle2:
  series: precise
  services:
    mediawiki:
      charm: "cs:precise/mediawiki-9"
      num_units: 1
      options:
        admins: ""
        debug: false
        logo: ""
        name: Please set name of wiki
        skin: vector
      annotations:
        "gui-x": 432
        "gui-y": 120
  relations: []
"""
| agpl-3.0 | Python | |
1dec974693222864537b20b31ac33656bea92912 | add LogFactory | hezhiming/py3utils,hezhiming/py3utils | py3utils/_log.py | py3utils/_log.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# date: 2018/4/15
# author: he.zhiming
#
from __future__ import unicode_literals, absolute_import
import logging
import logging.config
from logging import handlers
class LogFactory:
    """Factory for loggers configured via ``logging.config.dictConfig``."""

    # Fix: dictConfig requires a 'version' key; the previous empty dict made
    # dictConfig raise ValueError at import time, so the module never loaded.
    _LOG_CONFIG_DICT = {
        'version': 1,
        # Do not silently disable loggers created before this module loads.
        'disable_existing_loggers': False,
    }

    logging.config.dictConfig(_LOG_CONFIG_DICT)

    @classmethod
    def get_logger(cls, logger_name) -> logging.Logger:
        """Return the logger registered under *logger_name*."""
        return logging.getLogger(logger_name)


DEBUGGER = LogFactory.get_logger('debugger')
CONSOLE_LOGGER = LogFactory.get_logger('console_logger')
| mit | Python | |
6f9d02510ad861bf8ae5ad8f1ae335a4e565756d | Add initial unit tests for io module | PyCQA/isort,PyCQA/isort | tests/test_io.py | tests/test_io.py | from unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
    def test_read(self, tmpdir):
        content = """# -*- encoding: ascii -*-
import ☺
"""
        test_file = tmpdir.join("file.py")
        test_file.write(content)
        # Reading succeeds despite the wrong declared encoding, because the
        # bytes are still UTF-8 decodable.
        assert io.File.read(test_file).contents == content
        # ...unless the preferred locale encoding is also ASCII.
        with pytest.raises(io.UnableToDetermineEncoding):
            with patch("locale.getpreferredencoding", lambda value: "ascii"):
                io.File.read(test_file).contents
| mit | Python | |
d94260f0be472d2c163e9ae57aacc25a8e9f2519 | Package contrib | google-research/t5x | t5x/contrib/__init__.py | t5x/contrib/__init__.py | # Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This empty file is needed for packaging the contrib modules."""
| apache-2.0 | Python | |
b0470bbde7c477e8f176fa0529a1d90eca85caba | Add survey support and use it for IPP. | benjello/openfisca-france,SophieIPP/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france,SophieIPP/openfisca-france,adrienpacifico/openfisca-france,benjello/openfisca-france,antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,adrienpacifico/openfisca-france | openfisca_france/surveys.py | openfisca_france/surveys.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import numpy as np
from openfisca_core import simulations
from . import entities
def new_simulation_from_survey_data_frame(compact_legislation = None, debug = False, survey = None, tax_benefit_system = None, year = None):
    """Build a Simulation dated January 1st of *year* and fill it from *survey*."""
    simulation = simulations.Simulation(
        compact_legislation = compact_legislation,
        date = datetime.date(year, 1, 1),
        debug = debug,
        tax_benefit_system = tax_benefit_system,
        )

    # Every survey column must correspond to a known tax-benefit column.
    column_by_name = tax_benefit_system.column_by_name
    for column_name, series in survey.iteritems():
        assert column_name in column_by_name, column_name

    # Entity counts: one famille/foyer/menage per role-0 individual.
    familles = entities.Familles(simulation = simulation)
    familles.count = familles.step_size = familles_step_size = (survey.quifam == 0).sum()
    foyers_fiscaux = entities.FoyersFiscaux(simulation = simulation)
    foyers_fiscaux.count = foyers_fiscaux.step_size = foyers_fiscaux_step_size = (survey.quifoy == 0).sum()
    individus = entities.Individus(simulation = simulation)
    individus.count = individus.step_size = individus_step_size = len(survey)
    menages = entities.Menages(simulation = simulation)
    menages.count = menages.step_size = menages_step_size = (survey.quimen == 0).sum()

    # Identification and role columns are mandatory.
    for required_column in ('age', 'agem', 'idfam', 'idfoy', 'idmen', 'noi',
                            'quifam', 'quifoy', 'quimen'):
        assert required_column in survey.columns

    familles.roles_count = survey['quifam'].max() + 1
    menages.roles_count = survey['quimen'].max() + 1
    foyers_fiscaux.roles_count = survey['quifoy'].max() + 1

    simulation.set_entities(dict(
        familles = familles,
        foyers_fiscaux = foyers_fiscaux,
        individus = individus,
        menages = menages,
        ))

    # Feed each survey column into its holder; non-person entities keep only
    # the rows of their role-0 member.
    for column_name, column_series in survey.iteritems():
        holder = simulation.new_holder(column_name)
        entity = holder.entity
        if holder.entity.is_persons_entity:
            array = column_series.values
        else:
            array = column_series.values[survey['qui' + entity.symbol].values == 0]
        assert array.size == entity.count, 'Bad size for {}: {} instead of {}'.format(
            column_name, array.size, entity.count)
        holder.array = np.array(array, dtype = holder.column._dtype)
    return simulation
| agpl-3.0 | Python | |
0f32a1e193a0064e5d5313cdc205d15cea71f1e7 | Test for a long hippo scrolling view. | sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,manuq/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,ceibal-tatu/sugar-toolkit,i5o/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,i5o/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3 | tests/graphics/hipposcalability.py | tests/graphics/hipposcalability.py | import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()

canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()

scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)

box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)


def idle_cb():
    """Append 100 rows per idle tick until the countdown reaches zero."""
    global countdown
    for _ in range(0, 100):
        entry = CanvasRoundBox(padding=10, spacing=10)
        for _ in range(0, 3):
            entry.append(CanvasIcon(icon_name='go-left'))
        for j in range(0, 2):
            entry.append(hippo.CanvasText(text='Text %s %s' % (countdown, j)))
        box.append(entry)
        countdown -= 1
    # Keep the idle handler installed while rows remain to be added.
    return countdown > 0


countdown = 1000
gobject.idle_add(idle_cb)

test.show()

if __name__ == "__main__":
    common.main(test)
| lgpl-2.1 | Python | |
f0204e3061b110028fde5312fdb7b613e361b16e | Create output.py | RonsenbergVI/trendpy,RonsenbergVI/trendpy | trendpy/output.py | trendpy/output.py | # -*- coding: utf-8 -*-
# output.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| mit | Python | |
a78fe01101de6143885f2559a519024a86d97315 | Add new command dev_guess_downloader. | AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com | allmychanges/management/commands/dev_guess_downloader.py | allmychanges/management/commands/dev_guess_downloader.py | # coding: utf-8
from django.core.management.base import BaseCommand
from twiggy_goodies.django import LogMixin
from allmychanges.downloader import guess_downloader
class Command(LogMixin, BaseCommand):
    """Management command that tests the downloader guesser for a given URL."""

    # Fix: 'workds' typo in the user-visible help text.
    help = u"""Command to test how downloader guesser works for given url."""

    def handle(self, *args, **options):
        url = args[0]
        # Parenthesized single-argument print is identical on Python 2.
        print(guess_downloader(url))
| bsd-2-clause | Python | |
c5dbebe13e2c1c7018a1701e3c8e37ae29f9a387 | add solution for Reverse Integer | zhyu/leetcode,zhyu/leetcode | src/reverseInteger.py | src/reverseInteger.py | class Solution:
class Solution:
    # @return an integer
    def reverse(self, x):
        """Reverse the decimal digits of a 32-bit int; return 0 on overflow."""
        int_max = 2147483647
        # Fix: use floor division so limit stays an int under Python 3 as
        # well; for Python 2 ints, // is identical to the old /.
        limit = int_max // 10
        if x > 0:
            sig = 1
        elif x < 0:
            sig = -1
            x = -x
        else:
            return x
        y = 0
        while x:
            if y > limit:
                # Appending another digit would exceed the 32-bit range.
                return 0
            y = y * 10 + (x % 10)
            x //= 10
        return y * sig
| mit | Python | |
6f3ae8a9e8a400b8882cf57fa0753c1c44b85c2a | Create pdf_all.py | moshekaplan/PDF-Tools | pdf_all.py | pdf_all.py | #!/usr/bin/env python
# encoding:UTF-8
"""
This script runs a file through all of the PDF tools
"""
import sys
import pdf_js
import pdf_links
import pdf_strings
import pdf_openaction
def run_all(fpath):
    """Run the file at *fpath* through every PDF analysis tool in turn."""
    print("*" * 20 + "PDF OpenAction" + "*" * 20)
    pdf_openaction.extract_openactions(fpath)
    print("*" * 20 + "PDF URLs" + "*" * 20)
    pdf_links.extract_urls(fpath)
    print("*" * 20 + "PDF JavaScript" + "*" * 20)
    pdf_js.extract_js(fpath)
    print("*" * 20 + "PDF Strings" + "*" * 20)
    pdf_strings.get_strings(fpath)


def main():
    if len(sys.argv) < 2:
        print("USAGE: %s %s <filename>" % (sys.executable, sys.argv[0]))
        sys.exit(1)
    run_all(sys.argv[1])


if __name__ == "__main__":
    main()
| bsd-2-clause | Python | |
288a59cfeade739260a1f76cf632d735677022be | Add the start of some test for the scoring stuff. | prophile/compd,prophile/compd | src/test_scores_db.py | src/test_scores_db.py | import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
    """set_match_score must write under comp:scores:<match>:<team>:game."""
    stub_connection = mock.Mock()
    stub_connection.set = mock.Mock()
    with mock.patch('redis_client.connection', stub_connection):
        scores_db.scores.set_match_score(1, 'ABC', 12)
    stub_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
| mit | Python | |
a0e4ba8dbdd14f51d17d2fb1c4e0829894d7cd10 | Add utility file for playbook | lozadaOmr/ansible-admin,lozadaOmr/ansible-admin,lozadaOmr/ansible-admin | src/utils/playbook.py | src/utils/playbook.py | from django.conf import settings
from ansible.models import Playbook
import os
def content_loader(pk, slug):
    """Return the path of the playbook YAML file for the given pk/slug."""
    playbook = Playbook.query_set.get(pk=pk)
    # TODO: for now assume without validation
    return os.path.join(playbook.directory, slug + '.yml')
| bsd-3-clause | Python | |
c517e0cb2de9cd813e4b49b6786a07e01005f0b5 | Add fully functional code, prints field and sprinkler coordinates. | mitchtz/Crop_Maximizer | Max_Crop.py | Max_Crop.py | #Find optimal position for a sprinkler in a field with randomly places crops.Takes in h,w,r, where h is height, w is width, r is the sprinker radius
#http://www.reddit.com/r/dailyprogrammer/comments/2zezvf/20150318_challenge_206_intermediate_maximizing/
import random
import math
#Creates field with random crop placements
def create_field(height, width):
#Choices for what can be in field. "." for nothing, "x" for crop
option = [".", "x"]
#List of lists for field
field = []
#Iterate through dimensions and create field
#Iterate through height
for i in range(height):
#Create new row for field
field.append([])
#Iterate through width
for j in range(width):
field[i].append(random.choice(option))
return field
#Finds best place for sprinkler, takes in field, height, width, and radius of sprinkler
def place_sprinkler(field, height, width, radius):
    """Scan every candidate cell and return ``[row, col, crops_covered]``
    for the position that waters the most crops (first best wins)."""
    # Original heuristic, preserved verbatim: only when BOTH dimensions are
    # smaller than the radius does the scan start radius/2 in from the
    # top-left corner; otherwise every cell is considered.
    start = radius // 2 if (width < radius and height < radius) else 0
    best_row, best_col, best_covered = 0, 0, 0
    for row in range(start, height):
        for col in range(start, width):
            covered = coverage_count(field, row, col, radius, height, width)
            # Strict '>' keeps the first-encountered maximum, as before.
            if covered > best_covered:
                best_row, best_col, best_covered = row, col, covered
    return [best_row, best_col, best_covered]
#Return count of crops that are in the radius of the sprinkler
def coverage_count(field, sprinkler_h, sprinkler_w, radius, height, width):
#Check if radius will go outside of map
#Check top of field
if (sprinkler_h - radius) < 0:
start_h = 0
else:
start_h = sprinkler_h - radius
#Check bottom of field
if (sprinkler_h + radius) >= height:
end_h = height-1
else:
end_h = sprinkler_h + radius
#Check left side of field
if (sprinkler_w - radius) < 0:
start_w = 0
else:
start_w = sprinkler_w - radius
#Check right side of field
if (sprinkler_w + radius) >= width:
end_w = width-1
else:
end_w = sprinkler_w + radius
#Store number of crops covered
crop = 0
#Search sqaure that covers sprinkler area
#Check through height
for i in range(start_h, end_h+1):
#Check width
for j in range(start_w, end_w+1):
#Check if square is in radius by getting hypotenuse
if math.sqrt((sprinkler_h-i)**2 + (sprinkler_w-j)**2) <= radius:
#Check if this square is the sprinkler location, if so, then don't count crop, as it would be destroyed placing sprinkler
if (i == sprinkler_h) and (j == sprinkler_w):
continue
else:
#If square is a crop, increment crops covered
if field[i][j] == "x":
crop = crop + 1
return crop
def print_field(field):
    """Pretty-print the field, one row per line.

    Every cell is followed by a single space -- including the last cell of
    each row -- matching the original output format exactly.
    """
    for row in field:
        print("".join(cell + " " for cell in row))
# --- Interactive driver ----------------------------------------------------
# Read the field dimensions and sprinkler radius from the user, generate a
# random field, display it, then report the best sprinkler placement.
h = int(input("Input field height: "))
w = int(input("Input field width: "))
r = int(input("Input sprinkler radius: "))
field = create_field(h, w)
print_field(field)
print(place_sprinkler(field, h, w, r), "-- Sprinkler Coordinates [height, width, crops covered]. Coordinates are 0 indexed")
# Leftover debugging snippet: an inert string literal, never executed.
'''
#Prints last item in first row
print(field[0][w-1])
'''
| mit | Python | |
41bed7865c9002086f5599059700ed8599c8c7ef | Copy of existing (manual) https debug tool | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/MPS/ProxyHTTP/https.py | Sketches/MPS/ProxyHTTP/https.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Util.Console import ConsoleEchoer, ConsoleReader
from Kamaelia.Util.OneShot import OneShot
# Banner explaining the demo (Python 2 print statement; the string is
# user-facing output and is kept byte-for-byte).
print """
This is a simple demonstration program that shows that it is possible to
build simple clients for manually connecting to SSL based sources - such
as HTTPS sources.
This program connects the the subversion server for Kamaelia on port
443 on sourceforge - ie on kamaelia.svn.sourceforge.net. When you are
connected you are connected through an encrypted connection, which means
you could type the following and get code back from the server:
GET /svnroot/kamaelia/trunk/Code/Python/Kamaelia/Examples/SimpleGraphicalApps/Ticker/Ulysses HTTP/1.0
Host: kamaelia.svn.sourceforge.net
That's pretty much the purpose of this example program.
"""
# Wire the components together: a OneShot triggers the TCPClient's SSL
# handshake, console input is forwarded over the encrypted connection, and
# everything the server sends back is echoed to the console.
Graphline(
    MAKESSL = OneShot(" make ssl "), # The actual message here is not necessary
    CONSOLE = ConsoleReader(),
    ECHO = ConsoleEchoer(),
    CONNECTION = TCPClient("kamaelia.svn.sourceforge.net", 443),
    linkages = {
        # "makessl" upgrades the TCP connection to SSL before any user input.
        ("MAKESSL", "outbox"): ("CONNECTION", "makessl"),
        ("CONSOLE", "outbox"): ("CONNECTION", "inbox"),
        ("CONSOLE", "signal"): ("CONNECTION", "control"),
        ("CONNECTION", "outbox"): ("ECHO", "inbox"),
        ("CONNECTION", "signal"): ("ECHO", "control"),
    }
).run()
| apache-2.0 | Python | |
4fd051fd6d048e64f574097a3ca314111087ee45 | Fix up conv models to match current master. | chrinide/theanets,lmjohns3/theanets | theanets/convolution.py | theanets/convolution.py | # -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
    '''A regressor attempts to produce a target output.'''

    # Convolutional models hold their training inputs in 4-D arrays.
    # NOTE(review): the axis semantics are defined by feedforward.Regressor --
    # confirm the expected (batch, row, column, channel) ordering there.
    INPUT_NDIM = 4
    '''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
    '''A classifier attempts to match a 1-hot target output.'''

    # Convolutional models hold their training inputs in 4-D arrays.
    # NOTE(review): axis semantics come from feedforward.Classifier --
    # confirm the expected ordering there.
    INPUT_NDIM = 4
    '''Number of dimensions for holding input data arrays.'''
| # -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
    '''A regressor attempts to produce a target output.

    A convolutional regression model takes the following inputs during training:

    - ``x``: A three-dimensional array of input data. Each element of axis 0 of
      ``x`` is expected to be one moment in time. Each element of axis 1 of
      ``x`` holds a single sample from a batch of data. Each element of axis 2
      of ``x`` represents the measurements of a particular input variable across
      all times and all data items.

    - ``targets``: A two-dimensional array of target output data. Each element
      of axis 0 of ``targets`` is expected to be one moment in time. Each
      element of axis 1 of ``targets`` holds a single sample from a batch of
      data. Each element of axis 2 of ``targets`` represents the measurements of
      a particular output variable across all times and all data items.
    '''

    def __init__(self, layers=(), loss='mse', weighted=False):
        # NOTE(review): super() is bound to feedforward.Regressor itself, so
        # this deliberately skips feedforward.Regressor.__init__ and calls its
        # base with convolution-specific dimensions (4-D inputs, 2-D targets)
        # -- confirm this matches the feedforward base-class API.
        super(feedforward.Regressor, self).__init__(
            layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
    '''A classifier attempts to match a 1-hot target output.

    Unlike a feedforward classifier, where the target labels are provided as a
    single vector, a recurrent classifier requires a vector of target labels for
    each time step in the input data. So a recurrent classifier model requires
    the following inputs for training:

    - ``x``: A three-dimensional array of input data. Each element of axis 0 of
      ``x`` is expected to be one moment in time. Each element of axis 1 of
      ``x`` holds a single sample in a batch of data. Each element of axis 2 of
      ``x`` represents the measurements of a particular input variable across
      all times and all data items in a batch.

    - ``labels``: A one-dimensional vector of integer target labels. Each
      element of ``labels`` is expected to be the class index for a single batch
      item.
    '''

    def __init__(self, layers=(), loss='xe', weighted=False):
        # NOTE(review): super() is bound to feedforward.Classifier itself, so
        # this deliberately skips feedforward.Classifier.__init__ and calls its
        # base with convolution-specific dimensions (4-D inputs, 1-D labels)
        # -- confirm this matches the feedforward base-class API.
        super(feedforward.Classifier, self).__init__(
            layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
| mit | Python |
1dc795fcf3e6c09a9a77fb008ee3b5fe5c7c3719 | fix bug 1035957 - correct received_at column | Tayamarn/socorro,linearregression/socorro,cliqz/socorro,Tayamarn/socorro,m8ttyB/socorro,m8ttyB/socorro,cliqz/socorro,rhelmer/socorro,twobraids/socorro,linearregression/socorro,twobraids/socorro,lonnen/socorro,pcabido/socorro,Serg09/socorro,rhelmer/socorro,pcabido/socorro,Tchanders/socorro,cliqz/socorro,KaiRo-at/socorro,Tchanders/socorro,AdrianGaudebert/socorro,KaiRo-at/socorro,luser/socorro,adngdb/socorro,AdrianGaudebert/socorro,rhelmer/socorro,Serg09/socorro,Tayamarn/socorro,adngdb/socorro,linearregression/socorro,pcabido/socorro,luser/socorro,yglazko/socorro,Tchanders/socorro,rhelmer/socorro,twobraids/socorro,spthaolt/socorro,yglazko/socorro,spthaolt/socorro,luser/socorro,luser/socorro,m8ttyB/socorro,mozilla/socorro,spthaolt/socorro,yglazko/socorro,adngdb/socorro,m8ttyB/socorro,pcabido/socorro,AdrianGaudebert/socorro,cliqz/socorro,mozilla/socorro,KaiRo-at/socorro,KaiRo-at/socorro,spthaolt/socorro,yglazko/socorro,spthaolt/socorro,yglazko/socorro,twobraids/socorro,rhelmer/socorro,mozilla/socorro,Tchanders/socorro,lonnen/socorro,twobraids/socorro,adngdb/socorro,rhelmer/socorro,Serg09/socorro,Serg09/socorro,m8ttyB/socorro,adngdb/socorro,Tchanders/socorro,lonnen/socorro,pcabido/socorro,Tayamarn/socorro,lonnen/socorro,cliqz/socorro,mozilla/socorro,KaiRo-at/socorro,pcabido/socorro,AdrianGaudebert/socorro,linearregression/socorro,Tchanders/socorro,Serg09/socorro,m8ttyB/socorro,cliqz/socorro,Serg09/socorro,mozilla/socorro,Tayamarn/socorro,AdrianGaudebert/socorro,AdrianGaudebert/socorro,luser/socorro,yglazko/socorro,Tayamarn/socorro,linearregression/socorro,adngdb/socorro,linearregression/socorro,luser/socorro,mozilla/socorro,twobraids/socorro,spthaolt/socorro,KaiRo-at/socorro | alembic/versions/391e42da94dd_bug_1035957_use_literal_now_for_.py | alembic/versions/391e42da94dd_bug_1035957_use_literal_now_for_.py | """bug 1035957 - use literal NOW() for received_at, do not evaluate at 
migration time
Revision ID: 391e42da94dd
Revises: 495bf3fcdb63
Create Date: 2014-07-08 10:55:04.115932
"""
# revision identifiers, used by Alembic.
revision = '391e42da94dd'
down_revision = '495bf3fcdb63'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Make ``raw_adi.received_at`` default to a literal NOW().

    Using ``sa.text('NOW()')`` keeps the expression un-evaluated so the
    timestamp is computed at row-insert time, not frozen at migration time
    (bug 1035957).  The stray trailing comma -- which wrapped the call's
    result in a throwaway 1-tuple -- has been removed.
    """
    op.alter_column(u'raw_adi', u'received_at', server_default=sa.text('NOW()'))
def downgrade():
    """Restore the previous hard-coded timestamp default on
    ``raw_adi.received_at``.

    The stray trailing comma from the original (which made the statement a
    throwaway 1-tuple) has been removed.
    """
    op.alter_column(u'raw_adi', u'received_at',
                    server_default='2014-06-24 00:29:17.218147+00')
| mpl-2.0 | Python | |
05ce8407af2075ebcc002583b4224659d19dc9db | Add unit tests for spack help command (#6779) | LLNL/spack,iulian787/spack,krafczyk/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,krafczyk/spack,EmreAtes/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,mfherbst/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,matthiasdiener/spack,LLNL/spack,tmerrick1/spack,EmreAtes/spack,krafczyk/spack | lib/spack/spack/test/cmd/help.py | lib/spack/spack/test/cmd/help.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import pytest
from spack.main import SpackCommand
@pytest.mark.xfail
def test_reuse_after_help():
    """`spack help` should be callable twice on one SpackCommand instance."""
    help_cmd = SpackCommand('help')
    help_cmd()
    # The second invocation currently fails: format_help_sections() calls
    # add_all_commands(), which leaves extra positionals behind in the
    # parser, so parse_known_args() reports "too few arguments".
    #
    # TODO: figure out why this stopped working (it did before commands were
    # loaded lazily) and collapse this back to a single SpackCommand.
    help_cmd()
def test_help():
    """Sanity check that plain `spack help` runs and prints the summary."""
    out = SpackCommand('help')()
    assert 'These are common spack commands:' in out
def test_help_all():
    """`spack help --all` lists the complete command set."""
    out = SpackCommand('help')('--all')
    assert 'Complete list of spack commands:' in out
def test_help_spec():
    """`spack help --spec` prints the spec-expression syntax help."""
    out = SpackCommand('help')('--spec')
    assert 'spec expression syntax:' in out
def test_help_subcommand():
    """`spack help help` shows the help command's own description."""
    out = SpackCommand('help')('help')
    assert 'get help on spack and its commands' in out
| lgpl-2.1 | Python | |
3590a162363ff62859eccf9f7f46c74f2c5cadc4 | Create PAKvsIND.py | salekali/PAKvsIND-Sentiment-Analysis | PAKvsIND.py | PAKvsIND.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 01:56:57 2017
@author: Muhammad Salek Ali
"""
# 1- Importing libraries for twitter and NLP
#--------------------------------------------
import numpy as np
import tweepy
from textblob import TextBlob
# 2- Authentication
#-------------------
consumerKey= 'enter_yours_here'
consumerSecret= 'enter_yours_here'
accessToken='enter_yours_here'
accessTokenSecret='enter_yours_here'
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
# 3- Prepare query features
#---------------------------
teams = ['India', 'Pakistan']
hashtag = "PAKvsIND"
#Tweets to be collected:
fromDate = "2017-06-17"
# 4- Analysis Result Labels
#----------------------------
def senti(analysis, threshold=0):
    """Label a TextBlob-style sentiment by comparing its polarity
    (``analysis.sentiment[0]``) against *threshold*.

    Returns 'Positive', 'Neutral' or 'Negative'.
    """
    polarity = analysis.sentiment[0]
    if polarity > threshold:
        return 'Positive'
    if polarity == threshold:
        return 'Neutral'
    return 'Negative'
# 5- Retrieve Tweets and Save Them
#----------------------------------
# For each team: stream tweets matching the hashtag, score each tweet's
# sentiment, log every tweet to <team>_tweets.csv, and record the team's
# mean polarity.
meanPolarities = dict()
for team in teams:
    teamPolarities = []
    #Save the tweets in csv
    with open('%s_tweets.csv' % team, 'w') as teamFile:
        teamFile.write('tweet,label,sentiment\n')
        # Hard cap on tweets collected per team.
        maxQuery=5000
        for tweet in tweepy.Cursor(api.search,
                                   q=[hashtag,team],
                                   include_entities=True
                                   ).items():
            text = TextBlob(tweet.text)
            #Get the label corresponding to the sentiment analysis
            teamPolarities.append(text.sentiment[0])
            print (len(teamPolarities))
            print (senti(text))
            # NOTE(review): under Python 3, encode('utf8') yields bytes, so
            # the CSV row would contain a b'...' literal; also the raw tweet
            # text is not CSV-escaped -- confirm the intended runtime.
            teamFile.write('%s,%s,%s\n' % (tweet.text.encode('utf8'), senti(text),str(text.sentiment[0])))
            if (len(teamPolarities)>=maxQuery):
                break
    #Save the mean for final results
    meanPolarities[team] = np.mean(teamPolarities)
# 6- Output a Result
#--------------------
print ('Mean Sentiment Polarities:')
print(meanPolarities)
| mit | Python | |
3b82f7ada9e80eb581cf924dbf7b0490f864b264 | break at 500 | fbcom/project-euler | 012_highly_divisible_triangular_number.py | 012_highly_divisible_triangular_number.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Highly divisible triangular number" – Project Euler Problem No. 12
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=12
#
def count_divisors(n):
    """Return the number of positive divisors of n (n >= 1).

    Fixes the original counter, which double-counted the square root of
    perfect squares and reported 2 divisors for n == 1.
    """
    count = 0
    d = 1
    while d * d <= n:
        if n % d == 0:
            # d and n // d are a divisor pair; count the root only once.
            count += 1 if d * d == n else 2
        d += 1
    return count


def first_triangular_with_divisors(limit=500):
    """Return the first triangular number with more than *limit* divisors.

    *limit* generalizes the original hard-coded 500.
    """
    i = 0
    while True:
        i += 1
        # i-th triangular number: 1 + 2 + ... + i (integer division is exact
        # because i * (i + 1) is always even).
        triangular = i * (i + 1) // 2
        if count_divisors(triangular) > limit:
            return triangular


if __name__ == "__main__":
    answer = first_triangular_with_divisors()
    print("Solution: %d has %d factors" % (answer, count_divisors(answer)))
| mit | Python | |
a3b8fe98d82e6e82267599fdd9f8ecea684fb603 | Add import script | OpenChemistry/materialsdatabank,OpenChemistry/materialsdatabank,OpenChemistry/materialsdatabank,OpenChemistry/materialsdatabank | mdb/__init__.py | mdb/__init__.py | import sys
import click
import types
import bibtexparser
from girder_client import GirderClient
class MDBCli(GirderClient):
    # Thin GirderClient wrapper that installs a click-based progress bar and
    # authenticates (API key first, else username/password) at construction.

    def __init__(self, username, password, api_url=None, api_key=None):
        def _progress_bar(*args, **kwargs):
            # Build a click progress bar and customize its rendering.
            bar = click.progressbar(*args, **kwargs)
            bar.bar_template = "[%(bar)s] %(info)s %(label)s"
            bar.show_percent = True
            bar.show_pos = True

            def formatSize(length):
                # Render a byte count with a binary-prefix unit (k, M, G...).
                if length == 0:
                    return '%.2f' % length
                unit = ''
                # See https://en.wikipedia.org/wiki/Binary_prefix
                units = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
                while True:
                    if length <= 1024 or len(units) == 0:
                        break
                    unit = units.pop(0)
                    length /= 1024.
                return '%.2f%s' % (length, unit)

            def formatPos(_self):
                # Show "position/total" using human-readable sizes.
                pos = formatSize(_self.pos)
                if _self.length_known:
                    pos += '/%s' % formatSize(_self.length)
                return pos

            # Monkey-patch this bar instance so click uses our formatting.
            bar.format_pos = types.MethodType(formatPos, bar)
            return bar

        # Only report progress when stdout is a real terminal.
        _progress_bar.reportProgress = sys.stdout.isatty()

        super(MDBCli, self).__init__(
            apiUrl=api_url, progressReporterCls=_progress_bar)

        # No password supplied -> let girder_client prompt interactively.
        interactive = password is None

        if api_key:
            self.authenticate(apiKey=api_key)
        elif username:
            self.authenticate(username, password, interactive=interactive)
@click.group()
@click.option('--api-url', default=None,
              help='RESTful API URL '
                   '(e.g https://girder.example.com:443/%s)' % GirderClient.DEFAULT_API_ROOT)
@click.option('--api-key', envvar='GIRDER_API_KEY', default=None,
              help='[default: GIRDER_API_KEY env. variable]')
@click.option('--username', default=None)
@click.option('--password', default=None)
@click.pass_context
def cli(ctx, username, password, api_key, api_url):
    # Root command group: build the authenticated client once and stash it on
    # the click context so subcommands receive it via @click.pass_obj.
    # (Deliberately no docstring -- click would surface it as help text.)
    ctx.obj = MDBCli(
        username, password, api_url=api_url, api_key=api_key)
@cli.command('import', help='Import document.')
@click.option('--bibtex-file', default=None,
              help='path to bibtex file for paper',
              required=True,
              type=click.Path(exists=True, dir_okay=False, readable=True))
@click.option('--emd-file', default=None,
              help='path to emd file containing the reconstruction',
              type=click.Path(exists=True, dir_okay=False, readable=True))
@click.option('--cjson-file', default=None,
              help='path to cjson file containing structure',
              type=click.Path(exists=True, dir_okay=False, readable=True))
@click.pass_obj
def _import(gc, bibtex_file=None, emd_file=None, cjson_file=None):
    """Create a 'tomo' document from a BibTeX entry and optionally attach a
    reconstruction (EMD) and a structure (CJSON) file.

    Fixes over the original:
    - ``authors.remove('others')`` raised ValueError when the BibTeX author
      list had no trailing 'and others'; it is now conditional.
    - The optional --emd-file / --cjson-file uploads crashed when omitted;
      each upload now runs only when its file was supplied.
    """
    # Parse the first BibTeX entry for the paper metadata.
    with open(bibtex_file) as fp:
        bibtex_database = bibtexparser.load(fp)
    entry = bibtex_database.entries[0]
    authors = entry['author'].split(' and ')
    # BibTeX often ends author lists with 'and others'; drop that marker
    # only when it is actually present.
    if 'others' in authors:
        authors.remove('others')
    paper = entry['title']
    tomo = {
        'authors': authors,
        'paper': paper
    }
    tomo = gc.post('tomo', json=tomo)
    # Stage uploads in an 'mdb' folder under the user's Private folder.
    me = gc.get('/user/me')
    private_folder = next(gc.listFolder(me['_id'], 'user', 'Private'))
    folder = gc.loadOrCreateFolder('mdb', private_folder['_id'], 'folder')
    if emd_file is not None:
        # Upload reconstructions
        recon_file = gc.uploadFileToFolder(folder['_id'], emd_file)
        print('Creating reconstruction ...')
        recon = {
            'fileId': recon_file['_id']
        }
        gc.post('tomo/%s/reconstructions' % tomo['_id'], json=recon)
    if cjson_file is not None:
        # Upload structure
        struc_file = gc.uploadFileToFolder(folder['_id'], cjson_file)
        print('Creating structure ...')
        struc = {
            'fileId': struc_file['_id']
        }
        gc.post('tomo/%s/structures' % tomo['_id'], json=struc)
| bsd-3-clause | Python | |
f31dd0c7f23273207eab5e30a3ea42b5edf30f2b | work in progress, script to balance PTR records | menandmice/mnm-reversezone-tools | mnm-balance-reversezones.py | mnm-balance-reversezones.py | #!/usr/bin/env python3
# Copyright (C) 2013 Men & Mice
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND MEN & MICE DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL MEN & MICE BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
"""
This script balances the forward lookup and reverse lookup zones. It
checks that all A/AAAA record entries have matching PTR records, and
it looks for PTR records without matching A/AAAA records (orphaned PTR
records).
It is the script version of the "reverse-zone-wizard" and the "find
orphan PTR records" functions in the Men & Mice GUI.
This script requires the Men & Mice Suite CLI mmcmd
Author: Carsten Strotmann - carsten@menandmice.com
Version: 0.1 (wip)
Date: 2013-09-04
"""
import os
import sys
import subprocess
import string
from optparse import OptionParser
server = "127.0.0.1"
mmcmdpgm = "/usr/bin/mmcmd"
user = "administrator"
password = "menandmice"
masterserver = "ns1.example.com"
def mmcmd(cmd, debugflag=False):
    """Run a single Men & Mice CLI command and return its decoded output.

    The command is executed through the external ``mmcmd`` binary with the
    module-level server/user/password settings; a trailing '; quit;' makes
    the CLI exit after the command.
    """
    if debugflag:
        print("mmcmd {}".format(cmd))
    argv = [mmcmdpgm,
            "-q", "-s{}".format(server),
            "-u{}".format(user),
            "-p{}".format(password),
            "{}; quit;".format(cmd)]
    # 60 second safety timeout so a wedged CLI cannot hang the script.
    raw = subprocess.check_output(argv, timeout=60)
    return raw.decode("utf8")
# Main program
if __name__ == "__main__":
    parser = OptionParser(usage="Usage: %prog [--help | options]")
    parser.add_option("-d", action="store_true", dest="debugflag",
                      default=False, help="print debug information")
    parser.add_option("-r", action="store_true", dest="removeflag",
                      default=False,
                      help="remove orphaned PTR records")
    parser.add_option("-a", action="store_true", dest="addflag",
                      default=False,
                      help="add missing PTR records")
    # NOTE(review): removeflag/addflag are parsed but not yet acted upon --
    # the script is explicitly a work in progress.
    (options, args) = parser.parse_args()
    print ("Balancing reverse zones ...")
    # Ask the CLI for all zones; the first whitespace-separated token of
    # each output line is the zone name (lower-cased for matching).
    zones = mmcmd("zones", options.debugflag).lower()
    zones = zones.splitlines()
    zonelist = [z.split(" ",1)[0] for z in zones]
    # Drop names containing '::', then keep only reverse-lookup zones.
    zonelist = [z for z in zonelist if not '::' in z]
    rev4zonelist = [z for z in zonelist if z.endswith("in-addr.arpa")]
    rev6zonelist = [z for z in zonelist if z.endswith("ip6.arpa")]
| isc | Python | |
2187ae0b6303ae1745749b270c5c46937d8dde33 | Create mexican_wave.py | Kunalpod/codewars,Kunalpod/codewars | mexican_wave.py | mexican_wave.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Mexican Wave
#Problem level: 6 kyu
def wave(str):
    """Return every "Mexican wave" variant of *str*: one copy per character,
    with that character uppercased.

    Positions whose character has no distinct uppercase form (spaces,
    digits, already-uppercase letters) are skipped, exactly as the original
    filter ``x != str`` did.  The parameter keeps its original (builtin-
    shadowing) name for backward compatibility with keyword callers.
    """
    return [str[:i] + ch.upper() + str[i + 1:]
            for i, ch in enumerate(str)
            if ch.upper() != ch]
| mit | Python | |
fdc40675eabaeee191fa3a047705b677d431f58c | Create a small form class to facilitate easy use of Djangos CSRF functionality | Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web | src/whitelist/util/apply_whitelist_form.py | src/whitelist/util/apply_whitelist_form.py | from django import forms
class ApplyWhitelistForm(forms.Form):
"""A small placeholder form class to allow Django's form magic to take
hold of the "apply whitelist" button.
"""
pass
| mit | Python | |
252c0916e4db033c3aee81e232a64e649f6bc926 | add a command to trigger a bulk sync | crateio/crate.pypi,crate-archive/crate-site,crate-archive/crate-site | crate_project/apps/crate/management/commands/trigger_bulk_sync.py | crate_project/apps/crate/management/commands/trigger_bulk_sync.py | from django.core.management.base import BaseCommand
from pypi.tasks import bulk_synchronize
class Command(BaseCommand):
    # Management command: queue a full PyPI bulk synchronization via Celery.

    def handle(self, *args, **options):
        # Fire-and-forget: the actual sync runs asynchronously in a worker.
        bulk_synchronize.delay()
        print "Bulk Synchronize Triggered"
| bsd-2-clause | Python | |
457ba730a6541ab27ce8cbe06cbb6bfe246bba74 | Add a simple HTTP Basic Authentication decorator for the API | matthiask/towel,matthiask/towel,matthiask/towel,matthiask/towel | towel/api/decorators.py | towel/api/decorators.py | from functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
    """View decorator that authenticates the request via HTTP Basic auth.

    When a valid ``Authorization: Basic ...`` header is present, the decoded
    credentials are checked with ``authenticate()`` and a matching user is
    attached to ``request.user``; otherwise the view runs unauthenticated.
    A malformed base64 payload yields a 400 response.

    Fixes over the original:
    - ``@vary_on_headers`` was referenced but never imported (NameError as
      soon as the decorator was applied); the response is now marked with
      the already-imported ``patch_vary_headers`` instead.
    - ``HttpResponse`` was not imported; it is imported locally so the
      module's import block is untouched.
    - The base64 decode uses the stdlib ``base64`` module rather than the
      Python-2-only ``str.decode('base64')`` codec.
    """
    @wraps(func)
    def _decorator(request, *args, **kwargs):
        if 'HTTP_AUTHORIZATION' in request.META:
            meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
            if meth.lower() == 'basic':
                import base64
                try:
                    auth = base64.b64decode(auth.strip())
                except Exception:
                    from django.http import HttpResponse
                    return HttpResponse('Invalid authorization header',
                        status=httplib.BAD_REQUEST)
                username, sep, password = auth.partition(':')
                user = authenticate(username=username, password=password)
                if user:
                    request.user = user
        response = func(request, *args, **kwargs)
        # Responses differ per Authorization header, so caches must vary
        # on it (the intent behind the undefined vary_on_headers call).
        patch_vary_headers(response, ('Authorization',))
        return response
    return _decorator
| bsd-3-clause | Python | |
457937561f6a581edd495d7f9559f57b94108c24 | add really basic game implementation | peterolph/hAIve,peterolph/hAIve | haive/game.py | haive/game.py |
# An interactive wrapper for the model
from haive import model
from collections import namedtuple
def tuple_from_string(string):
    """Parse a comma-separated coordinate string like "1,2" into a tuple
    of ints."""
    return tuple(map(int, string.split(',')))
human = 'human'
ai = 'ai'
player_types = (human, ai)
Move = namedtuple('Move', ('token','source','destination'))
class Game(object):
    """Interactive wrapper around the hive model: alternates human and AI
    turns until the model reports a winner."""

    def __init__(self, m, players, ai=None):
        """*m* is the model, *players* maps each colour to 'human' or 'ai',
        and *ai* (if any) supplies choose_move(model, colour)."""
        self.m = m
        self.ai = ai
        self.active_player = model.black
        self.players = players

    def make_move(self, move):
        """Apply *move* to the model: place a new token, or move an
        existing one; raise ValueError for an empty move."""
        if move.token is not None:
            self.m.add(move.token, move.destination)
        elif move.source is not None:
            self.m.move(move.source, move.destination)
        else:
            raise ValueError

    def human_move(self):
        """Show the board, prompt the active player, and apply the move
        they type ("<kind> <dest>" to place, "<src> <dest>" to move)."""
        print(self.render_model())
        source, destination = input("Please enter a move for "+self.active_player+": ").split(" ")
        if source in model.kinds:
            # Placing a brand-new token of the given kind.
            human_move = Move(token=model.Token(colour=self.active_player, kind=source),
                              source=None, destination=tuple_from_string(destination))
        else:
            # Moving an existing token between coordinates.
            human_move = Move(token=None, source=tuple_from_string(source),
                              destination=tuple_from_string(destination))
        self.make_move(human_move)

    def ai_move(self):
        """Ask the AI for the active player's move and apply it."""
        self.make_move(self.ai.choose_move(self.m, self.active_player))

    def play(self):
        """Alternate turns until the model reports a winner; return it."""
        while self.m.winner() is None:
            if self.players[self.active_player] == human:
                self.human_move()
            elif self.players[self.active_player] == ai:
                self.ai_move()
            else:
                raise ValueError
            self.active_player = self.m.colour_opposite(self.active_player)
        return self.m.winner()

    def render_model(self):
        """Return a printable rendering of the board state.

        Bug fix: the original printed the state and returned None, which
        made human_move() print the board followed by a spurious 'None'.
        """
        return str(self.m.state)
if __name__ == '__main__':
    # Launch an interactive human-vs-human game and announce the winner.
    game = Game(model.Model(), {model.black:human, model.white:human})
    winner = game.play()
    print(winner, "won!")
c8d441fbee372abc61867d594f0645d9d79a36f0 | add raw data parser script | horizon3385/websiteClassifier,horizon3385/websiteClassifier | parseRawData/parseRawXML.py | parseRawData/parseRawXML.py | #!/usr/bin/python
import sys
import json
from bs4 import BeautifulSoup
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def extract_xml(markup):
    """
    Extract `url`, `title`, 'description`, and `topic` from XML 'ExternalPage' markup
    """
    page = BeautifulSoup(markup, 'lxml').externalpage
    return {
        'url': page['about'],
        'title': page.title.text,
        'description': page.description.text,
        # Topic paths like "Top/Arts/Music" become a list of components.
        'topic': page.topic.text.split('/'),
    }
def main(argv):
    # Stream an RDF dump from stdin and emit one JSON object per
    # <ExternalPage> block; *argv* is currently unused.
    cnt = 0
    xml = ''
    trigger = False
    # NOTE(review): using sys.stdin as a context manager closes it when the
    # block exits -- confirm nothing reads stdin afterwards.
    with sys.stdin as f:
        for line in f:
            # set up start tag
            if '<ExternalPage' in line:
                trigger = True
            # set up close tag
            elif '</ExternalPage>' in line:
                cnt += 1
                trigger = False
                xml += line.rstrip('\n')
                # process xml block
                result = extract_xml(xml)
                logger.info("processed\t%d\t%s" % (cnt, result['url']))
                print >> sys.stdout, json.dumps(result)
                xml = ''
            if trigger:
                # Strip the 'd:' namespace prefix so BeautifulSoup tag
                # names match what extract_xml() looks up.
                xml += line.rstrip('\n').replace('d:', '')
if __name__ == "__main__":
    # Entry point: forward CLI arguments (currently unused by main).
    main(sys.argv[1:])
| mit | Python | |
e83dd0bfa4f601ed3c5ea9687d2781e83a2e6bf4 | Add logger | johnveitch/cpnest | cpnest/logger.py | cpnest/logger.py | import logging
def start_logger(output=None, verbose=0):
"""
Start an instance of Logger for logging
output : `str`
output directory (./)
verbose: `int`
Verbosity, 0=CRITICAL, 1=WARNING, 2=INFO, 3=DEBUG
fmt: `str`
format for logger (None) See logging documentation for details
"""
# possible levels
verbose = min(verbose, 3)
# levels 0, 1, 2, 3
levels = ['CRITICAL', 'WARNING', 'INFO', 'DEBUG']
level = levels[verbose]
fmt = '%(asctime)s - %(name)-8s: %(message)s'
# setup logger
logger = logging.getLogger('CPNest')
logger.setLevel(level)
# handle command line output
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d, %H:%M:%S'))
logger.addHandler(ch)
if output is not None:
# log to file
fh = logging.FileHandler(output + 'cpnest.log')
fh.setFormatter(logging.Formatter(fmt))
logger.addHandler(fh)
print(logger.critical('Logging level: {}'.format(level)))
return logger
| mit | Python | |
5fe88aa7d814bb630c29a7afcf511caba8c03ece | add placeholder | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | htdocs/c/c.py | htdocs/c/c.py | import os
import sys
tilecachepath, wsgi_file = os.path.split(__file__)
sys.path.insert(0, "/opt/iem/include/python/")
sys.path.insert(0, "/opt/iem/include/python/TileCache/")
from TileCache.Service import Service, wsgiHandler
cfgfiles = os.path.join(tilecachepath, "tilecache.cfg")
theService = {}
def wsgiApp(environ, start_response):
    """WSGI entry point: lazily load the TileCache service on the first
    request, then delegate every request to the TileCache WSGI handler."""
    global theService
    if not theService:
        theService = Service.load(cfgfiles)
    return wsgiHandler(environ, start_response, theService)
application = wsgiApp
| mit | Python | |
4a1b7c7e1c6bd1df2d31e37a0cf97853faafb8e5 | Add BrowserScraper class | ransomwarezz/instagram-scraper | BrowserScraper.py | BrowserScraper.py | import time
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
class BrowserScraper():
    """Selenium-driven Instagram scraper.

    Notes
    -----
    * ``username`` and ``level`` are accepted but currently unused.
    * Bug fix: the original discarded an explicitly supplied ``driver`` and
      always created a new Chrome instance; a caller-provided driver is now
      honoured.
    """

    def __init__(self, username, level, driver=None):
        if driver is None:
            self.driver = webdriver.Chrome('./chromedriver')
        else:
            # Previously the supplied driver was silently ignored.
            self.driver = driver

    def wait(self):
        """Crude fixed delay to let the page settle after a click."""
        time.sleep(3)

    def scrapeUser(self, userLink):
        """Open *userLink*, click through its post links ("taken-by" URLs)
        and collect the profile URLs of users referenced in each post.

        Returns a set of profile URLs.
        """
        self.driver.get(userLink)
        links = self.driver.find_elements_by_tag_name('a')
        users = []
        safetyCounter = 0
        for link in links:
            try:
                linkAdress = link.get_attribute("href")
            except StaleElementReferenceException:
                # NOTE(review): rebinding ``links`` does not change the list
                # the for-loop is already iterating -- confirm this recovery
                # path behaves as intended.
                links = self.driver.find_elements_by_tag_name('a')[:safetyCounter]
                continue
            if "taken-by" in linkAdress:
                link.click()
                self.wait()
                potentialUsers = self.driver.find_elements_by_tag_name('a')
                if potentialUsers is not None:
                    for potentialUser in potentialUsers:
                        # A user link embeds its own title within its href.
                        if potentialUser.get_attribute('href') is not None and potentialUser.get_attribute(
                                'title') in potentialUser.get_attribute('href') and BrowserScraper.isUserLink(
                                potentialUser):
                            users.append(potentialUser.get_attribute('href'))
                # Close the post overlay before moving to the next link.
                potentialCloseButtons = self.driver.find_elements_by_tag_name('button')
                for potentialCloseButton in potentialCloseButtons:
                    if 'CLOSE' in potentialCloseButton.text.upper():
                        potentialCloseButton.click()
                # Counter position reconstructed from a whitespace-mangled
                # source; it tracks progress for the staleness recovery above.
                safetyCounter += 1
        return set(users)

    @classmethod
    def getUserLink(cls, username):
        """Build the canonical Instagram profile URL for *username*."""
        return "https://www.instagram.com/" + username + "/"

    @classmethod
    def isUserLink(cls, link):
        """Heuristically decide whether *link* points at a user profile
        rather than one of Instagram's service/legal/press pages."""
        address = link.get_attribute('href')
        parent = link.find_element_by_xpath('..')
        return "/accounts/" not in address and \
               "/p/" not in address and \
               "/legal/" not in address and \
               'LI' in parent.get_property("tagName").upper() and \
               '/blog.instagram.com/' not in address and \
               '/about/' not in address and \
               '/explore/' not in address and \
               '/developer/' not in address and \
               'instagram-press.com' not in address and \
               'help.instagram.com' not in address
| mit | Python | |
c313a21274f4e77d0c4baad13c5c0f5781ac13ef | Create special-binary-string.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode | Python/special-binary-string.py | Python/special-binary-string.py | # Time: f(n) = kf(n/k) + n/k * klogk <= O(logn * nlogk) <= O(n^2)
# n is the length of S, k is the max number of special strings in each depth
# Space: O(n)
class Solution(object):
    def makeLargestSpecial(self, S):
        """
        :type S: str
        :rtype: str

        Split S into top-level special substrings (balance returns to zero),
        recursively maximize each interior, then concatenate the pieces in
        descending order for the lexicographically largest result.
        """
        pieces = []
        balance = 0
        start = 0
        for idx, ch in enumerate(S):
            balance += 1 if ch == '1' else -1
            if balance == 0:
                # S[start..idx] is special; rebuild it around its maximized
                # interior (everything strictly between the outer 1 and 0).
                inner = self.makeLargestSpecial(S[start + 1:idx])
                pieces.append('1' + inner + '0')
                start = idx + 1
        return ''.join(sorted(pieces, reverse=True))
| mit | Python | |
63f3e2027948d98781bdd66a0341501facb4b46c | Add test file | karlaking/rock-reconstruction | image_test.py | image_test.py | import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| mit | Python | |
70ba6d35682c4cad67ed3950542505557e97b86a | Create selenium-auth.py | Harinus/ZAP-Selenium-Auth | selenium-auth.py | selenium-auth.py | # The sendingRequest and responseReceived functions will be called for all requests/responses sent/received by ZAP,
# including automated tools (e.g. active scanner, fuzzer, ...)
# Note that new HttpSender scripts will initially be disabled
# Right click the script in the Scripts tree and select "enable"
# 'initiator' is the component the initiated the request:
# 1 PROXY_INITIATOR
# 2 ACTIVE_SCANNER_INITIATOR
# 3 SPIDER_INITIATOR
# 4 FUZZER_INITIATOR
# 5 AUTHENTICATION_INITIATOR
# 6 MANUAL_REQUEST_INITIATOR
# 7 CHECK_FOR_UPDATES_INITIATOR
# 8 BEAN_SHELL_INITIATOR
# 9 ACCESS_CONTROL_SCANNER_INITIATOR
# For the latest list of values see the HttpSender class:
# https://github.com/zaproxy/zaproxy/blob/master/src/org/parosproxy/paros/network/HttpSender.java
# 'helper' just has one method at the moment: helper.getHttpSender() which returns the HttpSender
# instance used to send the request.
#
# New requests can be made like this:
# msg2 = msg.cloneAll() // msg2 can then be safely changed as required without affecting msg
# helper.getHttpSender().sendAndReceive(msg2, false);
# print('msg2 response=' + str(msg2.getResponseHeader().getStatusCode()))
def sendingRequest(msg, initiator, helper):
    """ZAP HttpSender hook invoked before each request; intentionally a no-op."""
    pass
def regparser(logoutIndicators, msg):
    """Check whether *msg* matches any logged-out indicator combination.

    logoutIndicators is a list of dicts whose keys are a subset of
    'STATUS' / 'HEADER' / 'BODY' and whose values are regex fragments.
    The response is flattened into a tagged string such as
    "<#STATUS>200</#STATUS><#BODY>...</#BODY>"; each indicator dict becomes
    one alternation group of lookahead-anchored patterns, so a dict matches
    only if ALL of its fields match. Returns the re match object or None.
    """
    import re
    msgstring = ""
    rexstring = ""
    # Only serialize the response parts that at least one indicator uses.
    if any("STATUS" in s for s in logoutIndicators):
        msgstring = msgstring + "<#STATUS>" + str(msg.getResponseHeader().getStatusCode()) + "</#STATUS>"
    if any("HEADER" in s for s in logoutIndicators):
        msgstring = msgstring + "<#HEADER>" + msg.getResponseHeader().toString() + "</#HEADER>"
    if any("BODY" in s for s in logoutIndicators):
        msgstring = msgstring + "<#BODY>" + msg.getResponseBody().toString() + "</#BODY>"
    # Build "(...)|(...)" -- one parenthesized AND-group per indicator dict.
    for indicatorDict in logoutIndicators:
        if rexstring != "":
            rexstring = rexstring + "|"
        rexstring = rexstring + "("
        for key in indicatorDict:
            # Lookahead pins the tag; the value regex must occur inside it.
            rexstring = rexstring + "((?=.*?<#"+ key +">).*?" + str(indicatorDict[key]) + ".*?</#" + key + ">)"
        rexstring = rexstring + ")"
    # DOTALL lets '.*?' span the multi-line header/body text.
    regex = re.compile(rexstring, re.DOTALL)
    return re.search(regex, msgstring)
def responseReceived(msg, initiator, helper):
    """ZAP hook: re-authenticate via a Selenium script when a response from
    the scanner/spider/fuzzer/manual initiators (2/3/4/6) looks logged-out.

    Side effects: creates a new ZAP HTTP session and spawns an external
    Selenese runner as a subprocess. NOTE(review): the jar/script paths and
    the proxy port below are hard-coded for one machine -- should come from
    configuration (the inline TODO comments already flag this).
    """
    print('responseReceived called for url=' + msg.getRequestHeader().getURI().toString())
    if initiator == 2 or initiator == 3 or initiator == 4 or initiator == 6:
        # Heuristics that indicate the session has been logged out.
        logoutIndicators = []
        logoutIndicators.append({'STATUS':'401'})
        logoutIndicators.append({'STATUS':'302', 'HEADER':'Location.*login'})
        logoutIndicators.append({'STATUS':'200', 'BODY':'Please login'})
        #print logoutIndicators
        if regparser(logoutIndicators, msg) is not None:
            print "AUTHENTICATION REQUIRED! Your initiator is: " + str(initiator)
            # Jython imports of ZAP's Java classes (this script runs inside ZAP).
            import os
            import org.parosproxy.paros.control.Control
            import org.zaproxy.zap.extension.httpsessions.ExtensionHttpSessions
            import org.zaproxy.zap.extension.httpsessions.HttpSessionsSite
            zapsessions = org.parosproxy.paros.control.Control.getSingleton().getExtensionLoader().getExtension(org.zaproxy.zap.extension.httpsessions.ExtensionHttpSessions.NAME)
            sessionSite = zapsessions.getHttpSessionsSite(msg.getRequestHeader().getURI().getHost() + ":" + str(msg.getRequestHeader().getHostPort()), False)
            # Start a fresh (empty) session so the login happens in it.
            if sessionSite.getHttpSession("Auth-Selenium") is not None:
                sessionSite.createEmptySession("Re-Auth-Selenium " + str(sessionSite.getNextSessionId()))
            else:
                sessionSite.createEmptySession("Auth-Selenium")
            # Run the recorded login script through the local ZAP proxy.
            import subprocess as sub
            selenese = sub.Popen("java -jar C:\Users\*\Desktop\Selenium_Custom.b1f2cf5.jar --strict-exit-code --proxy localhost:8080 --screenshot-on-fail C:\Users\*\Desktop\screehns --set-speed 2000 --cli-args /private-window --cli-args about:blank C:\Users\*\Desktop\WebGoat.html", stdout=sub.PIPE)
            #Get Port from config!
            #Lib Folder
            #Test Case by naming
            output = selenese.communicate()[0]
            returns = selenese.returncode
            # Non-zero exit code from the Selenese runner means login failed.
            if returns != 0:
                print "AUTHENTICATION FAILURE!"
                print output
            else:
                print "Auth-SUCCESS"
        else:
            pass
            #print "rcv-ignore"
    else:
        pass
        #print "via-proxy " + str(msg.getResponseHeader().getStatusCode())
| apache-2.0 | Python | |
a4ca12fb7f3525206a9a921ab64e31bc145cc9d3 | Create __init__.py | OdooCommunityWidgets/mass_mailing_themes_community,OdooCommunityWidgets/mass_mailing_themes_boilerplate | __init__.py | __init__.py | mit | Python | ||
102ad365089794d337820714ab281f99af0797b0 | update make_base_url | forrest-mao/python-sdk,weilaihui/tornado-qiniu,jemygraw/python-sdk,qiniu/python-sdk,blahgeek/python-sdk | qiniu/auth_token.py | qiniu/auth_token.py | # -*- coding: utf-8 -*-
import json
import base64
import time
import rpc
import config
import urllib
import auth_digest
class PutPolicy(object):
    """Upload policy: serialized to compact JSON and signed into an uptoken."""
    scope = None        # bucketName, or "bucketName:key"
    expires = 3600      # token lifetime in seconds (default 3600)
    callbackUrl = None
    callbackBody = None
    returnUrl = None
    returnBody = None
    endUser = None
    asyncOps = None

    def __init__(self, scope):
        self.scope = scope

    def token(self, mac=None):
        """Serialize this policy and sign it with *mac* (default Mac())."""
        if mac is None:
            mac = auth_digest.Mac()
        policy = dict(
            scope = self.scope,
            deadline = int(time.time()) + self.expires,
        )
        # Optional fields are only serialized when explicitly set.
        for field in ('callbackUrl', 'callbackBody', 'returnUrl',
                      'returnBody', 'endUser', 'asyncOps'):
            value = getattr(self, field)
            if value is not None:
                policy[field] = value
        data = json.dumps(policy, separators=(',',':'))
        return mac.sign_with_data(data)
class GetPolicy(object):
    """Builds signed (private) download URLs."""
    expires = 3600  # link lifetime in seconds

    def __init__(self):
        pass

    def make_request(self, base_url, mac=None):
        '''
        * return private_url
        '''
        if mac is None:
            mac = auth_digest.Mac()
        deadline = int(time.time()) + self.expires
        # Append the expiry with the correct query-string separator.
        separator = '&' if '?' in base_url else '?'
        signed_url = '%s%se=%s' % (base_url, separator, str(deadline))
        token = mac.sign(signed_url)
        return '%s&token=%s' % (signed_url, token)
def make_base_url(domain, key):
    '''
    * domain => str
    * key => str
    * return base_url
    '''
    return 'http://{0}/{1}'.format(domain, urllib.quote(key))
| # -*- coding: utf-8 -*-
import json
import base64
import time
import rpc
import config
import urllib
import auth_digest
class PutPolicy(object):
    """Upload policy: serialized to compact JSON and signed into an uptoken."""
    scope = None  # bucketName, or "bucketName:key"
    expires = 3600  # token lifetime in seconds (default 3600)
    callbackUrl = None
    callbackBody = None
    returnUrl = None
    returnBody = None
    endUser = None
    asyncOps = None
    def __init__(self, scope):
        self.scope = scope
    def token(self, mac=None):
        """Serialize this policy and sign it with *mac* (default Mac())."""
        if mac is None:
            mac = auth_digest.Mac()
        token = dict(
            scope = self.scope,
            deadline = int(time.time()) + self.expires,
        )
        # Optional fields are only serialized when explicitly set.
        if self.callbackUrl is not None:
            token["callbackUrl"] = self.callbackUrl
        if self.callbackBody is not None:
            token["callbackBody"] = self.callbackBody
        if self.returnUrl is not None:
            token["returnUrl"] = self.returnUrl
        if self.returnBody is not None:
            token["returnBody"] = self.returnBody
        if self.endUser is not None:
            token["endUser"] = self.endUser
        if self.asyncOps is not None:
            token["asyncOps"] = self.asyncOps
        b = json.dumps(token, separators=(',',':'))
        return mac.sign_with_data(b)
class GetPolicy(object):
    """Builds signed (private) download URLs."""
    expires = 3600  # link lifetime in seconds
    def __init__(self):
        pass
    def make_request(self, base_url, mac=None):
        '''
        * return private_url
        '''
        if mac is None:
            mac = auth_digest.Mac()
        deadline = int(time.time()) + self.expires
        # Append the expiry with the correct query-string separator.
        if '?' in base_url:
            base_url += '&'
        else:
            base_url += '?'
        base_url = '%se=%s' % (base_url, str(deadline))
        token = mac.sign(base_url)
        return '%s&token=%s' % (base_url, token)
def make_base_url(domain, key):
    '''
    * domain => str
    * key => str (URL-quoted before use)
    * return base_url
    '''
    return ''.join(['http://', domain, '/', urllib.quote(key)])
| mit | Python |
647e3b463d1b71ea1a3bd34d11e6a5855b4ea70d | Create __init__.py | annettechun25/cs3240-labdemo | __init__.py | __init__.py | mit | Python | ||
988598d0385ce63d951b3cc0817392cf2271575c | change encoding to utf8 | xxxspy/sae_storage_push | __init__.py | __init__.py | import sys
if not sys.getdefaultencoding()=='utf8':
reload(sys)
sys.setdefaultencoding('utf8')
| isc | Python | |
04530dd3def6f8ff158df7b607c367f5f273fd1b | add pythainlp.tools | PyThaiNLP/pythainlp | pythainlp/tools/__init__.py | pythainlp/tools/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import,unicode_literals
def test_segmenter(segmenter, test):
words = test
result = segmenter
correct = (result == words)
if not correct:
print ('expected', words)
print('got ', result)
return correct
if __name__ == "__main__":
    # Smoke test: tokenize one Thai sentence with each engine and compare
    # against the hand-segmented reference token list.
    from pythainlp.tokenize import word_tokenize
    text="ฉันเป็นคนและฉันรักภาษาไทยฉันอยู่ประเทศไทยฉันศึกษาอยู่ที่มหาวิทยาลัยพายุฝนกำลังมาต้องหลบแล้วล่ะคุณสบายดีไหม"
    test=["ฉัน","เป็น","คน","และ","ฉัน","รัก","ภาษาไทย","ฉัน","อยู่","ประเทศไทย","ฉัน","ศึกษา","อยู่","ที่","มหาวิทยาลัย","พายุฝน","กำลัง","มา","ต้อง","หลบ","แล้ว","ล่ะ","คุณ","สบายดี","ไหม"]
    print("icu :")
    pyicu=test_segmenter(word_tokenize(text,engine='icu'),test)
    print(pyicu)
    print("newmm :")
    newmm=test_segmenter(word_tokenize(text,engine='newmm'),test)
    print(newmm)
    print("mm :")
    mm=test_segmenter(word_tokenize(text,engine='mm'),test)
    print(mm)
879032d31d8cf89df14489107015b7f29ace1490 | Solve designer door mat | rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank | python/designer-door-mat.py | python/designer-door-mat.py | # Size: 7 x 21
# ---------.|.---------
# ------.|..|..|.------
# ---.|..|..|..|..|.---
# -------WELCOME-------
# ---.|..|..|..|..|.---
# ------.|..|..|.------
# ---------.|.---------
class DoorMat:
    """Renders HackerRank's "Designer Door Mat" pattern for an N x M grid.

    N is the (odd) number of rows, M the width; the middle row carries
    the word WELCOME, the others a mirrored '.|.' motif.
    """
    DASH = "-"
    DOT = "."
    PIPE = "|"
    WELCOME = "WELCOME"

    def __init__(self, N, M):
        self.N = N
        self.M = M
        # Row index of the central WELCOME line (requires odd N).
        self.middle_line_number = (self.N - 1) // 2

    def print_door_mat(self):
        """Print every row of the mat, one line at a time."""
        # Fix: was a list comprehension used purely for its side effects.
        for line_number in range(self.N):
            print(self.line(line_number))

    def line(self, line_number):
        """Return the rendered text of row *line_number*."""
        if line_number == self.middle_line_number:
            return self.welcome_line()
        return self.design_line(line_number)

    def welcome_line(self):
        """Center WELCOME in a field of dashes."""
        return '{:{fill}^{line_length}}'.format(self.WELCOME, fill=self.DASH, line_length=self.M)

    def design_line(self, line_number):
        """Center this row's '.|.' motif in a field of dashes."""
        return '{:{fill}^{line_length}}'.format(self.design_for_line(line_number), fill=self.DASH, line_length=self.M)

    def design_for_line(self, line_number):
        return self.DOT + self.middle_design(line_number) + self.DOT

    def middle_design(self, line_number):
        # Row 0 holds a single pipe (row N-1 also resolves to one via
        # zero repeats below).
        if line_number == 0:
            return self.PIPE
        pattern = self.PIPE + self.DOT + self.DOT
        return pattern * self.pattern_repeat_for_line(line_number) + self.PIPE

    def pattern_repeat_for_line(self, line_number):
        """Number of '|..' repeats for this row, mirrored about the middle."""
        if line_number < self.middle_line_number:
            return line_number * 2
        return (self.N - 1 - line_number) * 2
# Read "N M" from stdin (N odd rows; per the problem, M == 3*N -- not
# validated here) and render the mat.
N, M = map(int, input().strip().split(" "))
doormat = DoorMat(N, M)
doormat.print_door_mat()
| mit | Python | |
0b9926313831b8fd5c2e72cfc2559f7bdd1c2855 | Add class utils | abenicho/isvr | nisl/_utils/class_helper.py | nisl/_utils/class_helper.py | from sets import Set
import inspect
def get_params(_class, _object, ignore=None):
    """Collect _object's current values for _class's declared parameters.

    Always drops the bookkeeping parameters (memory, memory_level, verbose,
    copy) plus any extra names listed in *ignore*; remaining parameters are
    refreshed from the instance's attributes when present.
    """
    skipped = set(['memory', 'memory_level', 'verbose', 'copy'])
    if ignore is not None:
        skipped.update(ignore)
    # params is a dictionary
    params = _class.get_params(_object)
    for name in skipped:
        params.pop(name, None)
    for name in params:
        if hasattr(_object, name):
            params[name] = getattr(_object, name)
    return params
def retrieve_scope():
    """Return 'ClassName.method' describing the caller's caller, or 'Unknown'.

    Inspects the frame two levels up; when that frame belongs to a bound
    method ('self' in its locals) the result is 'ClassName.method_name'.
    Fix: the original reached 'Unknown' for plain functions only by raising
    and swallowing an UnboundLocalError; the non-method path is now explicit.
    """
    try:
        caller_frame = inspect.currentframe().f_back.f_back
        if 'self' in caller_frame.f_locals:
            cls_name = caller_frame.f_locals['self'].__class__.__name__
            return '%s.%s' % (cls_name, caller_frame.f_code.co_name)
        return 'Unknown'
    except Exception:
        # Frame introspection can fail on exotic interpreters; stay silent.
        return 'Unknown'
| bsd-3-clause | Python | |
f71d6b2fe05290ab976e3ba433185ec649a35c20 | Move get_context_from_function_and_args() to context.py | varunarya10/oslo.log,JioCloud/oslo.log,dims/oslo.log,zzicewind/oslo.log,openstack/oslo.log,magic0704/oslo.log,akash1808/oslo.log,meganjbaker/oslo.log | openstack/common/context.py | openstack/common/context.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
import uuid
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_tok = auth_tok
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_tok,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import uuid
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_tok = auth_tok
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_tok,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
| apache-2.0 | Python |
417e76de84f067a90fdcda93ee82d63cc7e56c7b | Add downloader.py | fsteffek/steamplog | steamplog/downloader.py | steamplog/downloader.py | from __future__ import print_function
import sys
import json
import urllib2
class Downloader(object):
'''Download data via Steam-API'''
def __init__(self, api_key):
self.API = api_key
def download_stats(self, steam_id):
'''Download owned games from Steam Web API'''
URL = ['https://api.steampowered.com/'
'IPlayerService/GetOwnedGames/v0001/'
'?include_played_free_games=1&format=json',
'&key=', self.API,
'&steamid=', steam_id]
URL = ''.join([url_str for url_str in URL])
try:
request = urllib2.urlopen(URL)
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print('We failed to reach the server.', file=sys.stderr)
print('Reason: ', e.reason, file=sys.stderr)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.',
file=sys.stderr)
print('Error code: ', e.code, file=sys.stderr)
sys.exit(1)
response = json.load(request)
if "games" not in response['response']:
print >> sys.stderr, 'ERROR: No games found'
sys.exit(1)
else:
return response['response']['games']
def download_applist(self):
'''Download app names dictionary and return as list'''
URL = 'http://api.steampowered.com/ISteamApps/GetAppList/v2'
try:
request = urllib2.urlopen(URL)
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print('We failed to reach ', URL, file=sys.stderr)
print('Reason: ', e.reason, file=sys.stderr)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.',
file=sys.stderr)
print('Error code: ', e.code, file=sys.stderr)
sys.exit(1)
json_dict = json.load(request)
return [(app['appid'], app['name'])
for app in json_dict['applist']['apps']]
| mit | Python | |
f9b8a92359e15883c5ee7b4dbc259001d59e379d | introduce bnip classes | Islast/BrainNetworksInPython,Islast/BrainNetworksInPython | BrainNetworksInPython/scripts/classes.py | BrainNetworksInPython/scripts/classes.py | import numpy as np
import networkx as nx
import pandas as pd
import make_graphs as mkg
import graph_measures as gm
def cascader(dict1, dict2, name):
    """For each key of dict1, store dict2[key] under *name* inside dict1[key].

    Mutates the nested dicts of dict1 in place. The return value mirrors the
    original comprehension idiom: a dict mapping every key to None.
    """
    result = {}
    for key, nested in dict1.items():
        nested[name] = dict2[key]
        result[key] = None
    return result
class BrainNetwork(nx.classes.graph.Graph):
    """Weighted brain graph with named, centroid-annotated nodes.

    *network* may be an existing networkx graph (copied), a pandas
    DataFrame or a numpy adjacency matrix (diagonal zeroed in place --
    note this mutates the caller's array when an ndarray is passed).
    NOTE(review): if *network* is none of those types, M is never bound
    and this raises UnboundLocalError -- confirm intended input types.
    """
    def __init__(self,
                 network,
                 parcellation,
                 centroids,
                 names_308_style=False):
        if isinstance(network, nx.classes.graph.Graph):
            # Copy graph
            nx.classes.graph.Graph.__init__(self)
            self.__dict__.update(network.__dict__)
        else:
            # Create weighted graph from an adjacency matrix
            if isinstance(network, pd.DataFrame):
                M = network.values
            elif isinstance(network, np.ndarray):
                M = network
            # Self-connections are not meaningful here; zero the diagonal.
            M[np.diag_indices_from(M)] = 0
            nx.classes.graph.Graph.__init__(self, M)
        # assign names and centroids to nodes
        mkg.assign_node_names(self,
                              parcellation,
                              names_308_style=names_308_style)
        mkg.assign_node_centroids(self, centroids)
class BinaryBrainNetwork(nx.classes.graph.Graph):
    """A BrainNetwork binarised by thresholding its edges at a target cost."""

    def __init__(self, brainnetwork, cost, mst=True):
        nx.classes.graph.Graph.__init__(self)
        # Copy all graph data from the weighted source network.
        self.__dict__.update(brainnetwork.__dict__)
        # Bug fix: this previously passed `self.cost`, an attribute that is
        # never set anywhere before this point; the constructor argument
        # `cost` is what is meant.
        # NOTE(review): rebinding the local name `self` only has an effect
        # if mkg.threshold_graph returns a different object -- confirm.
        self = mkg.threshold_graph(self, cost, mst=mst)
        self.graph['cost'] = cost
        self.graph['mst'] = mst

    def partition(self):
        """Compute the modular partition and attach module ids to each node."""
        nodal_partition, module_partition = gm.calc_nodal_partition(self)
        cascader(self._node, nodal_partition, 'module')
        self.graph['partition'] = module_partition

    def calculate_nodal_measures(self):
        '''
        Calculates a standard set of nodal graph measures and stores each
        as a node attribute.
        '''
        # ==== SET UP ======================
        # Compute the partition lazily if it has not been done yet.
        if 'partition' not in self.graph:
            self.partition()
        # ==== MEASURES ====================
        # ---- Degree ----------------------
        cascader(self._node, dict(self.degree), 'degree')
        # ---- Closeness -------------------
        cascader(self._node, nx.closeness_centrality(self), 'closeness')
        # ---- Betweenness -----------------
        cascader(self._node, nx.betweenness_centrality(self), 'betweenness')
        # ---- Shortest path length --------
        cascader(self._node, gm.shortest_path(self), 'shortest_path')
        # ---- Clustering ------------------
        cascader(self._node, nx.clustering(self), 'clustering')
        # ---- Participation coefficent ----
        cascader(self._node, gm.participation_coefficient(self,
                 self.graph['partition']), 'pc')
        # ---- Euclidean distance and ------
        # ---- interhem proporition --------
        gm.assign_nodal_distance(self)
        gm.assign_interhem(self)

    def export_nodal_measures(self):
        '''
        Returns the node attribute data from G as a pandas dataframe.
        '''
        return pd.DataFrame.from_dict(self._node).transpose()
| mit | Python | |
cd9da9cf624a80acaebe92e075760ff8c2dbb7b1 | Add test_first_audit_catchup_during_ordering | evernym/plenum,evernym/zeno | plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py | plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py | import pytest
from plenum.test import waits
from plenum.common.constants import LEDGER_STATUS, DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import MessageReq, CatchupReq
from plenum.server.catchup.node_leecher_service import NodeLeecherService
from plenum.test.delayers import ppDelay, pDelay, cDelay, DEFAULT_DELAY
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_request.test_timestamp.helper import get_timestamp_suspicion_count
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules, start_delaying, stop_delaying_and_process
from stp_core.loop.eventually import eventually
def delay_domain_ledger_catchup():
    """Return a stasher predicate that delays only domain-ledger catchup
    traffic (LEDGER_STATUS requests and CatchupReq for DOMAIN_LEDGER_ID)."""
    def delay(msg):
        # Stashers deliver (message, frm) tuples; unwrap the message.
        msg = msg[0]
        if isinstance(msg, MessageReq) and \
                msg.msg_type == LEDGER_STATUS and \
                msg.params.get('ledgerId') == DOMAIN_LEDGER_ID:
            return DEFAULT_DELAY
        if isinstance(msg, CatchupReq) and \
                msg.ledgerId == DOMAIN_LEDGER_ID:
            return DEFAULT_DELAY
    return delay
def test_first_audit_catchup_during_ordering(tdir, tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """A node whose first catchup stalls after the audit ledger must stash
    PrePrepares ordered meanwhile and resume ordering without timestamp
    suspicions once catchup completes."""
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    other_stashers = [node.nodeIbStasher for node in other_nodes]

    def lagging_node_state() -> NodeLeecherService.State:
        return lagging_node.ledgerManager._node_leecher._state

    def check_lagging_node_is_not_syncing_audit():
        assert lagging_node_state() != NodeLeecherService.State.SyncingAudit

    # Prevent lagging node from catching up domain ledger (and finishing catchup)
    with delay_rules(other_stashers, delay_domain_ledger_catchup()):
        # Start catchup on lagging node
        lagging_node.ledgerManager.start_catchup()
        assert lagging_node_state() == NodeLeecherService.State.SyncingAudit

        # Ensure that audit ledger is caught up by lagging node
        looper.run(eventually(check_lagging_node_is_not_syncing_audit))
        # Catchup must still be in progress (domain ledger is delayed).
        assert lagging_node_state() != NodeLeecherService.State.Idle

        # Order request on all nodes except lagging one where they goes to stashed state
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 1)

    # Now catchup should end and lagging node starts processing stashed PPs
    # and resumes ordering

    # ensure that all nodes will have same data after that
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # ensure that no suspicions about obsolete PP have been raised
    assert get_timestamp_suspicion_count(lagging_node) == 0
| apache-2.0 | Python | |
cc300b1c0f6ccc4ca50b4f4d20f5f351af698cfc | Add files via upload | bboysingle/dancespace,bboysingle/dancespace | nthprime.py | nthprime.py | #encoding=utf8
'''
Find nth prime number
'''
from math import sqrt
def prime(maxn):
    """Yield, in increasing order, every prime strictly below *maxn*."""
    for candidate in range(1, maxn):
        if is_prime(candidate):
            yield candidate

def is_prime(n):
    """Return True iff *n* is prime by trial division up to sqrt(n).

    Fix: values below 2 (0 and negatives) are now rejected explicitly;
    previously 0 slipped through as "prime" and negatives crashed sqrt().
    """
    if n < 2:
        return False
    for divisor in range(2, int(sqrt(n)) + 1):
        if n % divisor == 0:
            return False
    return True
# Print the 11th prime below 100 (zero-based index 10 -> 31). Note the loop
# keeps enumerating after the match instead of breaking.
for i, n in enumerate(prime(100)):
    if i == 10:
        print(n)
| apache-2.0 | Python | |
98d04ccc3549b0b7597b995b83d60ae570108b8a | Create Adafruit_HTU21D.py | jimfinoc/takeTempAndHumidity | Adafruit_HTU21D.py | Adafruit_HTU21D.py | #!/usr/bin/python
import time
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# HTU21D Class
#
# Code only test with a Sparkfun HTU21D Sensor module on a Beaglebone Black.
# It has been reported that an I2C address issue was seen on a Pi.
# ===========================================================================
class HTU21D:
  """Driver for the HTU21D temperature/humidity sensor over I2C.

  Read methods return -255 when the on-wire CRC check fails.
  """
  i2c = None

  # HTU21D Address
  address = 0x40

  # Commands
  TRIGGER_TEMP_MEASURE_HOLD = 0xE3
  TRIGGER_HUMD_MEASURE_HOLD = 0xE5
  READ_USER_REG = 0xE7

  # Constructor
  def __init__(self):
    self.i2c = Adafruit_I2C(self.address)

  def readUserRegister(self):
    "Read the user register byte"
    return self.i2c.readU8(self.READ_USER_REG)

  def readTemperatureData(self):
    "Read 3 temperature bytes from the sensor and return degrees Celsius"
    # value[0], value[1]: Raw temperature data
    # value[2]: CRC
    value = self.i2c.readList(self.TRIGGER_TEMP_MEASURE_HOLD, 3)

    # CRC Check
    if not self.crc8check(value):
      return -255

    rawTempData = ( value[0] << 8 ) + value[1]

    # Clear the status bits (the two lowest bits are status, not data)
    rawTempData = rawTempData & 0xFFFC;

    # Calculate the actual temperature (conversion from the datasheet)
    actualTemp = -46.85 + (175.72 * rawTempData / 65536)

    return actualTemp

  def readHumidityData(self):
    "Read 3 humidity bytes from the sensor and return relative humidity in %"
    # value[0], value[1]: Raw relative humidity data
    # value[2]: CRC
    value = self.i2c.readList(self.TRIGGER_HUMD_MEASURE_HOLD, 3)

    # CRC Check
    if not self.crc8check(value):
      return -255

    rawRHData = ( value[0] << 8 ) + value[1]

    # Clear the status bits (the two lowest bits are status, not data)
    rawRHData = rawRHData & 0xFFFC;

    # Calculate the actual RH (conversion from the datasheet)
    actualRH = -6 + (125.0 * rawRHData / 65536)

    return actualRH

  def crc8check(self, value):
    "Calculate the CRC8 for the data received; True when the CRC matches"
    # Ported from Sparkfun Arduino HTU21D Library: https://github.com/sparkfun/HTU21D_Breakout
    remainder = ( ( value[0] << 8 ) + value[1] ) << 8
    remainder |= value[2]

    # POLYNOMIAL = 0x0131 = x^8 + x^5 + x^4 + 1
    # divsor = 0x988000 is the 0x0131 polynomial shifted to farthest left of three bytes
    divsor = 0x988000

    # Long division over GF(2): 16 data bits are folded into the 8-bit CRC.
    for i in range(0, 16):
      if( remainder & 1 << (23 - i) ):
        remainder ^= divsor
      divsor = divsor >> 1

    if remainder == 0:
      return True
    else:
      return False
| apache-2.0 | Python | |
3b42aa408472755ecb4c63d60f8bb14f43b4e930 | Write high-level code for vector-based algorithms (inner product, cross product and norm). | jonancm/viennagrid-python,jonancm/viennagrid-python,jonancm/viennagrid-python | viennagrid-python/viennagrid/algorithms.py | viennagrid-python/viennagrid/algorithms.py | #-*- coding: utf-8 -*-
import viennagrid_wrapper as _wrapper
_SUPPORTED_NORMS = (1, 2, 'inf')
###########################
# VECTOR-BASED ALGORITHMS #
###########################
def inner_prod(point1, point2):
    """Return the inner product of two points.

    If either point's native type lacks the operation it is converted with
    to_cartesian(); a TypeError is raised when no compatible representation
    exists (different dimensions, or point1 cannot provide inner_prod).
    """
    # Try to get method 'inner_prod' from 'point1'. If it doesn't have the
    # method, it is not a cartesian point: convert to cartesian coordinates
    # and retry. If it still doesn't have the method, raise an exception.
    try:
        inner_prod_fn = point1.__getattribute__('inner_prod')
    except AttributeError:
        casted_pnt1 = point1.to_cartesian()
        try:
            inner_prod_fn = casted_pnt1.__getattribute__('inner_prod')
        except AttributeError:
            raise TypeError('point1 has no method named inner_prod')
    else:
        casted_pnt1 = point1
    # If point types are equal, compute directly. Otherwise convert point2
    # to point1's type; if they still differ, the dimensions are
    # incompatible.
    if casted_pnt1.__class__ is point2.__class__:
        # Bug fix: both calls below used the undefined name 'inner_prod_fun'
        # (a typo for 'inner_prod_fn'), raising NameError at runtime.
        return inner_prod_fn(point2)
    else:
        casted_pnt2 = point2.to_cartesian()
        if casted_pnt1.__class__ is casted_pnt2.__class__:
            return inner_prod_fn(casted_pnt2)
        else:
            raise TypeError('incompatible point types')
def cross_prod(point1, point2):
    """Return the cross product of two points, converting operands to
    cartesian coordinates when a native implementation is missing.

    Raises TypeError when point1 cannot provide cross_prod or when the two
    points cannot be brought to a common type.
    """
    cross_prod_fn = getattr(point1, 'cross_prod', None)
    if cross_prod_fn is None:
        # Not a cartesian point: convert and look the method up again.
        casted_pnt1 = point1.to_cartesian()
        cross_prod_fn = getattr(casted_pnt1, 'cross_prod', None)
        if cross_prod_fn is None:
            raise TypeError('point1 has no method named cross_prod')
    else:
        casted_pnt1 = point1
    # Same type: compute directly; otherwise try converting point2 first.
    if casted_pnt1.__class__ is point2.__class__:
        return cross_prod_fn(point2)
    casted_pnt2 = point2.to_cartesian()
    if casted_pnt1.__class__ is casted_pnt2.__class__:
        return cross_prod_fn(casted_pnt2)
    raise TypeError('incompatible point types')
def norm(point, norm_type=2):
    """Return the *norm_type* norm of *point* via the C++ wrapper.

    norm_type must be one of 1, 2 or 'inf' (see _SUPPORTED_NORMS); anything
    else raises ValueError naming the offending value.
    """
    if norm_type in _SUPPORTED_NORMS:
        norm_type = str(norm_type)
        norm_fn = _wrapper.__getattribute__('norm_%(norm_type)s' % locals())
        return norm_fn(point)
    else:
        # Bug fix: the message was never interpolated (missing `% locals()`),
        # so the literal '%(norm_type)s' placeholder reached the user.
        raise ValueError('unsupported norm type: %(norm_type)s' % locals())
| mit | Python | |
d0b16ba083cafc4eb8da73e56a082a4959c1ac53 | Implement the training with proxy NCA loss | ronekko/deep_metric_learning | main_proxy_nca.py | main_proxy_nca.py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 20:49:04 2017
@author: sakurai
"""
import colorama
import numpy as np
import chainer.functions as F
from sklearn.model_selection import ParameterSampler
from lib.functions.n_pair_mc_loss import n_pair_mc_loss
from lib.common.utils import LogUniformDistribution, load_params
from lib.common.train_eval import train
colorama.init()
def squared_distance_matrix(X, Y=None):
    """Pairwise squared Euclidean distances between rows of X and Y
    (Y defaults to X), computed by broadcasting the difference tensor."""
    if Y is None:
        Y = X
    diff = X[:, None] - Y[None]
    return F.sum(diff * diff, axis=-1)
def squared_distance_matrix2(X, Y=None):
    """Pairwise squared Euclidean distances via the expansion
    ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2 (memory-friendlier variant)."""
    XX = F.batch_l2_norm_squared(X)
    if Y is None:
        Y, YY = X, XX
    else:
        YY = F.batch_l2_norm_squared(Y)
    m, n = len(X), len(Y)
    d = -2.0 * F.matmul(X, Y.T)
    d += F.broadcast_to(F.expand_dims(XX, 1), (m, n))
    d += F.broadcast_to(F.expand_dims(YY, 0), (m, n))
    # TODO: Is this necessary?
    # Clamp tiny negatives caused by floating-point cancellation.
    return F.relu(d)
def lossfun_one_batch(model, params, batch):
    """Proxy-NCA loss for one minibatch.

    Each example is pulled toward the (normalized) proxy of its own class
    and pushed from the others via a softmax over negative squared
    distances. (The previous comment about anchor/positive halves belonged
    to the N-pair loss and did not apply here.)
    """
    xp = model.xp
    x_data, c_data = batch
    x_data = xp.asarray(x_data)
    y = model(x_data)  # y must be normalized as unit vectors

    # Forcely normalizing the norm of each proxy
    # TODO: Is this safe? (This operation is done out of computation graph)
    # model.P.array /= xp.linalg.norm(model.P.array, axis=1, keepdims=True)
    # Distances from each embedding to every (unit-normalized) proxy.
    d = squared_distance_matrix(y, F.normalize(model.P))
    prob = F.softmax(-d)
    # log(1/p - 1): low when the true-class proxy probability is high.
    loss = F.log(1.0 / prob[np.arange(len(y)), c_data] - 1.0)
    return F.average(loss)
if __name__ == '__main__':
    param_filename = 'proxy_nca_cars196.yaml'

    # Toggle between random hyperparameter search and a single YAML-driven run.
    random_search_mode = True
    random_state = None
    num_runs = 10000
    save_distance_matrix = False
    if random_search_mode:
        # Distributions sampled per run by ParameterSampler.
        param_distributions = dict(
            learning_rate=LogUniformDistribution(low=1e-6, high=1e-2),
            l2_weight_decay=LogUniformDistribution(low=1e-5, high=1e-2),
            out_dim=[64, 128],
            optimizer=['RMSprop', 'Adam']  # 'RMSprop' or 'Adam'
        )
        # Settings shared by every run.
        static_params = dict(
            num_epochs=5,
            num_batches_per_epoch=1875,
            batch_size=32,
#            out_dim=128,
#            learning_rate=7e-5,
            crop_size=224,
            normalize_output=True,
#            l2_weight_decay=5e-3,
#            optimizer='Adam',  # 'Adam' or 'RMSprop'
            distance_type='cosine',  # 'euclidean' or 'cosine'
            dataset='cars196',  # 'cars196' or 'cub200_2011' or 'products'
            method='clustering'  # sampling method for batch construction
        )
        sampler = ParameterSampler(param_distributions, num_runs, random_state)

        for random_params in sampler:
            params = {}
            params.update(random_params)
            params.update(static_params)
            # train() may signal an early stop for the whole search.
            stop = train(__file__, lossfun_one_batch, params,
                         save_distance_matrix)
            if stop:
                break
    else:
        print('Train once using config file "{}".'.format(param_filename))
        params = load_params(param_filename)
        train(__file__, lossfun_one_batch, params, save_distance_matrix)
| mit | Python | |
596b11351ee78d715833ef0681fdc1634a425167 | add show_flirt_references | williballenthin/viv-utils | viv_utils/scripts/show_flirt_references.py | viv_utils/scripts/show_flirt_references.py | import sys
import gzip
import logging
import argparse
import binascii
import flirt
import viv_utils
import viv_utils.flirt
import vivisect.const
logger = logging.getLogger("get_flirt_matches")
def load_flirt_signature(path):
    """Load and parse FLIRT signatures from the file at *path*.

    The format is chosen by file extension:
      - ``.sig``:    compiled binary signature file
      - ``.pat``:    textual pattern file
      - ``.pat.gz``: gzip-compressed textual pattern file

    Args:
      path (str): path to the signature file.

    Returns:
      list: parsed FLIRT signatures.

    Raises:
      ValueError: if the extension is none of the above.
    """
    if path.endswith(".sig"):
        with open(path, "rb") as f:
            sigs = flirt.parse_sig(f.read())
    elif path.endswith(".pat"):
        with open(path, "rb") as f:
            sigs = flirt.parse_pat(f.read().decode("utf-8"))
    elif path.endswith(".pat.gz"):
        # ".pat.gz" does not hit the ".pat" branch above: endswith compares
        # the final ".gz" suffix, so the ordering here is safe.
        with gzip.open(path, "rb") as f:
            sigs = flirt.parse_pat(f.read().decode("utf-8"))
    else:
        # bug fix: error message typo "unexpect" -> "unexpected"
        raise ValueError("unexpected signature file extension: " + path)
    return sigs
def get_workspace(path, sigpaths):
    """Load a vivisect workspace for the sample at *path* and run analysis.

    NOTE(review): *sigpaths* is currently unused here -- presumably meant
    to feed FLIRT matching during analysis; confirm before relying on it.
    """
    vw = viv_utils.getWorkspace(path, analyze=False, should_save=False)
    vw.analyze()
    return vw
def main():
    """For each function vivisect recovers, print FLIRT signature matches
    that carry named "reference" entries, plus details (bytes, disasm,
    xrefs) about the instruction at each referenced address."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d", "--debug", action="store_true", help="enable debugging output on STDERR"
    )
    parser.add_argument(
        "-q", "--quiet", action="store_true", help="disable all output but errors"
    )
    parser.add_argument(
        "signature",
        type=str,
        help="use the given signatures to identify library functions, file system paths to .sig/.pat files.",
    )
    parser.add_argument(
        "sample",
        type=str,
        help="path to sample to analyze",
    )
    args = parser.parse_args()

    # Configure verbosity; vivisect's own loggers are noisy, so they are
    # capped at a higher threshold than the root logger.
    if args.quiet:
        logging.basicConfig(level=logging.WARNING)
        logging.getLogger().setLevel(logging.WARNING)
    elif args.debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)
        logging.getLogger("vivisect").setLevel(logging.INFO)
        logging.getLogger("vivisect.base").setLevel(logging.INFO)
        logging.getLogger("vivisect.impemu").setLevel(logging.INFO)
        logging.getLogger("vtrace").setLevel(logging.INFO)
        logging.getLogger("envi").setLevel(logging.INFO)
        logging.getLogger("envi.codeflow").setLevel(logging.INFO)
    else:
        logging.basicConfig(level=logging.INFO)
        logging.getLogger().setLevel(logging.INFO)
        logging.getLogger("vivisect").setLevel(logging.WARNING)

    vw = get_workspace(args.sample, [args.signature])

    sigs = load_flirt_signature(args.signature)
    logger.debug("flirt: sig count: %d", len(sigs))
    matcher = flirt.compile(sigs)

    seen = set()  # reference VAs already reported, to avoid duplicates
    for function in vw.getFunctions():
        # read a generous chunk of bytes from the function start to match on
        buf = vw.readMemory(function, 0x10000)
        for match in matcher.match(buf):
            # keep only "reference" entries whose VA was not reported before
            references = list(filter(lambda n: n[1] == "reference" and (function + n[2]) not in seen, match.names))
            if not references:
                continue
            print("matching function: 0x%x" % (function))
            print(" candidate match: 0x%x: %s" % (function, match))
            print(" references:")
            for (ref_name, _, ref_offset) in references:
                ref_va = function + ref_offset
                seen.add(ref_va)
                print(" - 0x%x: %s" % (ref_va, ref_name))
                # describe the location (instruction) that holds the reference
                loc = vw.getLocation(ref_va)
                loc_va = loc[vivisect.const.L_VA]
                print(" loc: 0x%x" % (loc_va))
                print(" delta: 0x%x" % (ref_va - loc_va))
                size = loc[vivisect.const.L_SIZE]
                buf = vw.readMemory(loc_va, size)
                print(" bytes: %s" % (binascii.hexlify(buf).decode("ascii")))
                print(" %s^" % (" " * (ref_va - loc_va)))
                insn = vw.parseOpcode(loc_va)
                print(" insn: %s" % (insn))
                # all distinct targets referenced from this instruction
                print(" xrefs:")
                for xref in sorted(set(map(lambda x: x[vivisect.const.XR_TO], vw.getXrefsFrom(loc_va)))):
                    print(" - 0x%x" % (xref))
                pass

    return 0


if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 | Python | |
b1316b3db89fbee6e6c1ad807e2e36b8b4dd1874 | Add a script to fix garbled activities | kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu | Attic/act-fixup.py | Attic/act-fixup.py | from parliament.models import *
from django.db import transaction, reset_queries
def _touch_keywords(label, queryset, update_fields):
    """Re-save every object in *queryset* with keywords_changed set.

    Runs inside a single transaction; reset_queries() is called every
    1000 objects so Django's debug query log does not grow unboundedly
    during the long loop.  Only *update_fields* are written back.
    """
    with transaction.atomic():
        print("%s %d" % (label, queryset.count()))
        for idx, obj in enumerate(queryset.all()):
            if idx % 1000 == 0:
                reset_queries()
                print(idx)  # progress indicator
            obj.keywords_changed = True
            obj.save(update_fields=update_fields)


# The three passes were previously three copy-pasted `if True:` blocks;
# behavior (same order, same prints, same update_fields) is unchanged.
_touch_keywords("Documents", Document.objects, ['origin_id'])
_touch_keywords("Signatures", DocumentSignature.objects, ['doc'])
_touch_keywords("Statements", Statement.objects, ['item'])
| agpl-3.0 | Python | |
ad78abc4073cb26b192629aed9e9f8e3f5d9e94a | Test for GPI newline fix; for geneontology/go-site#1681 | biolink/ontobio,biolink/ontobio | tests/test_gpiwriter.py | tests/test_gpiwriter.py | import io
from ontobio.io import entitywriter, gafgpibridge
def test_header_newline():
    """GpiWriter must emit the GPI version header followed by the entity
    row, with every line (including the last) newline-terminated."""
    record = {
        'id': "MGI:MGI:1918911",
        'label': "0610005C13Rik",  # db_object_symbol
        'full_name': "RIKEN cDNA 0610005C13 gene",  # db_object_name
        'synonyms': [],
        'type': ["gene"],  # db_object_type
        'parents': "",  # GAF does not have this field, but it's optional in GPI
        'xrefs': "",  # GAF does not have this field, but it's optional in GPI
        'taxon': {
            'id': "NCBITaxon:10090"
        }
    }

    buffer = io.StringIO()
    writer = entitywriter.GpiWriter(file=buffer)
    writer.write_entity(gafgpibridge.Entity(record))

    # Trailing "" element proves the output ends with a newline.
    assert buffer.getvalue().split("\n") == [
        "!gpi-version: 1.2",
        "MGI\tMGI:1918911\t0610005C13Rik\tRIKEN cDNA 0610005C13 gene\t\tgene\ttaxon:10090\t\t\t",
        ""
    ]
| bsd-3-clause | Python | |
4d8dbf66bdee710e5b53863a4852e80c42a2c7a2 | Add a sqlserver test | ess/dd-agent,pfmooney/dd-agent,darron/dd-agent,ess/dd-agent,AntoCard/powerdns-recursor_check,amalakar/dd-agent,oneandoneis2/dd-agent,GabrielNicolasAvellaneda/dd-agent,AniruddhaSAtre/dd-agent,GabrielNicolasAvellaneda/dd-agent,AntoCard/powerdns-recursor_check,truthbk/dd-agent,zendesk/dd-agent,jyogi/purvar-agent,pfmooney/dd-agent,gphat/dd-agent,huhongbo/dd-agent,manolama/dd-agent,oneandoneis2/dd-agent,citrusleaf/dd-agent,AntoCard/powerdns-recursor_check,zendesk/dd-agent,urosgruber/dd-agent,jvassev/dd-agent,manolama/dd-agent,truthbk/dd-agent,brettlangdon/dd-agent,benmccann/dd-agent,relateiq/dd-agent,a20012251/dd-agent,yuecong/dd-agent,remh/dd-agent,takus/dd-agent,urosgruber/dd-agent,PagerDuty/dd-agent,jraede/dd-agent,Mashape/dd-agent,AntoCard/powerdns-recursor_check,polynomial/dd-agent,jshum/dd-agent,packetloop/dd-agent,mderomph-coolblue/dd-agent,eeroniemi/dd-agent,citrusleaf/dd-agent,JohnLZeller/dd-agent,ess/dd-agent,eeroniemi/dd-agent,Wattpad/dd-agent,eeroniemi/dd-agent,amalakar/dd-agent,remh/dd-agent,darron/dd-agent,jraede/dd-agent,mderomph-coolblue/dd-agent,jamesandariese/dd-agent,jamesandariese/dd-agent,truthbk/dd-agent,indeedops/dd-agent,urosgruber/dd-agent,jamesandariese/dd-agent,truthbk/dd-agent,pmav99/praktoras,AniruddhaSAtre/dd-agent,indeedops/dd-agent,PagerDuty/dd-agent,darron/dd-agent,PagerDuty/dd-agent,packetloop/dd-agent,tebriel/dd-agent,tebriel/dd-agent,polynomial/dd-agent,darron/dd-agent,zendesk/dd-agent,citrusleaf/dd-agent,ess/dd-agent,jyogi/purvar-agent,oneandoneis2/dd-agent,relateiq/dd-agent,jraede/dd-agent,cberry777/dd-agent,AniruddhaSAtre/dd-agent,urosgruber/dd-agent,packetloop/dd-agent,amalakar/dd-agent,GabrielNicolasAvellaneda/dd-agent,darron/dd-agent,citrusleaf/dd-agent,pmav99/praktoras,AntoCard/powerdns-recursor_check,polynomial/dd-agent,jvassev/dd-agent,brettlangdon/dd-agent,lookout/dd-agent,pfmooney/dd-agent,c960657/dd-agent,guruxu/dd-agent,pmav99/praktoras,relateiq/dd-agent,ind
eedops/dd-agent,mderomph-coolblue/dd-agent,joelvanvelden/dd-agent,guruxu/dd-agent,lookout/dd-agent,jshum/dd-agent,eeroniemi/dd-agent,relateiq/dd-agent,yuecong/dd-agent,jvassev/dd-agent,huhongbo/dd-agent,truthbk/dd-agent,yuecong/dd-agent,joelvanvelden/dd-agent,lookout/dd-agent,urosgruber/dd-agent,a20012251/dd-agent,benmccann/dd-agent,pfmooney/dd-agent,pmav99/praktoras,eeroniemi/dd-agent,jvassev/dd-agent,takus/dd-agent,Shopify/dd-agent,a20012251/dd-agent,jyogi/purvar-agent,zendesk/dd-agent,amalakar/dd-agent,Wattpad/dd-agent,oneandoneis2/dd-agent,jraede/dd-agent,jyogi/purvar-agent,PagerDuty/dd-agent,manolama/dd-agent,takus/dd-agent,guruxu/dd-agent,yuecong/dd-agent,a20012251/dd-agent,jamesandariese/dd-agent,oneandoneis2/dd-agent,c960657/dd-agent,huhongbo/dd-agent,ess/dd-agent,gphat/dd-agent,yuecong/dd-agent,manolama/dd-agent,benmccann/dd-agent,takus/dd-agent,AniruddhaSAtre/dd-agent,JohnLZeller/dd-agent,brettlangdon/dd-agent,Wattpad/dd-agent,pfmooney/dd-agent,guruxu/dd-agent,polynomial/dd-agent,AniruddhaSAtre/dd-agent,Shopify/dd-agent,tebriel/dd-agent,takus/dd-agent,joelvanvelden/dd-agent,lookout/dd-agent,tebriel/dd-agent,GabrielNicolasAvellaneda/dd-agent,packetloop/dd-agent,benmccann/dd-agent,jshum/dd-agent,jvassev/dd-agent,Mashape/dd-agent,JohnLZeller/dd-agent,brettlangdon/dd-agent,joelvanvelden/dd-agent,jshum/dd-agent,cberry777/dd-agent,gphat/dd-agent,mderomph-coolblue/dd-agent,manolama/dd-agent,gphat/dd-agent,huhongbo/dd-agent,Shopify/dd-agent,brettlangdon/dd-agent,cberry777/dd-agent,huhongbo/dd-agent,c960657/dd-agent,gphat/dd-agent,GabrielNicolasAvellaneda/dd-agent,Shopify/dd-agent,zendesk/dd-agent,amalakar/dd-agent,jamesandariese/dd-agent,Shopify/dd-agent,benmccann/dd-agent,cberry777/dd-agent,jshum/dd-agent,joelvanvelden/dd-agent,jraede/dd-agent,polynomial/dd-agent,JohnLZeller/dd-agent,remh/dd-agent,indeedops/dd-agent,Mashape/dd-agent,tebriel/dd-agent,Mashape/dd-agent,citrusleaf/dd-agent,relateiq/dd-agent,a20012251/dd-agent,indeedops/dd-agent,Mashape/dd-agent,remh/
dd-agent,guruxu/dd-agent,remh/dd-agent,mderomph-coolblue/dd-agent,cberry777/dd-agent,lookout/dd-agent,Wattpad/dd-agent,c960657/dd-agent,jyogi/purvar-agent,c960657/dd-agent,PagerDuty/dd-agent,JohnLZeller/dd-agent,packetloop/dd-agent,Wattpad/dd-agent,pmav99/praktoras | tests/test_sqlserver.py | tests/test_sqlserver.py | import unittest
import logging
from nose.plugins.attrib import attr
from checks import gethostname
from tests.common import get_check
logging.basicConfig()

# One-time manual setup required on the monitored SQL Server instance for
# the `datadog` login referenced by CONFIG below:
"""
Run the following on your local SQL Server:
CREATE LOGIN datadog WITH PASSWORD = '340$Uuxwp7Mcxo7Khy';
CREATE USER datadog FOR LOGIN datadog;
GRANT SELECT on sys.dm_os_performance_counters to datadog;
GRANT VIEW SERVER STATE to datadog;
"""

# Check configuration under test: three custom metrics on top of the
# built-in counters; the "ALL" instance is fanned out and tagged per db.
CONFIG = """
init_config:
custom_metrics:
- name: sqlserver.clr.execution
type: gauge
counter_name: CLR Execution
- name: sqlserver.exec.in_progress
type: gauge
counter_name: OLEDB calls
instance_name: Cumulative execution time (ms) per second
- name: sqlserver.db.commit_table_entries
type: gauge
counter_name: Log Flushes/sec
instance_name: ALL
tag_by: db
instances:
- host: 127.0.0.1,1433
username: datadog
password: 340$Uuxwp7Mcxo7Khy
"""
@attr('windows')
class SQLServerTestCase(unittest.TestCase):
    """Integration test: run the sqlserver check once against a local
    instance and validate both built-in and custom metrics."""
    def testSqlServer(self):
        check, instances = get_check('sqlserver', CONFIG)
        check.check(instances[0])
        metrics = check.get_metrics()

        # Make sure the base metrics loaded
        base_metrics = [m[0] for m in check.METRICS]
        ret_metrics = [m[0] for m in metrics]
        for metric in base_metrics:
            assert metric in ret_metrics

        # Check our custom metrics
        assert 'sqlserver.clr.execution' in ret_metrics
        assert 'sqlserver.exec.in_progress' in ret_metrics
        assert 'sqlserver.db.commit_table_entries' in ret_metrics

        # Make sure the ALL custom metric is tagged (metric tuple layout:
        # index 0 = name, index 3 = metadata dict with a 'tags' list)
        tagged_metrics = [m for m in metrics
                          if m[0] == 'sqlserver.db.commit_table_entries']
        for metric in tagged_metrics:
            for tag in metric[3]['tags']:
                assert tag.startswith('db')
if __name__ == "__main__":
unittest.main() | bsd-3-clause | Python | |
b70b51d1c43a344a3c408f3da30c6477b311241e | Create __init__.py | carlniger/acitool | acitool/jsondata/Mpod/__init__.py | acitool/jsondata/Mpod/__init__.py | apache-2.0 | Python | ||
9ec45d8b44a63bcd2652de30191b2bf0caf72ab8 | Add tests for backrefs | konstantinoskostis/sqlalchemy-utils,JackWink/sqlalchemy-utils,cheungpat/sqlalchemy-utils,joshfriend/sqlalchemy-utils,marrybird/sqlalchemy-utils,tonyseek/sqlalchemy-utils,rmoorman/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,spoqa/sqlalchemy-utils | tests/aggregate/test_backrefs.py | tests/aggregate/test_backrefs.py | import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregateValueGenerationForSimpleModelPaths(TestCase):
    """Verify that the @aggregated Thread.comment_count column stays in
    sync when Comment rows are added/removed via the `thread` backref."""
    def create_models(self):
        class Thread(self.Base):
            __tablename__ = 'thread'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))

            @aggregated('comments', sa.Column(sa.Integer, default=0))
            def comment_count(self):
                # aggregate expression: number of related Comment rows
                return sa.func.count('1')

        class Comment(self.Base):
            __tablename__ = 'comment'
            id = sa.Column(sa.Integer, primary_key=True)
            content = sa.Column(sa.Unicode(255))
            thread_id = sa.Column(sa.Integer, sa.ForeignKey('thread.id'))
            # the 'comments' collection used by @aggregated is created here
            thread = sa.orm.relationship(Thread, backref='comments')

        self.Thread = Thread
        self.Comment = Comment

    def test_assigns_aggregates_on_insert(self):
        # thread and comment flushed in the same commit
        thread = self.Thread()
        thread.name = u'some article name'
        self.session.add(thread)
        comment = self.Comment(content=u'Some content', thread=thread)
        self.session.add(comment)
        self.session.commit()
        self.session.refresh(thread)
        assert thread.comment_count == 1

    def test_assigns_aggregates_on_separate_insert(self):
        # thread committed first, comment in a later transaction
        thread = self.Thread()
        thread.name = u'some article name'
        self.session.add(thread)
        self.session.commit()
        comment = self.Comment(content=u'Some content', thread=thread)
        self.session.add(comment)
        self.session.commit()
        self.session.refresh(thread)
        assert thread.comment_count == 1

    def test_assigns_aggregates_on_delete(self):
        # deleting the only comment must bring the count back to zero
        thread = self.Thread()
        thread.name = u'some article name'
        self.session.add(thread)
        self.session.commit()
        comment = self.Comment(content=u'Some content', thread=thread)
        self.session.add(comment)
        self.session.commit()
        self.session.delete(comment)
        self.session.commit()
        self.session.refresh(thread)
        assert thread.comment_count == 0
| bsd-3-clause | Python | |
f45f00401daa8446982477cd8ba26c661e577508 | Add dictionary.en.py | nlpub/hyperstar,nlpub/hyperstar,dustalov/projlearn,nlpub/hyperstar | dictionary.en.py | dictionary.en.py | #!/usr/bin/env python3
import argparse
import csv
import os
import random
from collections import defaultdict
import numpy as np
from gensim.models.word2vec import Word2Vec
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
# Command line: path to the word2vec model and the RNG seed.
parser = argparse.ArgumentParser(description='LexNet-Based English Dictionary.')
parser.add_argument('--w2v', default='corpus_en.norm-sz100-w8-cb1-it1-min20.w2v', nargs='?',
                    help='Path to the word2vec model.')
parser.add_argument('--seed', default=228, type=int, nargs='?', help='Random seed.')
args = vars(parser.parse_args())

RANDOM_SEED = args['seed']
random.seed(RANDOM_SEED)

# Load pre-trained embeddings; init_sims(replace=True) unit-normalizes the
# vectors in place (gensim).
w2v = Word2Vec.load_word2vec_format(args['w2v'], binary=True, unicode_errors='ignore')
w2v.init_sims(replace=True)
print('Using %d word2vec dimensions from "%s".' % (w2v.layer1_size, args['w2v']))
# Gold-standard ("trusted") hypernymy pairs plus negatives (co-hyponyms /
# synonyms) collected from four benchmark datasets; pairs containing
# out-of-vocabulary words are skipped.
positives_trusted = defaultdict(lambda: list())
negatives = defaultdict(lambda: list())

for dataset in ('K&H+N', 'BLESS', 'ROOT09', 'EVALution'):
    for part in ('train', 'val', 'test'):
        with open(os.path.join(dataset, part + '.tsv')) as f:
            reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for row in reader:
                hyponym, hypernym, relation = row[0], row[1], row[2]
                if hyponym not in w2v or hypernym not in w2v:
                    continue
                # relation labels differ per dataset:
                # (K&H+N, BLESS, ROOT09, EVALution)
                if relation in ('hypo', 'hyper', 'HYPER', 'IsA') and hypernym not in positives_trusted[hyponym]:
                    positives_trusted[hyponym].append(hypernym)
                elif relation in ('coord', 'Synonym'):
                    # negatives are stored symmetrically for both words
                    if hypernym not in negatives[hyponym]:
                        negatives[hyponym].append(hypernym)
                    if hyponym not in negatives[hypernym]:
                        negatives[hypernym].append(hyponym)

# Pattern-extracted ("untrusted") candidate hypernyms; used further below
# only to enrich the training split.
positives_untrusted = defaultdict(lambda: list())

with open('en_ps59g-rnk3-min100-nomwe-39k.csv') as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        hyponym, hypernym, frequency = row[0], row[1], float(row[2])
        if hyponym in w2v and hypernym in w2v and hypernym not in positives_untrusted[hyponym]:
            positives_untrusted[hyponym].append(hypernym)

# 60/20/20 split performed over hyponyms (not pairs), so no hyponym leaks
# across train/validation/test.
keys_trusted = [k for k in positives_trusted.keys() if len(positives_trusted[k]) > 0]
trusted_train, trusted_validation_test = train_test_split(np.arange(len(keys_trusted), dtype='int32'), test_size=.4,
                                                          random_state=RANDOM_SEED)
trusted_validation, trusted_test = train_test_split(trusted_validation_test, test_size=.5, random_state=RANDOM_SEED)

hypernyms_train = {k: positives_trusted[k] for i in trusted_train for k in (keys_trusted[i],)}

# Only the training split is augmented with the untrusted pairs.
for hyponym, hypernyms in positives_untrusted.items():
    if hyponym in hypernyms_train:
        for hypernym in hypernyms:
            if not hypernym in hypernyms_train[hyponym]:
                hypernyms_train[hyponym].append(hypernym)

hypernyms_validation = {k: positives_trusted[k] for i in trusted_validation for k in (keys_trusted[i],)}
hypernyms_test = {k: positives_trusted[k] for i in trusted_test for k in (keys_trusted[i],)}

# Flatten the per-hyponym dicts into (hyponym, hypernym) pair lists.
subsumptions_train = [(x, y) for x, ys in hypernyms_train.items() for y in ys]
subsumptions_validation = [(x, y) for x, ys in hypernyms_validation.items() for y in ys]
subsumptions_test = [(x, y) for x, ys in hypernyms_test.items() for y in ys]
def write_subsumptions(subsumptions, filename):
    """Dump (hyponym, hypernym) pairs to *filename* as tab-separated rows,
    one pair per line."""
    with open(filename, 'w', newline='') as f:
        tsv = csv.writer(f, dialect='excel-tab', lineterminator='\n')
        tsv.writerows(subsumptions)
# Persist the three splits and the negatives ("synonyms") as TSV files.
write_subsumptions(subsumptions_train, 'subsumptions-train.txt')
write_subsumptions(subsumptions_validation, 'subsumptions-validation.txt')
write_subsumptions(subsumptions_test, 'subsumptions-test.txt')

with open('synonyms.txt', 'w', newline='') as f:
    writer = csv.writer(f, dialect='excel-tab', lineterminator='\n')
    # one row per word: word <TAB> comma-separated list of its negatives
    for word, words in negatives.items():
        writer.writerow((word, ','.join(words)))
| mit | Python | |
8bf521bf26af93f13043ee6e0d70070d49f76f68 | Implement the cipher map problem. | edwardzhu/checkio-solution | Home/cipherMap.py | Home/cipherMap.py | import operator
def checkio(arr):
    """Decode a grille cipher.

    arr[0] is the grille (mask) whose non-'.' cells are holes, arr[1] the
    letter grid.  The characters under the holes are read in row-major
    order; the grille is then rotated 90 degrees clockwise, for four
    passes in total.  Returns the concatenated message.
    """
    holes = convertMapToTuples(arr[0])
    letters = convertCubeToList(arr[1])
    size = len(arr[0])

    pieces = []
    for _ in range(4):
        holes.sort(key=operator.itemgetter(0, 1))  # row-major reading order
        for row, col in holes:
            pieces.append(letters[row][col])
        holes = rotateCube(holes, size)
    return ''.join(pieces)
def convertCubeToList(arr):
    """Convert a sequence of strings (grid rows) into a list of character
    lists, so individual cells can be indexed.

    Args:
      arr: sequence of strings, one per grid row.

    Returns:
      list[list[str]]: one inner list of single characters per input row.
    """
    # idiom: a comprehension replaces the manual nested append loop
    return [list(row) for row in arr]
def convertMapToTuples(arr):
    """Return the (row, col) coordinates of every hole in the grille.

    A cell counts as a hole when it holds any character other than '.'.

    Args:
      arr: sequence of strings describing the mask.

    Returns:
      list[tuple[int, int]]: hole coordinates in row-major order.
    """
    # idiom: nested comprehension instead of manual index loops
    return [(i, j)
            for i, row in enumerate(arr)
            for j, cell in enumerate(row)
            if cell != '.']
def rotateCube(tuples, dimension):
    """Rotate grille hole coordinates 90 degrees clockwise.

    A cell at (row, col) in a square grid of side *dimension* maps to
    (col, dimension - row - 1) after a clockwise quarter turn.

    Args:
      tuples: iterable of (row, col) coordinates.
      dimension: side length of the (square) grille.

    Returns:
      list[tuple[int, int]]: rotated coordinates, same order as input.
    """
    # idiom: comprehension with tuple unpacking instead of item[0]/item[1]
    return [(col, dimension - row - 1) for row, col in tuples]
if __name__ == "__main__":
    # Self-tests: each case is [grille (X = hole), letter grid] and the
    # expected decoded message after four clockwise grille rotations.
    assert checkio([[
        'X...',
        '..X.',
        'X..X',
        '....'],[
        'itdf',
        'gdce',
        'aton',
        'qrdi']
    ]) == 'icantforgetiddqd', 'Test1'
    assert checkio([[
        '....',
        'X..X',
        '.X..',
        '...X'],[
        'xhwc',
        'rsqx',
        'xqzz',
        'fyzr']
    ]) == 'rxqrwsfzxqxzhczy', "Test2"
| mit | Python | |
e583d977c7089f21841890b7eb50c824db153202 | Test for unicode characters in grammars. | igordejanovic/textX,igordejanovic/textX,igordejanovic/textX | tests/functional/test_unicode.py | tests/functional/test_unicode.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
    """
    Test grammar with unicode char given in grammar string.
    """
    # Building a meta-model from a grammar whose terminals contain
    # non-ASCII characters must not raise and must yield a truthy object.
    grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
    metamodel = metamodel_from_str(grammar)
    assert metamodel
def test_unicode_grammar_from_file():
    """
    TODO: not implemented yet -- should load a grammar containing unicode
    characters from a file and assert the meta-model builds successfully.
    """
| mit | Python | |
8117b3bee367afea107f7ef4b2003006e0ea857e | Create anteater.py | nullx31/anteater.py | anteater.py | anteater.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
###########################################
# 27.11.2012 | word scraper bot wsb.py #
# by PirateSecurity # http://piratesec.de #
###########################################
import mechanize
import cookielib
import urllib2
from bs4 import BeautifulSoup
import re
import sys
import unicodedata
import os
import urlparse
# Ich bin ein Browser, bitte block mich nicht :_)
browser = mechanize.Browser()
cookies = cookielib.MozillaCookieJar('cookie_jar')
browser.set_cookiejar(cookies)
browser.set_handle_redirect(True)
browser.set_handle_robots(False)
browser.set_handle_equiv(True)
browser.set_handle_gzip(False)
browser.set_handle_referer(True)
browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
browser.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:14.0) Gecko/20120405 Firefox/14.0a1')]
# Listen
linklist = []
wordlist = []
# Container
wordfile = open('wordlist.txt', 'w')
linkfile = open('links.txt', 'w')
# Koch die Suppe und Filter sie mir
initiallink = raw_input("enter target to scrape for words_> ")
linklist.append(initiallink)
print "scraping... Press Ctrl+C to exit."
# Main Loop
###########
for link in linklist:
try:
soup = BeautifulSoup((browser.open(link)).read())
filteredsoup = re.findall(r"(?:\s|^)(\w+)(?=\s|$)", ((soup.get_text()).encode('utf-8', 'ignore')))
for word in filteredsoup:
if (word not in wordlist) and (len(word) > 2 and len(word) < 12):
wordlist.append(word)
wordfile.write(str(word) + '\n')
for eachnewlink in soup.findAll('a', href=True):
eachnewlink['href'] = urlparse.urljoin(link, eachnewlink['href'])
if eachnewlink not in linklist:
linklist.append(eachnewlink['href'])
linkfile.write(str(eachnewlink['href'].encode('utf-8', 'ignore')) + '\n')
except:
continue
| mit | Python | |
f2c47ccf852e1a2b2a68f4d7ac1e72409ddfad3e | Create scratch.py | dpbroman/satrain,dpbroman/satrain | scratch.py | scratch.py | import os
import urllib
DOWNLOADS_DIR = '/python-downloader/downloaded'

# For every line (one URL per line) in the file
for url in open('urls.txt'):
    # Bug fix: iterating a file keeps the trailing newline on each line;
    # previously it ended up in both the request URL and the local name.
    url = url.strip()
    if not url:
        continue  # skip blank lines
    # Split on the rightmost / and take everything on the right side of that
    name = url.rsplit('/', 1)[-1]
    # Combine the name and the downloads directory to get the local filename
    filename = os.path.join(DOWNLOADS_DIR, name)
    # Download the file if it does not exist
    if not os.path.isfile(filename):
        urllib.urlretrieve(url, filename)
| mit | Python | |
8e3b2b6103a591dae2b99d7e219722e0992dae65 | Add CatagoryMgr | vinx13/WikiCrawler | CatagoryManager.py | CatagoryManager.py | from DbHelper import DbHelper
class CatagoryManager(object):
    """Persists per-category usage counts in a MySQL table.

    NOTE(review): "Catagory"/"cactagory" look like misspellings of
    "category"; renaming would change the public class name and the
    backing table, so they are kept as-is here.
    """
    TABLE_NAME = "cactagory"  # [sic] kept as originally spelled
    FIELD_TITLE = "title"
    FIELD_COUNT = "count"

    def __init__(self):
        # Create the backing table on first use (no-op when it exists).
        self.db = DbHelper()
        sql = "CREATE TABLE IF NOT EXISTS `" + self.TABLE_NAME + "` (" \
            + self.FIELD_TITLE + " TEXT NOT NULL," \
            + self.FIELD_COUNT + " INT NOT NULL DEFAULT '1'," \
            + "PRIMARY KEY (`" + self.FIELD_TITLE + "`(100)));"
        self.db.execute(sql)

    def add(self, entry):
        """Insert *entry.title* with count 1, or bump its count when the
        title already exists (ON DUPLICATE KEY UPDATE).

        SECURITY NOTE(review): entry.title is interpolated directly into
        the SQL string; if titles may contain quotes or come from an
        untrusted source this is an SQL-injection vector -- switch to
        parameterized queries if DbHelper supports them.
        """
        sql = "INSERT INTO `" + self.TABLE_NAME + "` VALUES ('" \
            + entry.title + "', 1) " \
            + "ON DUPLICATE KEY UPDATE " + self.FIELD_COUNT + " = " + self.FIELD_COUNT + " + 1;"
        self.db.execute(sql)

    def clear(self):
        """Delete every row from the category table."""
        sql = "DELETE FROM " + self.TABLE_NAME + ";"
        self.db.execute(sql)
| mit | Python | |
dcc64e9fd8bb3cb407959a30a2054fc180596bae | Add Pandas integration unit tests | rwhitt2049/nimble,rwhitt2049/trouve | tests/test_pandas_integration.py | tests/test_pandas_integration.py | from unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
    """Events built from a pandas boolean Series must round-trip back to
    both pandas and numpy representations of the condition."""
    def setUp(self):
        conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_series > 0)
        self.events = Events(condition)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        # Bug fix: the .equals() result was previously computed but never
        # asserted, so this test could never fail.
        self.assertTrue(test_series.equals(validation_series))

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
    """Events built from a numpy boolean array must round-trip back to
    both pandas and numpy representations of the condition."""
    def setUp(self):
        conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_array > 0)
        self.events = Events(condition)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        # Bug fix: the .equals() result was previously computed but never
        # asserted, so this test could never fail.
        self.assertTrue(test_series.equals(validation_series))

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_array, self.events.as_array())
| mit | Python | |
1696ca33e644d3cb1138d7ee4c48239b7a757cfd | Add the first script to try a led light | mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp | python_scripts/gpio_test.py | python_scripts/gpio_test.py | import RPi.GPIO as GPIO
import time
# Use physical (board) pin numbering; pin 7 drives the LED.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)

# Blink ten times: one second on, one second off.
for x in range(0,10):
    GPIO.output(7,True)
    time.sleep(1)
    GPIO.output(7,False)
    time.sleep(1)

# Release the GPIO pins so other processes can use them.
GPIO.cleanup()
| mit | Python | |
5c091ea20a531692c676f9d4e0f450c0e642f740 | Create module for uptime retrieval | HubbeKing/Hubbot_Twisted | Modules/Uptime.py | Modules/Uptime.py | from collections import OrderedDict
from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import datetime
class Module(ModuleInterface):
    """Bot command module: reports how long the bot has been running."""
    triggers = ["uptime"]
    help = "uptime -- returns the uptime for the bot"

    def onTrigger(self, Hubbot, message):
        # Uptime = wall-clock time elapsed since the bot's recorded start.
        now = datetime.datetime.now()
        timeDelta = now - Hubbot.startTime
        return IRCResponse(ResponseType.Say, "I have been running for {}".format(self.deltaTimeToString(timeDelta)), message.ReplyTo)

    def deltaTimeToString(self, timeDelta):
        """Render a timedelta as e.g. "2 days 1 hour 5 minutes 3 seconds".

        Zero-valued units are omitted, unit names are singularized for a
        value of 1, and a near-zero delta renders as just "seconds".

        NOTE(review): d.iteritems() below is Python 2 only.

        @type timeDelta: timedelta
        """
        d = OrderedDict()
        d['days'] = timeDelta.days
        d['hours'], rem = divmod(timeDelta.seconds, 3600)
        d['minutes'], d['seconds'] = divmod(rem, 60)

        def lex(durationWord, duration):
            # strip the trailing 's' for singular values ("1 hour")
            if duration == 1:
                return '{0} {1}'.format(duration, durationWord[:-1])
            else:
                return '{0} {1}'.format(duration, durationWord)

        deltaString = ' '.join([lex(word, number) for word, number in d.iteritems() if number > 0])
        return deltaString if len(deltaString) > 0 else 'seconds'
| mit | Python | |
e39270b69e1e8831c177bf4e5051726e6a678407 | Add wsgi python script [ci skip] | ecsnavarretemit/sarai-interactive-maps-backend,ecsnavarretemit/sarai-interactive-maps-backend | app/wsgi.py | app/wsgi.py | import os
import sys
# Project root = the parent of the directory containing this wsgi file.
root = os.path.dirname(os.path.realpath(__file__ + '/..'))

# activate the virtual env so the app runs with its own dependencies
# (execfile is Python 2 only)
activate_this = root + '/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))

# make the project importable
sys.path.insert(0, root)

# set the environment variable to production
os.environ['FLASK_ENV']="PRODUCTION"

# run the application: WSGI servers look up the module-level name
# "application"
from app import app as application
| mit | Python | |
655bf4b4159e70b4a99185a1735ac63c3ee951dc | Add script to filter result by result type. | symbooglix/boogie-runner,symbooglix/boogie-runner | analysis/filter-by-result-type.py | analysis/filter-by-result-type.py | #!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK: put the repository root (grandparent of this script) on sys.path
# so that the BoogieRunner package can be imported from this subdirectory.
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
    """Print (as YAML on stdout) all entries of a boogie-runner result
    file whose 'result' field matches the requested ResultType; the match
    count is reported on the log."""
    resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
    parser.add_argument('result_type', choices=resultTypes)
    pargs = parser.parse_args(args)

    results = yaml.load(pargs.result_yml)
    assert isinstance(results, list)

    # Pick out the results of the requested type (compare by enum value).
    resultCode = ResultType[pargs.result_type].value
    count = 0
    collected = [ ]
    for r in results:
        if r['result'] == resultCode:
            count += 1
            collected.append(r)

    logging.info('Count of type {} : {}'.format(pargs.result_type, count))
    print(yaml.dump(collected, default_flow_style=False))


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python | |
983a89d7b400ec71806311cfe6a576d85dd59958 | Create distance.py | ariegg/webiopi-drivers,ariegg/webiopi-drivers | abstractions/sensor/distance/distance.py | abstractions/sensor/distance/distance.py | # This code has to be added to __init__.py in folder .../devices/sensor
class Distance():
    """WebIOPi abstraction for distance sensors.

    Concrete drivers implement __getMillimeter__(); the REST endpoints
    below convert that single millimeter reading into other units.
    (@api/@request/@response decorators are provided by the surrounding
    WebIOPi framework.)
    """
    def __family__(self):
        return "Distance"

    def __getMillimeter__(self):
        # implemented by the concrete sensor driver
        raise NotImplementedError

    @api("Distance", 0)
    @request("GET", "sensor/distance/*")
    @response(contentType=M_JSON)
    def distanceWildcard(self):
        # one hardware read, converted to every supported unit at once
        values = {}
        distance = self.__getMillimeter__()
        values["mm"] = "%.02f" % distance
        values["cm"] = "%.02f" % (distance / 10)
        values["m"] = "%.02f" % (distance / 1000)
        values["in"] = "%.02f" % (distance / 25.4)  # 25.4 mm per inch
        values["ft"] = "%.02f" % (distance / 25.4 / 12)
        values["yd"] = "%.02f" % (distance / 25.4 / 36)
        return values

    @api("Distance")
    @request("GET", "sensor/distance/mm")
    @response("%.02f")
    def getMillimeter(self):
        return self.__getMillimeter__()

    @api("Distance")
    @request("GET", "sensor/distance/cm")
    @response("%.02f")
    def getCentimeter(self):
        return self.getMillimeter() / 10

    @api("Distance")
    @request("GET", "sensor/distance/m")
    @response("%.02f")
    def getMeter(self):
        return self.getMillimeter() / 1000

    @api("Distance")
    @request("GET", "sensor/distance/in")
    @response("%.02f")
    def getInch(self):
        # 25.4 mm per inch
        return self.getMillimeter() / 25.4

    @api("Distance")
    @request("GET", "sensor/distance/ft")
    @response("%.02f")
    def getFoot(self):
        return self.getInch() / 12

    @api("Distance")
    @request("GET", "sensor/distance/yd")
    @response("%.02f")
    def getYard(self):
        return self.getInch() / 36
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.