commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
ce1a080c01a5f792d128278fbb035f50e106e959 | set up general logging and twitter stream log | geotweet/log.py | geotweet/log.py | import logging
from logging.handlers import TimedRotatingFileHandler
import os
LOG_NAME = 'geotweet'
LOG_FILE = os.getenv('GEOTWEET_LOG', '/tmp/geotweet.log')
LOG_LEVEL = logging.DEBUG
TWITTER_LOG_NAME = 'twitter-stream'
def get_logger():
logger = logging.getLogger(LOG_NAME)
logger.setLevel(LOG_LEVEL)
fh = logging.FileHandler(LOG_FILE)
logformat = '%(levelname)s %(asctime)s: %(message)s'
formatter = logging.Formatter(logformat)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def get_rotating_logger(logfile, interval, when="M"):
logger = logging.getLogger(TWITTER_LOG_NAME)
logger.setLevel(LOG_LEVEL)
handler = TimedRotatingFileHandler(logfile, when=when, interval=interval)
logger.addHandler(handler)
return logger
logger = get_logger()
| Python | 0 | |
3b8d2cc0279e4da1ab758251f00fd065d951df53 | Add base for `help` command | foxybot/commands/help.py | foxybot/commands/help.py | """Command to retrieve help for other commands and topics"""
from command import AbstractCommand, bot_command
from bot_help import HelpManager
@bot_command
class Help(AbstractCommand):
_aliases = ('help', 'h')
async def execute(self, shards, client, msg):
try:
args, extra = self._parser.parse_known_args(msg.content.split()[1:])
except SystemExit as ex:
await client.send_message(msg.channel, 'Something very very bad happened')
return
# await client.send_message(msg.channel, (args, extra))
await client.send_message(msg.channel, "Hello, World!")
@property
def name(self):
return self._name
@property
def aliases(self):
return self._aliases
| Python | 0.000004 | |
453df6abe7741fe0f24c03754b26c197fa282656 | Create ValidateBST_002_iter.py | leetcode/098-Validate-Binary-Search-Tree/ValidateBST_002_iter.py | leetcode/098-Validate-Binary-Search-Tree/ValidateBST_002_iter.py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
stack = [root]
pre = None
while stack != [] and stack[0]:
p = stack.pop()
while p:
stack.append(p)
p = p.left
p = stack.pop()
if pre and pre.val >= p.val:
return False
pre = p
stack.append(p.right)
return True
| Python | 0.000001 | |
0e5bbc4df461c17ff7d1297ee4236afaa9e52a96 | Create solution.py | leetcode/easy/remove_duplicates_from_sorted_array/py/solution.py | leetcode/easy/remove_duplicates_from_sorted_array/py/solution.py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Without this check, the function
# will return slow + 1 when called
# with an empty array. This would
# be an error.
if len(nums) == 0:
return 0
slow = 0
for fast in range(len(nums)):
if nums[slow] != nums[fast]:
slow += 1
nums[slow] = nums[fast]
return slow + 1
| Python | 0.000018 | |
e8fa15603b275a690d96e37ab9dc560e68dedbb1 | Add tests | test/test_02.py | test/test_02.py | import unittest
import os
import sys
import lrmq
import timeout_decorator
import tempfile
import pickle
import struct
import asyncio
TEST_TIMEOUT = 5 # it can fail in slow environment
def read_log(fn):
logs = []
with open(fn, "rb") as f:
while True:
slen = f.read(4)
if not slen:
break
slen = struct.unpack(">L", slen)[0]
data = pickle.loads(f.read(slen))
logs.append(data)
assert len(logs) > 0
return logs
class TestRPC(unittest.TestCase):
def setUp(self):
# reinitialize loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# prepare test folder
self.logdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.logdir.cleanup()
@timeout_decorator.timeout(TEST_TIMEOUT)
def test_single_master(self):
logname = os.path.join(self.logdir.name, "single_master")
code = lrmq.main({
"debuglogger": logname + ".pkl",
"loglevel": "DEBUG",
"log": logname + "_hub.log",
"agents": [{
"type": "stdio",
"cmd": "test/msc1.py",
"id": "test02_master",
"name": "test02_master",
"log": logname + "_master.log",
"loglevel": "DEBUG",
"args": ["master"]}
]
})
assert code == 0
for log in read_log(logname + ".pkl"):
log_id = None
if "log_id" in log:
print(log)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
011fd9f5414d9f824a2c120084b98a1dc34cba0f | Add github_stargazers.py | src/github_stargazers.py | src/github_stargazers.py | import typing
import os
from bs4 import BeautifulSoup
import click
from halo import Halo
import requests
class UsernameRepositoryError(Exception):
def __init__(self) -> None:
super().__init__("Argument should be of form username/repository.")
class GitHub:
"""Creates a GitHub instance for listing the stargazers of a given repository
and checking if a user's full name is in the list of stargazers.
The constructor requires a string of the following form: `username/repository`,
both representing the GitHub meaning of them.
"""
__GITHUB_URL: str = "https://github.com"
__STARGAZERS_URL_SUFFIX: str = "/stargazers"
__PAGE_SUFFIX: str = "?page="
__OK_STATUS_CODE: int = 200
__TOO_MANY_REQUESTS_STATUS_CODE: int = 429
__MARK_END_OF_STARGAZERS: typing.List[str] = ['This repository has no more stargazers.']
def __init__(self, username_and_repository: str) -> None:
self.__username, self.__repository = self.__extract_user_and_repo(username_and_repository)
self.__repository_url = self.__get_repository_url()
self.__stargazers_base_url = self.__repository_url + self.__STARGAZERS_URL_SUFFIX
@classmethod
def __extract_user_and_repo(cls, username_and_repository: str) -> typing.Optional[typing.Tuple[str, str]]:
components: typing.List[str] = username_and_repository.split("/")
if len(components) != 2:
raise UsernameRepositoryError()
return components[0], components[1]
def __get_repository_url(self):
return os.path.join(self.__GITHUB_URL, self.__username, self.__repository)
def __get_soup(self, url: str) -> typing.Optional[BeautifulSoup]:
response = requests.get(url)
status_code: int = requests.get(url).status_code
if status_code == self.__OK_STATUS_CODE:
return BeautifulSoup(response.text, "html.parser")
if status_code == self.__TOO_MANY_REQUESTS_STATUS_CODE:
Halo().fail("Too many requests.")
print("{} HTTP".format(status_code))
return None
def __extract_stargazers_from_url(self, url: str) -> typing.Optional[typing.List[str]]:
spinner = Halo(text="Loading... " + url, spinner="dots")
spinner.start()
soup = self.__get_soup(url)
if not soup:
return None
h3_components = soup.find_all('h3')
users: typing.List[str] = []
for component in h3_components:
users.append(component.get_text())
spinner.stop()
if users == self.__MARK_END_OF_STARGAZERS:
return []
return users
def __get_url_page_template(self, page_number: int) -> str:
return self.__stargazers_base_url + self.__PAGE_SUFFIX + str(page_number)
def get_all_stargazers(self) -> typing.List[str]:
page_number: int = 1
all_stargazers: typing.List[str] = []
while True:
current_url: str = self.__get_url_page_template(page_number)
current_stargazers: typing.List[str] = self.__extract_stargazers_from_url(current_url)
if not current_stargazers:
break
all_stargazers += current_stargazers
page_number += 1
return sorted(all_stargazers)
def is_stargazer(self, user: str) -> bool:
page_number: int = 1
while True:
current_url: str = self.__get_url_page_template(page_number)
current_stargazers: typing.List[str] = self.__extract_stargazers_from_url(current_url)
if not current_stargazers:
break
if user in current_stargazers:
return True
page_number += 1
return False
@click.command()
@click.argument('username_and_repository')
@click.option('--user', default=None, help='User name to see if it is a stargazer')
def process_command(username_and_repository, user):
github = GitHub(username_and_repository)
if not user:
stargazers: typing.List[str] = github.get_all_stargazers()
print("Stargazers: ")
for stargazer in stargazers:
print(stargazer)
return
if github.is_stargazer(user):
Halo().succeed("Stargazer")
else:
Halo().fail("Not a Stargazer")
def main():
process_command() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
| Python | 0 | |
74550ef0c76a941c473c8d024ccc0a0403631c49 | Add basic structure for "/glossary" routes test | wqflask/tests/integration/test_markdown_routes.py | wqflask/tests/integration/test_markdown_routes.py | "Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
| Python | 0.000021 | |
5e1c48f9d00266290a8739f88085f050b1baa805 | Add test_backend.py in preparation for migrating backend to rigor's database layer | test_backend.py | test_backend.py | #!/usr/bin/env python
import types
import pprint
import backend
import config
from utils import *
DBNAME = config.CROWD_DB
debugMain('dbQueryDict')
sql = 'SELECT COUNT(*) FROM image;'
conn = backend.getDbConnection(DBNAME)
gen = backend.dbQueryDict(conn, sql)
assert isinstance(gen, types.GeneratorType)
rows = list(gen)
assert len(rows) == 1
assert isinstance(rows[0], dict)
assert 'count' in rows[0]
debugMain('getDatabaseNames')
names = backend.getDatabaseNames()
assert DBNAME in names
debugDetail(names)
debugMain('getTags')
tags = backend.getTags(DBNAME)
assert len(tags) > 0
assert isinstance(tags[0], basestring)
assert sorted(tags)[0] == 'align=center'
debugMain('getImage by id')
ID = 1
imgDict = backend.getImage(DBNAME, id=ID)
assert isinstance(imgDict, dict)
assert 'id' in imgDict
assert imgDict['id'] == ID
assert 'tags' in imgDict
assert len(imgDict['tags']) > 0
assert isinstance(imgDict['tags'][0], basestring)
debugMain('searchImages')
queryDict = dict(
database_name = DBNAME,
has_tags = ['align=left'],
page = 1,
max_count = 4,
)
count, results = backend.searchImages(queryDict)
assert count > 1
assert isinstance(results, list)
assert isinstance(results[0], dict)
assert 'tags' in results[0]
debugMain('getImage by locator')
LOCATOR = '4075c8de-fb2e-41e8-831b-ea4bdcb5a6a3'
imgDict = backend.getImage(DBNAME, locator=LOCATOR)
assert isinstance(imgDict, dict)
assert 'locator' in imgDict
assert imgDict['locator'] == LOCATOR
assert 'tags' in imgDict
assert len(imgDict['tags']) > 0
assert isinstance(imgDict['tags'][0], basestring)
debugMain('getImageAnnotations')
ID = 1
annotations = backend.getImageAnnotations(DBNAME, ID)
assert isinstance(annotations, list)
assert isinstance(annotations[0], dict)
assert 'domain' in annotations[0]
print green('===== success =====')
| Python | 0 | |
77b266028ce11eced065660bbda1acab04d30dbf | Add pagination tests for categories | tests/api/pagination/test_products.py | tests/api/pagination/test_products.py | import pytest
from prices import Money
from saleor.product.models import Category, Product
from ..utils import get_graphql_content
@pytest.fixture
def categories_for_pagination(product_type):
categories = Category.tree.build_tree_nodes(
{
"name": "Category2",
"slug": "cat1",
"children": [
{"name": "CategoryCategory1", "slug": "cat_cat1"},
{"name": "CategoryCategory2", "slug": "cat_cat2"},
{"name": "Category1", "slug": "cat2"},
{"name": "Category3", "slug": "cat3"},
],
}
)
categories = Category.objects.bulk_create(categories)
Product.objects.bulk_create(
[
Product(
name="Prod1",
slug="prod1",
product_type=product_type,
price=Money("10.00", "USD"),
category=categories[4],
),
Product(
name="Prod2",
slug="prod2",
product_type=product_type,
price=Money("10.00", "USD"),
category=categories[4],
),
Product(
name="Prod3",
slug="prod3",
product_type=product_type,
price=Money("10.00", "USD"),
category=categories[2],
),
]
)
return categories
QUERY_CATEGORIES_PAGINATION = """
query (
$first: Int, $last: Int, $after: String, $before: String,
$sortBy: CategorySortingInput, $filter: CategoryFilterInput
){
categories(
first: $first, last: $last, after: $after, before: $before,
sortBy: $sortBy, filter: $filter
) {
edges {
node {
name
}
}
pageInfo{
startCursor
endCursor
hasNextPage
hasPreviousPage
}
}
}
"""
@pytest.mark.parametrize(
"sort_by, categories_order",
[
(
{"field": "NAME", "direction": "ASC"},
["Category1", "Category2", "Category3"],
),
(
{"field": "NAME", "direction": "DESC"},
["CategoryCategory2", "CategoryCategory1", "Category3"],
),
(
{"field": "SUBCATEGORY_COUNT", "direction": "ASC"},
["Category2", "CategoryCategory1", "CategoryCategory2"],
),
(
{"field": "PRODUCT_COUNT", "direction": "ASC"},
["CategoryCategory1", "Category1", "CategoryCategory2"],
),
],
)
def test_categories_pagination_with_sorting(
sort_by, categories_order, staff_api_client, categories_for_pagination,
):
page_size = 3
variables = {"first": page_size, "after": None, "sortBy": sort_by}
response = staff_api_client.post_graphql(QUERY_CATEGORIES_PAGINATION, variables,)
content = get_graphql_content(response)
categories_nodes = content["data"]["categories"]["edges"]
assert categories_order[0] == categories_nodes[0]["node"]["name"]
assert categories_order[1] == categories_nodes[1]["node"]["name"]
assert categories_order[2] == categories_nodes[2]["node"]["name"]
assert len(categories_nodes) == page_size
@pytest.mark.parametrize(
"filter_by, categories_order",
[
({"search": "CategoryCategory"}, ["CategoryCategory1", "CategoryCategory2"]),
({"search": "cat_cat"}, ["CategoryCategory1", "CategoryCategory2"]),
({"search": "Category1"}, ["CategoryCategory1", "Category1"]),
],
)
def test_categories_pagination_with_filtering(
filter_by, categories_order, staff_api_client, categories_for_pagination,
):
page_size = 2
variables = {"first": page_size, "after": None, "filter": filter_by}
response = staff_api_client.post_graphql(QUERY_CATEGORIES_PAGINATION, variables,)
content = get_graphql_content(response)
categories_nodes = content["data"]["categories"]["edges"]
assert categories_order[0] == categories_nodes[0]["node"]["name"]
assert categories_order[1] == categories_nodes[1]["node"]["name"]
assert len(categories_nodes) == page_size
| Python | 0.000001 | |
961040f13f1d2b2d8aea019a6649f29f858d2a09 | Correct cleanup code for shutting down firefox in python bindings | py/selenium/webdriver/firefox/webdriver.py | py/selenium/webdriver/firefox/webdriver.py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from .service import Service
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
capabilities=None, proxy=None, executable_path='wires'):
self.binary = firefox_binary
self.profile = firefox_profile
if self.profile is None:
self.profile = FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX
if "marionette" in capabilities and capabilities['marionette'] is True:
# Let's use Marionette! WOOOOHOOOOO!
if "binary" in capabilities:
self.binary = capabilities["binary"]
self.service = Service(executable_path, firefox_binary=self.binary)
self.service.start()
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=capabilities,
keep_alive=True)
else:
# Oh well... sometimes the old way is the best way.
if self.binary is None:
self.binary = FirefoxBinary()
if proxy is not None:
proxy.add_to_capabilities(capabilities)
RemoteWebDriver.__init__(self,
command_executor=ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout),
desired_capabilities=capabilities,
keep_alive=True)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except (http_client.BadStatusLine, socket.error):
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
if "marionette" in self.capabilities and self.capabilities['marionette'] is True:
self.service.stop()
else:
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
| # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from .service import Service
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
capabilities=None, proxy=None, executable_path='wires'):
self.binary = firefox_binary
self.profile = firefox_profile
if self.profile is None:
self.profile = FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX
if "marionette" in capabilities and capabilities['marionette'] is True:
# Let's use Marionette! WOOOOHOOOOO!
if "binary" in capabilities:
self.binary = capabilities["binary"]
self.service = Service(executable_path, firefox_binary=self.binary)
self.service.start()
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=capabilities,
keep_alive=True)
else:
# Oh well... sometimes the old way is the best way.
if self.binary is None:
self.binary = FirefoxBinary()
if proxy is not None:
proxy.add_to_capabilities(capabilities)
RemoteWebDriver.__init__(self,
command_executor=ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout),
desired_capabilities=capabilities,
keep_alive=True)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except (http_client.BadStatusLine, socket.error):
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
self.service.stop()
'''try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))'''
@property
def firefox_profile(self):
return self.profile
| Python | 0.000014 |
986ff101ce224494a5cdb047a1aefd99c8a6d840 | Add an aioredis example | examples/sanic_aioredis_example.py | examples/sanic_aioredis_example.py | """ To run this example you need additional aioredis package
"""
from sanic import Sanic, response
import aioredis
app = Sanic(__name__)
@app.route("/")
async def handle(request):
async with request.app.redis_pool.get() as redis:
await redis.set('test-my-key', 'value')
val = await redis.get('test-my-key')
return response.text(val.decode('utf-8'))
@app.listener('before_server_start')
async def before_server_start(app, loop):
app.redis_pool = await aioredis.create_pool(
('localhost', 6379),
minsize=5,
maxsize=10,
loop=loop
)
@app.listener('after_server_stop')
async def after_server_stop(app, loop):
app.redis_pool.close()
await app.redis_pool.wait_closed()
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000)
| Python | 0.000038 | |
e355a926155355ccc5d8b545534f331bdb683f02 | Add management | podcastsync.py | podcastsync.py | import click
from getpass import getpass
from gposerver import create_app, db, User, Device, EpisodeAction
app = create_app()
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db, User=User, Device=Device, EpisodeAction=EpisodeAction)
@app.cli.command()
def adduser():
"""Add new user."""
username = input("Username: ")
password = getpass("Password: ")
u = User(username, password)
db.session.add(u)
db.session.commit()
@app.cli.command()
def init():
"""Initialise database."""
db.create_all()
| Python | 0.000001 | |
1786ebacb85b2ddce816fb21b80285d991761695 | Implement classes to be used by the deserializer | poyo/_nodes.py | poyo/_nodes.py | # -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
| Python | 0 | |
9f276fba97318431d85c08fc0718b30bf39ed1bf | Create add-one-row-to-tree.py | Python/add-one-row-to-tree.py | Python/add-one-row-to-tree.py | # Time: O(n)
# Space: O(h)
# Given the root of a binary tree, then value v and depth d,
# you need to add a row of nodes with value v at the given depth d. The root node is at depth 1.
#
# The adding rule is: given a positive integer depth d,
# for each NOT null tree nodes N in depth d-1, create two tree nodes
# with value v as N's left subtree root and right subtree root.
# And N's original left subtree should be the left subtree of the new left subtree root,
# its original right subtree should be the right subtree of the new right subtree root.
# If depth d is 1 that means there is no depth d-1 at all,
# then create a tree node with value v as the new root of the whole original tree,
# and the original tree is the new root's left subtree.
#
# Example 1:
# Input:
# A binary tree as following:
# 4
# / \
# 2 6
# / \ /
# 3 1 5
#
# v = 1
#
# d = 2
#
# Output:
# 4
# / \
# 1 1
# / \
# 2 6
# / \ /
# 3 1 5
#
# Example 2:
# Input:
# A binary tree as following:
# 4
# /
# 2
# / \
# 3 1
#
# v = 1
#
# d = 3
#
# Output:
# 4
# /
# 2
# / \
# 1 1
# / \
# 3 1
# Note:
# 1. The given d is in range [1, maximum depth of the given tree + 1].
# 2. The given binary tree has at least one tree node.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def addOneRow(self, root, v, d):
"""
:type root: TreeNode
:type v: int
:type d: int
:rtype: TreeNode
"""
if d in (0, 1):
node = TreeNode(v)
if d == 1:
node.left = root
else:
node.right = root
return node
if root and d >= 2:
root.left = self.addOneRow(root.left, v, d-1 if d > 2 else 1)
root.right = self.addOneRow(root.right, v, d-1 if d > 2 else 0)
return root
| Python | 0.000017 | |
2e7e83a0c3b789a0d0ba89134b64a0f6b723c3af | add forgotten path-building test | bids/layout/tests/test_path_building.py | bids/layout/tests/test_path_building.py | import pytest
from bids.layout import BIDSLayout
from os.path import join, abspath, sep
from bids.tests import get_test_data_path
@pytest.fixture(scope='module')
def layout():
data_dir = join(get_test_data_path(), '7t_trt')
return BIDSLayout(data_dir)
def test_bold_construction(layout):
ents = dict(subject='01', run=1, task='rest', suffix='bold')
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_run-1_bold.nii.gz"
ents['acquisition'] = 'random'
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_acq-random_run-1_bold.nii.gz" | Python | 0.000002 | |
59e647910dd77f0d09380e09f878f6a6fe4f4eda | Add unit tests for bytes_to_human formatter | test/units/module_utils/common/text/formatters/test_bytes_to_human.py | test/units/module_utils/common/text/formatters/test_bytes_to_human.py | # -*- coding: utf-8 -*-
# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.text.formatters import bytes_to_human
@pytest.mark.parametrize(
'input_data,expected',
[
(0, u'0.00 Bytes'),
(0.5, u'0.50 Bytes'),
(0.54, u'0.54 Bytes'),
(1024, u'1.00 KB'),
(1025, u'1.00 KB'),
(1536, u'1.50 KB'),
(1790, u'1.75 KB'),
(1048576, u'1.00 MB'),
(1073741824, u'1.00 GB'),
(1099511627776, u'1.00 TB'),
(1125899906842624, u'1.00 PB'),
(1152921504606846976, u'1.00 EB'),
(1180591620717411303424, u'1.00 ZB'),
(1208925819614629174706176, u'1.00 YB'),
]
)
def test_bytes_to_human(input_data, expected):
"""Test of bytes_to_human function, only proper numbers are passed."""
assert bytes_to_human(input_data) == expected
@pytest.mark.parametrize(
'input_data,expected',
[
(0, u'0.00 bits'),
(0.5, u'0.50 bits'),
(0.54, u'0.54 bits'),
(1024, u'1.00 Kb'),
(1025, u'1.00 Kb'),
(1536, u'1.50 Kb'),
(1790, u'1.75 Kb'),
(1048576, u'1.00 Mb'),
(1073741824, u'1.00 Gb'),
(1099511627776, u'1.00 Tb'),
(1125899906842624, u'1.00 Pb'),
(1152921504606846976, u'1.00 Eb'),
(1180591620717411303424, u'1.00 Zb'),
(1208925819614629174706176, u'1.00 Yb'),
]
)
def test_bytes_to_human_isbits(input_data, expected):
"""Test of bytes_to_human function with isbits=True proper results."""
assert bytes_to_human(input_data, isbits=True) == expected
@pytest.mark.parametrize(
'input_data,unit,expected',
[
(0, u'B', u'0.00 Bytes'),
(0.5, u'B', u'0.50 Bytes'),
(0.54, u'B', u'0.54 Bytes'),
(1024, u'K', u'1.00 KB'),
(1536, u'K', u'1.50 KB'),
(1790, u'K', u'1.75 KB'),
(1048576, u'M', u'1.00 MB'),
(1099511627776, u'T', u'1.00 TB'),
(1152921504606846976, u'E', u'1.00 EB'),
(1180591620717411303424, u'Z', u'1.00 ZB'),
(1208925819614629174706176, u'Y', u'1.00 YB'),
(1025, u'KB', u'1025.00 Bytes'),
(1073741824, u'Gb', u'1073741824.00 Bytes'),
(1125899906842624, u'Pb', u'1125899906842624.00 Bytes'),
]
)
def test_bytes_to_human_unit(input_data, unit, expected):
"""Test unit argument of bytes_to_human function proper results."""
assert bytes_to_human(input_data, unit=unit) == expected
@pytest.mark.parametrize(
'input_data,unit,expected',
[
(0, u'B', u'0.00 bits'),
(0.5, u'B', u'0.50 bits'),
(0.54, u'B', u'0.54 bits'),
(1024, u'K', u'1.00 Kb'),
(1536, u'K', u'1.50 Kb'),
(1790, u'K', u'1.75 Kb'),
(1048576, u'M', u'1.00 Mb'),
(1099511627776, u'T', u'1.00 Tb'),
(1152921504606846976, u'E', u'1.00 Eb'),
(1180591620717411303424, u'Z', u'1.00 Zb'),
(1208925819614629174706176, u'Y', u'1.00 Yb'),
(1025, u'KB', u'1025.00 bits'),
(1073741824, u'Gb', u'1073741824.00 bits'),
(1125899906842624, u'Pb', u'1125899906842624.00 bits'),
]
)
def test_bytes_to_human_unit_isbits(input_data, unit, expected):
"""Test unit argument of bytes_to_human function with isbits=True proper results."""
assert bytes_to_human(input_data, isbits=True, unit=unit) == expected
@pytest.mark.parametrize('input_data', [0j, u'1B', [1], {1: 1}, None, b'1B'])
def test_bytes_to_human_illegal_size(input_data):
"""Test of bytes_to_human function, illegal objects are passed as a size."""
e_regexp = (r'(no ordering relation is defined for complex numbers)|'
r'(unsupported operand type\(s\) for /)|(unorderable types)|'
r'(not supported between instances of)')
with pytest.raises(TypeError, match=e_regexp):
bytes_to_human(input_data)
| Python | 0 | |
4582edfe2b138fd63645caddde198dc7fee9bd0a | Fix adb_install_apk, broken by 9b3e716. | build/android/adb_install_apk.py | build/android/adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import logging
import os
import sys
from pylib import constants
from pylib.device import device_blacklist
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import run_tests_helper
def main():
    """Parse arguments, resolve the APK path and install it on devices.

    Devices that fail or time out during installation are added to the
    device blacklist (so later build steps skip them) instead of aborting
    the whole run.
    """
    parser = argparse.ArgumentParser()

    apk_group = parser.add_mutually_exclusive_group(required=True)
    apk_group.add_argument('--apk', dest='apk_name',
                           help='DEPRECATED The name of the apk containing the'
                                ' application (with the .apk extension).')
    apk_group.add_argument('apk_path', nargs='?',
                           help='The path to the APK to install.')

    # TODO(jbudorick): Remove once no clients pass --apk_package
    parser.add_argument('--apk_package', help='DEPRECATED unused')
    parser.add_argument('--keep_data',
                        action='store_true',
                        default=False,
                        help='Keep the package data when installing '
                             'the application.')
    parser.add_argument('--debug', action='store_const', const='Debug',
                        dest='build_type',
                        default=os.environ.get('BUILDTYPE', 'Debug'),
                        help='If set, run test suites under out/Debug. '
                             'Default is env var BUILDTYPE or Debug')
    parser.add_argument('--release', action='store_const', const='Release',
                        dest='build_type',
                        help='If set, run test suites under out/Release. '
                             'Default is env var BUILDTYPE or Debug.')
    parser.add_argument('-d', '--device', dest='device',
                        help='Target device for apk to install on.')
    parser.add_argument('-v', '--verbose', action='count',
                        help='Enable verbose logging.')

    args = parser.parse_args()

    run_tests_helper.SetLogLevel(args.verbose)
    constants.SetBuildType(args.build_type)

    # Resolve the APK: either an explicit path or a name looked up under
    # the build output directory.
    apk = args.apk_path or args.apk_name
    if not apk.endswith('.apk'):
        apk += '.apk'
    if not os.path.exists(apk):
        apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
        if not os.path.exists(apk):
            parser.error('%s not found.' % apk)

    devices = device_utils.DeviceUtils.HealthyDevices()

    if args.device:
        devices = [d for d in devices if d == args.device]
        if not devices:
            raise device_errors.DeviceUnreachableError(args.device)
    elif not devices:
        raise device_errors.NoDevicesError()

    def blacklisting_install(device):
        # Log the resolved |apk| path: args.apk_name is None whenever the
        # positional apk_path form was used, so it must not be logged.
        try:
            device.Install(apk, reinstall=args.keep_data)
        except device_errors.CommandFailedError:
            logging.exception('Failed to install %s', apk)
            device_blacklist.ExtendBlacklist([str(device)])
            logging.warning('Blacklisting %s', str(device))
        except device_errors.CommandTimeoutError:
            logging.exception('Timed out while installing %s', apk)
            device_blacklist.ExtendBlacklist([str(device)])
            logging.warning('Blacklisting %s', str(device))

    device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import logging
import os
import sys
from pylib import constants
from pylib.device import device_blacklist
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import run_tests_helper
def main():
    """Parse arguments, resolve the APK path and install it on devices.

    Devices that fail or time out during installation are added to the
    device blacklist (so later build steps skip them) instead of aborting
    the whole run.
    """
    parser = argparse.ArgumentParser()

    apk_group = parser.add_mutually_exclusive_group(required=True)
    apk_group.add_argument('--apk', dest='apk_name',
                           help='DEPRECATED The name of the apk containing the'
                                ' application (with the .apk extension).')
    apk_group.add_argument('apk_path', nargs='?',
                           help='The path to the APK to install.')

    # TODO(jbudorick): Remove once no clients pass --apk_package
    parser.add_argument('--apk_package', help='DEPRECATED unused')
    parser.add_argument('--keep_data',
                        action='store_true',
                        default=False,
                        help='Keep the package data when installing '
                             'the application.')
    parser.add_argument('--debug', action='store_const', const='Debug',
                        dest='build_type',
                        default=os.environ.get('BUILDTYPE', 'Debug'),
                        help='If set, run test suites under out/Debug. '
                             'Default is env var BUILDTYPE or Debug')
    parser.add_argument('--release', action='store_const', const='Release',
                        dest='build_type',
                        help='If set, run test suites under out/Release. '
                             'Default is env var BUILDTYPE or Debug.')
    parser.add_argument('-d', '--device', dest='device',
                        help='Target device for apk to install on.')
    parser.add_argument('-v', '--verbose', action='count',
                        help='Enable verbose logging.')

    args = parser.parse_args()

    run_tests_helper.SetLogLevel(args.verbose)
    constants.SetBuildType(args.build_type)

    # Resolve the APK: either an explicit path or a name looked up under
    # the build output directory.
    apk = args.apk_path or args.apk_name
    if not apk.endswith('.apk'):
        apk += '.apk'
    if not os.path.exists(apk):
        apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
        if not os.path.exists(apk):
            parser.error('%s not found.' % apk)

    devices = device_utils.DeviceUtils.HealthyDevices()

    if args.device:
        devices = [d for d in devices if d == args.device]
        if not devices:
            raise device_errors.DeviceUnreachableError(args.device)
    elif not devices:
        raise device_errors.NoDevicesError()

    def blacklisting_install(device):
        # BUG FIX: the original logged args.apk, but argparse stores the APK
        # under apk_name/apk_path, so the except handlers themselves raised
        # AttributeError. Log the resolved |apk| path, which is always set.
        try:
            device.Install(apk, reinstall=args.keep_data)
        except device_errors.CommandFailedError:
            logging.exception('Failed to install %s', apk)
            device_blacklist.ExtendBlacklist([str(device)])
            logging.warning('Blacklisting %s', str(device))
        except device_errors.CommandTimeoutError:
            logging.exception('Timed out while installing %s', apk)
            device_blacklist.ExtendBlacklist([str(device)])
            logging.warning('Blacklisting %s', str(device))

    device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000003 |
9019bcdbd105a33410e3baaf5843daa3bc4372f7 | created export django command export_welltags_documents_csv. tested locally | app/backend/wells/management/commands/export_welltags_documents_csv.py | app/backend/wells/management/commands/export_welltags_documents_csv.py | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
from collections import defaultdict
import urllib.parse
from django.core.management.base import BaseCommand
from minio import Minio
from gwells.settings.base import get_env_variable
# Run from command line :
# python manage.py export_welltags_documents_csv
#
# For development/debugging, it's useful to skip upload and cleanup
# python manage.py export_welltags_documents_csv --cleanup=0 --upload=0
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Export a CSV mapping GWELLS well tags to their S3 document URLs.

    Each output row holds a well tag followed by the well's GWELLS URL and
    one URL per document found for that tag in the S3 well bucket.
    """

    def __init__(self):
        """Read export settings (URL prefixes, output name) once, class wide."""
        super().__init__()
        self.gwells_url_prefix = get_env_variable('WATER_1651_GWELLS_URL_PREFIX')
        self.gwells_s3_prefix = get_env_variable('WATER_1651_GWELLS_S3_PREFIX')
        self.output_filename = 'gwells_export_welltags_documents.csv'

    def add_arguments(self, parser):
        # Arguments added for debugging purposes.
        # e.g. don't cleanup, don't upload:
        #   python manage.py export_welltags_documents_csv --cleanup=0 --upload=0
        parser.add_argument('--cleanup', type=int, nargs='?', help='If 1, remove file when done', default=1)
        parser.add_argument('--upload', type=int, nargs='?', help='If 1, upload the file', default=1)

    def handle(self, *args, **options):
        """Primary entrypoint: export, optionally upload, optionally clean up."""
        logger.info('starting export_welltags_documents_csv')

        self.export(self.output_filename)
        if options['upload'] == 1:
            self.upload_file(self.output_filename)
        if options['cleanup'] == 1:
            logger.info('cleaning up')
            if os.path.exists(self.output_filename):
                os.remove(self.output_filename)

        logger.info('export_welltags_documents_csv complete')
        self.stdout.write(self.style.SUCCESS('export_welltags_documents_csv complete'))

    def export(self, filename):
        """
        Using the minio client, list all objects in the S3_WELL_BUCKET
        recursively, collect (well_tag, well_url, document_url) records, then
        group them into a defaultdict(list) so each row of the CSV holds one
        unique well tag and all of its document urls.
        """
        if os.path.exists(filename):
            os.remove(filename)

        # recursively walk the minio bucket
        client = Minio(get_env_variable('S3_HOST'),
                       access_key=get_env_variable('S3_PUBLIC_ACCESS_KEY'),
                       secret_key=get_env_variable('S3_PUBLIC_SECRET_KEY'))
        objects = client.list_objects(get_env_variable('S3_WELL_BUCKET'), recursive=True)
        wells = []
        unique_well_dict = defaultdict(list)
        for o in objects:
            try:
                # Object keys look like ".../WTN <tag>_<rest>"; extract <tag>.
                well_tag = o.object_name[o.object_name.find('/WTN '):o.object_name.find('_')].replace('/WTN ', '')
                if well_tag is not None and well_tag != '':
                    well_output = {'well_tag': well_tag,
                                   'well_url': f'{self.gwells_url_prefix}{well_tag}',
                                   'document_url': f'{self.gwells_s3_prefix}{get_env_variable("S3_WELL_BUCKET")}/{o.object_name.replace(" ", "%20")}'
                                   }
                    wells.append(well_output)
            except Exception:
                # Best-effort scrape: skip objects whose key doesn't match the
                # expected naming pattern (was a silent bare "except: pass").
                continue

        # Group documents under a unique well tag; the well_url is added only
        # once per tag, every document_url is appended.
        for well in wells:
            if well['well_url'] not in unique_well_dict[well['well_tag']]:
                unique_well_dict[well['well_tag']].append(well['well_url'])
            unique_well_dict[well['well_tag']].append(well['document_url'])

        # write our well tags and their child array items to csvfile
        with open(filename, 'w') as csvfile:
            for well_tag in unique_well_dict:
                csvfile.write(f'{well_tag},')
                for array_item in unique_well_dict[well_tag]:
                    csvfile.write(f'{array_item},')
                csvfile.write('\n')
        # Fixed garbled f-string placeholder: report the real output path.
        self.stdout.write(self.style.SUCCESS(f'wrote file to: {os.getcwd()}/{filename}'))

    def upload_file(self, filename):
        """Upload the CSV to S3_HOST (secure) under S3_WELL_BUCKET/export/<filename>."""
        client = Minio(get_env_variable('S3_HOST'),
                       access_key=get_env_variable('S3_PUBLIC_ACCESS_KEY'),
                       secret_key=get_env_variable('S3_PUBLIC_SECRET_KEY'),
                       secure='1')
        logger.info('uploading {}'.format(filename))

        # write our file to minio
        with open(filename, 'rb') as file_data:
            file_stat = os.stat(filename)
            # Fixed garbled f-string placeholder: upload under export/<filename>.
            target = f'export/{filename}'
            client.put_object(get_env_variable('S3_WELL_BUCKET'),
                              target,
                              file_data,
                              file_stat.st_size)
        self.stdout.write(self.style.SUCCESS(f'uploaded file to: {get_env_variable("S3_HOST")}/{get_env_variable("S3_WELL_BUCKET")}/{target}'))
| Python | 0.999991 | |
b447711c4396c36bc845184961d28660735c6f3d | Create window.py | src/new/window.py | src/new/window.py | # window draws
# editor window
class EditorWindow(Fl_Double_Window) :
search = ""
def __init__(self, w, h, label) :
Fl_Double_Window.__init__(self, w, h, label)
# set/update title
def set_title(win):
global filename, title
if len(filename) == 0:
title = "Untitled"
else:
title = os.path.basename(filename)
if changed:
title = title+" (modified)"
win.label(title)
| Python | 0.000001 | |
7ef6c8c3ea0e2481a424bcca91496ce14c0aec4a | add basic file verifier, just checks dimensions, need to add header and vlr checks. | misc/file_verify.py | misc/file_verify.py | #!/usr/bin/env python
import sys
sys.path.append("../")
from laspy import file as File
inFile1 = File.File(sys.argv[1],mode= "r")
inFile2 = File.File(sys.argv[2],mode= "r")
spec = inFile1.reader.point_format.lookup.keys()
def f(x):
return(list(inFile1.reader.get_dimension(x)) == list(inFile2.reader.get_dimension(x)))
passed = 0
failed = 0
for dim in spec:
if f(dim):
passed += 1
print("Dimension: " + dim + " is identical.")
else:
failed += 1
print("Dimension: " + dim + " is not identical")
print(str(passed) + " identical dimensions out of " + str(passed + failed))
inFile1.close()
inFile2.close()
| Python | 0 | |
895571ec359e7571f8581f3635ae1c452ed911a5 | add a nova command | cloudmesh_cmd3/plugins/cm_shell_nova.py | cloudmesh_cmd3/plugins/cm_shell_nova.py | from cmd3.shell import command
from cloudmesh_common.logger import LOGGER
import os
from cloudmesh_common.tables import row_table
log = LOGGER(__file__)
class cm_shell_nova:
    """opt_example class"""

    def activate_cm_shell_nova(self):
        # Register the "nova" command under the "cloud" help topic when the
        # shell activates this plugin.
        self.register_command_topic('cloud','nova')
        pass

    @command
    def do_nova(self, args, arguments):
        """
        Usage:
               nova login
               nova info
               nova help
               nova ARGUMENTS

        A simple wrapper for the openstack nova command

        Arguments:
          ARGUMENTS      The arguments passed to nova
          help           Prints the nova manual
          login          reads the information from the current cloud
                         and updates the environment variables if
                         the cloud is an openstack cloud
          info           the environment values for OS

        Options:
           -v       verbose mode

        """
        # NOTE(review): the usage docstring above appears to be parsed by the
        # cmd3 @command decorator (docopt style) to build `arguments`; do not
        # reword it without checking that machinery.
        # log.info(arguments)

        if arguments["help"]:
            os.system("nova help")
            return
        elif arguments["info"]:
            #
            # prints the current os env variables for nova
            #
            d = {}
            for attribute in ['OS_USER_ID',
                              'OS_USERNAME',
                              'OS_TENANT_NAME',
                              'OS_AUTH_URL',
                              'OS_CACERT',
                              'OS_PASSWORD',
                              'OS_REGION']:
                try:
                    d[attribute] = os.environ[attribute]
                except:
                    # Missing variables are shown as None in the table.
                    d[attribute] = None
            # Python 2 print statement: render the env vars as a table.
            print row_table(d, order=None, labels=["Variable", "Value"])
            return
        elif arguments["login"]:
            print "Not yet implemented"
            #
            # TODO: implemet
            #
            # cloud = get current default
            # if cloud type is openstack:
            #    credentials = get credentials
            #    set the credentials in the current os system env variables
            #
        else:
            # Anything else is passed straight through to the nova CLI.
            os.system("nova {0}".format(arguments["ARGUMENTS"]))
            return
| Python | 0.000013 | |
2bf2a0849c1524f3ac56533d9f36eb907213f819 | Add WebAPI plugin | proxy/plugins/WebAPI.py | proxy/plugins/WebAPI.py | from ..data import clients, blocks, players
from twisted.web.server import Site
from twisted.web.resource import Resource
import json, time
upStart = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
class WebAPI(Resource):
    """Serves a JSON snapshot of proxy statistics over HTTP GET."""

    def render_GET(self, request):
        # Assemble the current statistics snapshot and serialize it.
        snapshot = {
            'count': len(clients.connectedClients),
            'blocksCached': len(blocks.blockList),
            'playersCached': len(players.playerList),
            'upSince': upStart,
        }
        return json.dumps(snapshot)
19348f5d8e2832fbf378578d38516df66dc849b6 | Implement IRCv3.1 StartTLS | heufybot/modules/ircv3/starttls.py | heufybot/modules/ircv3/starttls.py | from twisted.internet.interfaces import ISSLTransport
from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from zope.interface import implements
try:
from twisted.internet import ssl
except ImportError:
ssl = None
class IRCv3StartTLS(BotModule):
    """Implements the IRCv3.1 'tls' capability (StartTLS, numerics 670/691)."""
    implements(IPlugin, IBotModule)

    name = "StartTLS"

    def actions(self):
        # Hook capability listing/acknowledgement plus the two StartTLS
        # numerics: 670 (server ready, proceed) and 691 (failure).
        return [ ("listcaps", 1, self.addToCapList),
                 ("caps-acknowledged", 1, self.requestNegotiation),
                 ("pre-handlenumeric-670", 1, self.startNegotiation),
                 ("pre-handlenumeric-691", 1, self.negotiationFailed) ]

    def addToCapList(self, server, caps):
        # Only advertise "tls" while the connection is still plaintext and
        # TLS support is available (the ssl import above may have failed).
        if not self.bot.servers[server].secureConnection and ssl is not None:
            caps.append("tls")

    def requestNegotiation(self, server, caps):
        # The server acknowledged our capability request; ask it to start TLS.
        if "tls" in caps:
            self.bot.log.info("[{server}] Trying to initiate StartTLS...", server=server)
            self.bot.servers[server].sendMessage("STARTTLS")

    def startNegotiation(self, server, prefix, params):
        # Numeric 670: the server is ready, upgrade the transport in place.
        self.bot.log.info("[{server}] Server replied: \"{reply}\"", server=server, reply=params[1])
        self.bot.log.info("[{server}] Proceeding with TLS handshake...", server=server)
        self.bot.servers[server].transport.startTLS(ssl.CertificateOptions())
        if ISSLTransport.providedBy(self.bot.servers[server].transport):
            self.bot.servers[server].secureConnection = True
            self.bot.log.info("[{server}] TLS handshake successful. Connection is now secure.", server=server)
        # Returning True stops further handling of this numeric.
        return True

    def negotiationFailed(self, server, prefix, params):
        # Numeric 691: the server rejected or could not complete StartTLS.
        self.bot.log.warn("[{server}] StartTLS failed, reason: \"{reply}\".", server=server, reply=params[1])
        return True
startTLS = IRCv3StartTLS()
| Python | 0.999316 | |
63e14ae4485bcca682b952e5ab7f125f58c3d960 | Add pwnypack ipython extension. | pwnypack/ipython_ext.py | pwnypack/ipython_ext.py | import functools
import shlex
import pwny
import pwnypack.main
__all__ = []
def call_main_func(func_name, ipython, line):
pwnypack.main.main([func_name] + shlex.split(line))
def load_ipython_extension(ipython):
ipython.push(vars(pwny))
for f_name in pwnypack.main.MAIN_FUNCTIONS:
ipython.define_magic(f_name, functools.partial(call_main_func, f_name))
def unload_ipython_extension(ipython):
ipython.drop_by_id(vars(pwny))
| Python | 0 | |
7fbfca47b2b435a0aa4df8d39699831f752f351d | Add initial code for scraping standings data | pybaseball/standings.py | pybaseball/standings.py | from bs4 import BeautifulSoup
import requests
import datetime
def get_soup(date):
#year, month, day = [today.strftime("%Y"), today.strftime("%m"), today.strftime("%d")]
#url = "http://www.baseball-reference.com/boxes?year={}&month={}&day={}".format(year, month, day)
year = date.strftime("%Y")
url = 'http://www.baseball-reference.com/leagues/MLB/{}-standings.shtml'.format(year)
s=requests.get(url).content
return BeautifulSoup(s)
def get_tables(soup):
tables = soup.find_all('table')
datasets = []
for table in tables:
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
#data.append(row.find_all('a')[0]['title']) # team name
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
cols.insert(0,row.find_all('a')[0]['title'])
data.append([ele for ele in cols if ele])
datasets.append(data)
return datasets
def standings(date=None):
# get most recent standings if date not specified
if(date is None):
date = datetime.datetime.today()
# retrieve html from baseball reference
soup = get_soup(date)
tables = get_tables(soup)
return tables
| Python | 0 | |
d187c51ccd9dc1676b6f16eddecee6dce752d668 | Make class test-class name more specific | distarray/tests/test_client.py | distarray/tests/test_client.py | import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
| import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
| Python | 0 |
b5d8b29a34a4675ad5de33511bfca486f648a134 | Create _source.py | static/_source.py | static/_source.py | # coding: utf-8
# BlackSmith general configuration file
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Jabber server to connect
SERVER = 'example.com'
# Connecting Port
PORT = 5222
# Jabber server`s connecting Host
HOST = 'example.com'
# Using TLS (True - to enable, False - to disable)
SECURE = True
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# User`s account
USERNAME = 'username'
# Jabber ID`s Password
PASSWORD = 'password'
# Resourse (please don`t touch it)
RESOURCE = u'simpleApps' # You can write unicode symbols here
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Default chatroom nick
DEFAULT_NICK = u'BlackSmith-m.1' # You can write unicode symbols here
# Groupchat message size limit
CHAT_MSG_LIMIT = 1024
# Private/Roster message size limit
PRIV_MSG_LIMIT = 2024
# Incoming message size limit
INC_MSG_LIMIT = 8960
# Working without rights of moder (True - to enable, False - to disable)
MSERVE = False
# Jabber account of bot`s owner
BOSS = 'boss@example.com'
# Memory usage limit (size in kilobytes, 0 - not limited)
MEMORY_LIMIT = 49152
# Admin password, used as a key to command "login"
BOSS_PASS = ''
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
| Python | 0.000002 | |
1d89c30a286269f08edccf86bd7701bd34dc4083 | add expect_column_values_to_be_valid_ny_zip (#4713) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_new_york_state_zip.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_new_york_state_zip.py | import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_new_york_state_zip(zip: str):
    """Return True if *zip* is a zipcode registered to New York state.

    Non-string input and overlong strings return False instead of raising.
    NOTE: the parameter name shadows the builtin ``zip``; kept for backward
    compatibility with any keyword callers.
    """
    # Reject non-strings BEFORE calling len(): the original checked length
    # first, so e.g. an integer input raised TypeError instead of returning
    # False.
    if not isinstance(zip, str):
        return False
    if len(zip) > 10:
        return False
    # Set membership is O(1), vs. scanning a list of every NY zipcode.
    new_york_state_zips = {
        d["zip_code"] for d in zipcodes.filter_by(state="NY")
    }
    return zip in new_york_state_zips
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidNewYorkStateZip(ColumnMapMetricProvider):
    """Map metric: flags, per value, whether it is a valid NY-state zipcode."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_new_york_state_zip"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Element-wise boolean Series: True where the value is a NY zipcode.
        return column.apply(lambda x: is_valid_new_york_state_zip(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidNewYorkStateZip(ColumnMapExpectation):
    """Expect values in this column to be valid New York state zipcodes.

    See https://pypi.org/project/zipcodes/ for more information.
    """

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "valid_new_york_state_zip": ["14652", "14701", "14711", "13739"],
                "invalid_new_york_state_zip": ["-10000", "1234", "99999", "25487"],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "valid_new_york_state_zip"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "invalid_new_york_state_zip"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_new_york_state_zip"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.

        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        # The base class performs the shared validation; no extra kwargs are
        # required by this expectation.
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration

        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": [
            "hackathon",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@luismdiaz01",
            "@derekma73",  # Don't forget to add your github handle here!
        ],
        "requirements": ["zipcodes"],
    }
if __name__ == "__main__":
ExpectColumnValuesToBeValidNewYorkStateZip().print_diagnostic_checklist()
| Python | 0 | |
647f0c1409dcd22d69a79d21571d2c03f794a2a8 | Test iter and yield | 99_misc/iterator.py | 99_misc/iterator.py | #/usr/bin/env python
# Test yield generator
def my_double(arr):
for i in arr:
yield i * 2
for i in my_double(range(1, 10)):
print("{0} ".format(i)),
print("\n"),
# Text iteration
i = iter(my_double(range(10, 21)))
print i
for j in range (1, 10):
print("{0} ".format(i.next())),
| Python | 0 | |
28944376472130d53a05f7473e7213c917207cd4 | Add model representing a listing | apartments/models.py | apartments/models.py | from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
    """A scraped Craigslist apartment listing persisted via SQLAlchemy."""
    __tablename__ = 'listings'

    id = Column(Integer, primary_key=True)
    # Craigslist's own post id; unique so re-scrapes don't duplicate rows.
    craigslist_id = Column(Integer, unique=True)
    name = Column(String)
    price = Column(Float)
    # Unique URL of the post; also guards against duplicate inserts.
    link = Column(String, unique=True)
    created = Column(DateTime)
    area = Column(String)
    geotag = Column(String)
    lat = Column(Float)
    lon = Column(Float)
    location = Column(String)
    # NOTE(review): presumably the closest transit stop -- confirm with the
    # scraper code that fills this column in.
    nearest_stop = Column(String)

    def __repr__(self):
        return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
| Python | 0 | |
38cbc73f70a9ca896a29d7fa2e000388bbf40d88 | Add script to generate data from an experiment | DilipadTopicModelling/experiment_get_results.py | DilipadTopicModelling/experiment_get_results.py | import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
| Python | 0 | |
656d94c0375f6a96cc3a9d4b3227d8f19afe3dea | Add lemon drop elevator model | control/systems/main.py | control/systems/main.py | import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pully
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
| Python | 0 | |
2ca0d97649529dfc66486dc1d3e7fa1e37d8ee91 | add integration test for api analytics | integrations/test_api_analytics.py | integrations/test_api_analytics.py | """Integration tests for internal analytics."""
# standard library
import unittest
# third party
import mysql.connector
import requests
# use the local instance of the Epidata API
BASE_URL = 'http://delphi_web_epidata/epidata/api.php'
class ApiAnalyticsTests(unittest.TestCase):
    """Tests internal analytics not specific to any particular endpoint."""

    def setUp(self):
        """Perform per-test setup."""
        # connect to the `epidata` database and clear the `api_analytics` table
        cnx = mysql.connector.connect(
            user='user',
            password='pass',
            host='delphi_database_epidata',
            database='epidata')
        cur = cnx.cursor()
        cur.execute('truncate table api_analytics')
        cnx.commit()
        cur.close()

        # make connection and cursor available to test cases
        self.cnx = cnx
        self.cur = cnx.cursor()

    def tearDown(self):
        """Perform per-test teardown."""
        self.cur.close()
        self.cnx.close()

    def test_analytics_update(self):
        """Update internal analytics for requests to the API."""
        # Each GET against the API should insert one api_analytics row.
        make_request = lambda src: requests.get(BASE_URL, params={'source': src})

        # make some requests
        for _ in range(1):
            make_request('source1')
        for _ in range(5):
            make_request('source2')
        for _ in range(19):
            make_request('source3')

        # verify that analytics are available, grouped per source
        self.cur.execute('''
            select source, count(1)
            from api_analytics
            group by source
            order by source
        ''')
        values = [row for row in self.cur]
        self.assertEqual(values, [
            ('source1', 1),
            ('source2', 5),
            ('source3', 19),
        ])
8cc622db293816fc96bb7df0139b57a2b5a2eaef | add scanning of live IP addresses with ping sweep, multi threading | Scan_IpAdds_ping.py | Scan_IpAdds_ping.py | import os, platform, collections
import socket, subprocess,sys
import threading
from datetime import datetime
class myThread (threading.Thread):
    """Worker thread that pings one slice [startLastOctet, endLastOctet)."""
    def __init__(self,startLastOctet,endLastOctet):
        threading.Thread.__init__(self)
        self.startLastOctet = startLastOctet
        self.endLastOctet = endLastOctet
    def run(self):
        # Delegate to the module-level sweep function for this slice.
        runThread(self.startLastOctet,self.endLastOctet)
def getNetwork():
    """Prompt for the network and last-octet range; pick the ping command.

    Returns (firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd)
    where `dic` is the shared OrderedDict the worker threads fill with
    live addresses. Python 2 only (raw_input).
    """
    net = raw_input("Enter the Network Address:\t\t ")
    netSplit= net.split('.')
    a = '.'
    # Keep "a.b.c." so workers only append the last octet.
    firstThreeOctet = netSplit[0]+a+netSplit[1]+a+netSplit[2]+a
    startLastOctet = int(raw_input("Enter the beginning of last Octet:\t "))
    endLastOctet = int(raw_input("Enter the end od last Octet:\t\t "))
    # Make the end inclusive for the range()-style loops downstream.
    endLastOctet =endLastOctet+1
    dic = collections.OrderedDict()
    oper = platform.system()
    # Windows ping uses -n for count, unix-likes use -c.
    if (oper=="Windows"):
        pingCmd = "ping -n 1 "
    elif (oper== "Linux"):
        pingCmd = "ping -c 1 "
    else :
        pingCmd = "ping -c 1 "
    return firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd
def runThread(startLastOctet,endLastOctet):
    """Ping each address in [startLastOctet, endLastOctet) and record replies.

    Live hosts are stored in the shared module-level `dic` keyed by last
    octet. Relies on "ttl" appearing in a successful unix ping reply.
    """
    #print "Scanning in Progess"
    for ip in xrange(startLastOctet,endLastOctet):
        addr = firstThreeOctet+str(ip)
        pingAddress = pingCmd+addr
        response = os.popen(pingAddress)
        for line in response.readlines():
            #if(line.count("TTL")):
            #   break
            if (line.count("ttl")):
                #print addr, "--> Live"
                dic[ip]= addr
                break
if __name__ == '__main__':
    subprocess.call('clear',shell=True)
    print "-" * 75
    print "This program search for life IPs in last octet, with multiple threads "
    print "\tFor example: 192.168.11.xxx - 192.168.11.yyy"
    print "-" * 75
    firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd = getNetwork()
    t1= datetime.now()
    total_ip =endLastOctet-startLastOctet
    tn =3 # number of ip handled by one thread
    # Python 2 integer division; +1 covers the remainder slice.
    total_thread = total_ip/tn
    total_thread=total_thread+1
    threads= []
    try:
        # Spawn one thread per slice of `tn` consecutive addresses.
        for i in xrange(total_thread):
            en = startLastOctet+tn
            if(en >endLastOctet):
                en =endLastOctet
            thread = myThread(startLastOctet,en)
            thread.start()
            threads.append(thread)
            startLastOctet =en
    except:
        print "Error: unable to start thread"
    print "\t Number of Threads active:", threading.activeCount()
    for t in threads:
        t.join()
    print "\tExiting Main Thread"
    # Sort results by last octet so output is in address order.
    sortedIPs = collections.OrderedDict(sorted(dic.items()))
    for key in sortedIPs:
        print "IP address: {} \t --> Live".format(sortedIPs[key])
    t2= datetime.now()
    total =t2-t1
    print "Scanning complete in " , total
1489e896952f5a3ea498618f615c5fd133a297c7 | Add test cases for pricing API with DiscountModules | shoop_tests/core/test_pricing_discounts.py | shoop_tests/core/test_pricing_discounts.py | # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from django.conf import settings
from shoop.apps.provides import override_provides
from shoop.core.pricing import (
DiscountModule, get_price_info, get_price_infos, get_pricing_steps,
get_pricing_steps_for_products
)
from shoop.testing.factories import create_product, get_default_shop
from shoop.testing.utils import apply_request_middleware
# Registers Minus25DiscountModule under the "discount_module" provides
# category for the duration of this test module (entered in setup_module,
# exited in teardown_module).
provide_overrider = override_provides(
    "discount_module", [__name__ + ':Minus25DiscountModule'])
def setup_module(module):
    """Force default pricing plus the minus25 discount module for all tests."""
    global original_pricing_module
    global original_discount_modules
    original_pricing_module = settings.SHOOP_PRICING_MODULE
    original_discount_modules = settings.SHOOP_DISCOUNT_MODULES
    settings.SHOOP_PRICING_MODULE = "default_pricing"
    settings.SHOOP_DISCOUNT_MODULES = ["minus25"]
    provide_overrider.__enter__()
def teardown_module(module):
    """Restore the settings and provides changed in setup_module."""
    global original_pricing_module
    global original_discount_modules
    provide_overrider.__exit__(None, None, None)
    settings.SHOOP_PRICING_MODULE = original_pricing_module
    settings.SHOOP_DISCOUNT_MODULES = original_discount_modules
class Minus25DiscountModule(DiscountModule):
    """Test discount module that takes 25% off every price."""
    identifier = "minus25"
    def discount_price(self, context, product, price_info):
        price_info.price *= (1 - decimal.Decimal('0.25'))
        return price_info
def initialize_test(rf):
    """Return a (request, [product1, product2], shop price factory) fixture."""
    shop = get_default_shop()
    request = rf.get("/")
    request.shop = shop
    apply_request_middleware(request)
    # Base prices 120 and 180; the minus25 module discounts to 90 and 135.
    product1 = create_product("test-product1", shop=shop, default_price=120)
    product2 = create_product("test-product2", shop=shop, default_price=180)
    return (request, [product1, product2], shop.create_price)
@pytest.mark.django_db
def test_get_price_info(rf):
    """A single unit is discounted from 120 to 90 (25% off)."""
    (request, products, price) = initialize_test(rf)
    pi = get_price_info(request, products[0])
    assert pi.price == price(90)
    assert pi.base_price == price(120)
    assert pi.quantity == 1
@pytest.mark.django_db
def test_get_price_info_with_quantity(rf):
    """The discount scales linearly with the requested quantity."""
    (request, products, price) = initialize_test(rf)
    pi = get_price_info(request, products[0], 20)
    assert pi.price == price(1800)
    assert pi.base_price == price(2400)
    assert pi.quantity == 20
@pytest.mark.django_db
def test_product_get_price_info(rf):
    """Product.get_price_info goes through the same discount pipeline."""
    (request, products, price) = initialize_test(rf)
    pi = products[0].get_price_info(request)
    assert pi.price == price(90)
    assert pi.base_price == price(120)
@pytest.mark.django_db
def test_get_price_infos(rf):
    """Batch lookup returns a discounted PriceInfo keyed by product id."""
    (request, products, price) = initialize_test(rf)
    pis = get_price_infos(request, products)
    assert set(pis.keys()) == set(x.id for x in products)
    pi1 = pis[products[0].id]
    pi2 = pis[products[1].id]
    assert pi1.price == price(90)
    assert pi1.base_price == price(120)
    assert pi2.price == price(135)
    assert pi2.base_price == price(180)
@pytest.mark.django_db
def test_get_pricing_steps(rf):
    """With no quantity discounts there is a single step at quantity 1."""
    (request, products, price) = initialize_test(rf)
    pis = get_pricing_steps(request, products[0])
    assert len(pis) == 1
    assert pis[0].quantity == 1
    assert pis[0].price == price(90)
    assert pis[0].base_price == price(120)
@pytest.mark.django_db
def test_get_pricing_steps_for_products(rf):
    """Batch pricing steps: one single-step entry per product id."""
    (request, products, price) = initialize_test(rf)
    pis = get_pricing_steps_for_products(request, products)
    assert set(pis.keys()) == set(x.id for x in products)
    assert len(pis[products[0].id]) == 1
    assert len(pis[products[1].id]) == 1
    assert pis[products[0].id][0].quantity == 1
    assert pis[products[0].id][0].price == price(90)
    assert pis[products[0].id][0].base_price == price(120)
    assert pis[products[1].id][0].quantity == 1
    assert pis[products[1].id][0].price == price(135)
    assert pis[products[1].id][0].base_price == price(180)
| Python | 0 | |
e670de6ecb7be3da56acf2976148574165cb69aa | Add missing test module | h5py/tests/test_utils.py | h5py/tests/test_utils.py | #+
#
# This file is part of h5py, a low-level Python interface to the HDF5 library.
#
# Copyright (C) 2008 Andrew Collette
# http://h5py.alfven.org
# License: BSD (See LICENSE.txt for full license)
#
# $Date$
#
#-
import sys
import numpy
from common import HDF5TestCase, api_18
from h5py import *
from h5py import utils
from h5py.h5 import H5Error
class TestUtils(HDF5TestCase):
    """Tests for the h5py.utils NumPy argument-checking and malloc helpers."""
    def test_check_read(self):
        """ Check if it's possible to read from the NumPy array """
        carr = numpy.ones((10,10), order='C')
        farr = numpy.ones((10,10), order='F')
        oarr = numpy.ones((10,10), order='C')
        oarr.strides = (0,1)
        # Only C-contiguous arrays with normal strides are readable;
        # Fortran order and zero strides must be rejected.
        utils.check_numpy_read(carr)
        self.assertRaises(TypeError, utils.check_numpy_read, farr)
        self.assertRaises(TypeError, utils.check_numpy_read, oarr)
        s_space = h5s.create_simple((5,5))
        m_space = h5s.create_simple((10,10))
        l_space = h5s.create_simple((12,12))
        # Reading is allowed into equal- or larger-sized dataspaces only.
        utils.check_numpy_read(carr, m_space.id)
        utils.check_numpy_read(carr, l_space.id)
        self.assertRaises(TypeError, utils.check_numpy_read, carr, s_space.id)
        # This should not matter for read
        carr.flags['WRITEABLE'] = False
        utils.check_numpy_read(carr)
    def test_check_write(self):
        """ Check if it's possible to write to the NumPy array """
        carr = numpy.ones((10,10), order='C')
        farr = numpy.ones((10,10), order='F')
        oarr = numpy.ones((10,10), order='C')
        oarr.strides = (0,1)
        utils.check_numpy_write(carr)
        self.assertRaises(TypeError, utils.check_numpy_write, farr)
        self.assertRaises(TypeError, utils.check_numpy_write, oarr)
        s_space = h5s.create_simple((5,5))
        m_space = h5s.create_simple((10,10))
        l_space = h5s.create_simple((12,12))
        # For writes the array must hold the whole dataspace (equal or
        # smaller dataspace is fine, larger is not).
        utils.check_numpy_write(carr, s_space.id)
        utils.check_numpy_write(carr, m_space.id)
        self.assertRaises(TypeError, utils.check_numpy_write, carr, l_space.id)
        # This should matter now
        carr.flags['WRITEABLE'] = False
        self.assertRaises(TypeError, utils.check_numpy_write, carr)
    def test_emalloc(self):
        # Guarded malloc: ordinary and zero-size allocations succeed,
        # absurdly large requests raise MemoryError.
        utils._test_emalloc(1024)
        utils._test_emalloc(0)
        self.assertRaises(MemoryError, utils._test_emalloc, sys.maxint)
| Python | 0.000002 | |
3fd4244dbfd33bbf2fa369d81756e82b1cf1c467 | Clear out unaligned NLCD19 GWLF-E results | src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py | src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py | # Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
    """
    Clear the results for all scenarios belonging to GWLF-E projects made after
    the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
    2022-01-17:

    https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0

    These results will be recalculated with the correctly aligned NLCD19 values
    when these projects are accessed again.
    """
    Project = apps.get_model('modeling', 'Project')
    Scenario = apps.get_model('modeling', 'Scenario')
    # Drop cached GIS data and MapShed job references so the inputs are
    # regenerated from the corrected layer on next access.
    Project.objects.filter(
        model_package='gwlfe',
        created_at__gte='2022-01-17',
    ).update(
        gis_data=None,
        mapshed_job_uuid=None,
        subbasin_mapshed_job_uuid=None,
    )
    # Invalidate every scenario result computed from the misaligned inputs.
    Scenario.objects.filter(
        project__model_package='gwlfe',
        project__created_at__gte='2022-01-17',
    ).update(
        results='[]',
        modification_hash='',
    )
class Migration(migrations.Migration):
    # Data-only migration: no schema changes, just the cleanup above.
    dependencies = [
        ('modeling', '0040_clear_nlcd2019_tr55_results'),
    ]
    operations = [
        migrations.RunPython(clear_nlcd2019_gwlfe_results),
    ]
| Python | 0 | |
34046e290842108212d71f6cf2445d7015bf2423 | Create text.py | dasem/text.py | dasem/text.py | """text."""
from nltk import sent_tokenize, word_tokenize
def sentence_tokenize(text):
    """Split Danish *text* into sentences.

    Delegates to NLTK's Punkt sentence tokenizer with its pre-trained
    Danish model (so Danish abbreviations such as "f.eks." do not end a
    sentence).

    Parameters
    ----------
    text : str
        The text to split.

    Returns
    -------
    list of str
        The sentences of *text*, in order.

    Examples
    --------
    >>> sentences = sentence_tokenize('Hvad!? Hvor har du f.eks. siddet?')
    >>> sentences
    ['Hvad!?', 'Hvor har du f.eks. siddet?']
    """
    return sent_tokenize(text, language='danish')
def word_tokenize(sentence):
    """Tokenize a Danish sentence into words.

    BUG FIX: this definition shadows the ``word_tokenize`` imported from
    :mod:`nltk` at the top of the module, so the original body
    (``return word_tokenize(sentence)``) called itself and recursed until
    ``RecursionError``.  Delegate to NLTK through the module object instead.

    Parameters
    ----------
    sentence : str
        The sentence to tokenize.

    Returns
    -------
    list of str
        The word tokens of *sentence*.
    """
    import nltk
    # Pass the Danish model explicitly, consistent with sentence_tokenize.
    return nltk.word_tokenize(sentence, language='danish')
| Python | 0.000103 | |
477de06a99fc4998ec15442e5fae9b919be53392 | Initialize P2_scheduledComicDownloader | books/AutomateTheBoringStuffWithPython/Chapter15/PracticeProjects/P2_scheduledComicDownloader.py | books/AutomateTheBoringStuffWithPython/Chapter15/PracticeProjects/P2_scheduledComicDownloader.py | # Write a program that checks the websites of several web comics and automatically
# downloads the images if the comic was updated since the program’s last visit.
#
# Your operating system’s scheduler (Scheduled Tasks on Windows, launchd on OS X,
# and cron on Linux) can run your Python program once a day.
#
# The Python program itself can download the comic and then copy it to your desktop
# so that it is easy to find. This will free you from having to check the website
# yourself to see whether it has updated.
| Python | 0 | |
960f32fb9f1f34caf0d851a370786f39864c15b2 | add conoha/dns.py | conoha/dns.py | conoha/dns.py |
from .api import API, CustomList
from . import error
__all__ = 'Domain DomainList Record RecordList'.split()
class DNSAPI(API):
    """Base class for ConoHa DNS service endpoints."""
    def __init__(self, token, baseURIPrefix=None):
        super().__init__(token, baseURIPrefix)
        # Selects the DNS endpoint from the service catalog in the base API.
        self._serviceType = 'dns'
    def _getHeaders(self, h):
        # Merge caller-supplied headers over the JSON content type, then let
        # the base class add authentication headers.
        headers={
            'Content-Type': 'application/json'
        }
        if h:
            headers.update(h)
        return super()._getHeaders(headers)
class Domain:
    """A DNS domain (zone) as returned by the ConoHa DNS API.

    Wraps one entry of the API's ``domains`` payload in attribute form.
    ``gslb`` is optional in the payload; every other key is required and
    raises ``KeyError`` when missing.
    """
    # (attribute name, required payload key) pairs.
    _FIELD_MAP = (
        ('domainId', 'id'),
        ('name', 'name'),
        ('email', 'email'),
        ('serial', 'serial'),
        ('ttl', 'ttl'),
        ('description', 'description'),
        ('created_at', 'created_at'),
        ('updated_at', 'updated_at'),
    )
    def __init__(self, data):
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, data[key])
        # Not every deployment returns GSLB info, hence .get().
        self.gslb = data.get('gslb')
class DomainList(DNSAPI, CustomList):
    """List of DNS domains, fetched eagerly from the API on construction.

    Behaves like a list of :class:`Domain`; the lookup helpers accept
    either a domain name or a domain id.
    """
    def __init__(self, token):
        super().__init__(token)
        CustomList.__init__(self)
        path = 'domains'
        res = self._GET(path)
        self.extend(Domain(i) for i in res['domains'])
    def _getitem(self, key, domain):
        # CustomList hook: a domain matches by id or by name.
        return key in [domain.domainId, domain.name]
    def _validateDomain(self, nameOrDomainid):
        # Raise error.NotFound if no cached domain matches.
        domain = self.getDomain(nameOrDomainid)
        if not domain:
            raise error.NotFound('domain', nameOrDomainid)
    def toDomainid(self, nameOrDomainid):
        """Resolve a name-or-id to the domain id (None if unknown)."""
        domain = self.getDomain(nameOrDomainid)
        if domain:
            return domain.domainId
    def toName(self, nameOrDomainid):
        """Resolve a name-or-id to the domain name (None if unknown)."""
        domain = self.getDomain(nameOrDomainid)
        if domain:
            return domain.name
    def getDomain(self, nameOrDomainid):
        """Return the cached Domain matching by id or name, else None."""
        if nameOrDomainid:
            for domain in self:
                if (domain.domainId == nameOrDomainid) or (domain.name == nameOrDomainid):
                    return domain
    def add(self, name, email, ttl=None, description=None, gslb=None):
        """Create a domain via the API and cache + return the result."""
        data = {
            'name': name,
            'email': email,
            'ttl': ttl,
            'description': description,
            'gslb': gslb
        }
        # None-valued fields are omitted so server-side defaults apply.
        res = self._POST('domains', {k: v for k, v in data.items() if v is not None})
        domain = Domain(res)
        self.append(domain)
        return domain
    def update(self, nameOrDomainid, email=None, ttl=None, description=None, gslb=None):
        """Update a domain via the API and replace the cached copy."""
        self._validateDomain(nameOrDomainid)
        domain = self.getDomain(nameOrDomainid)
        data = {
            'email': email,
            'ttl': ttl,
            'description': description,
            'gslb': gslb
        }
        path = 'domains/{}'.format(domain.domainId)
        res = self._PUT(path, {k: v for k, v in data.items() if v is not None})
        self.remove(domain)
        domain = Domain(res)
        self.append(domain)
    def delete(self, nameOrDomainid):
        """Delete a domain via the API.

        NOTE(review): unlike RecordList.delete, the cached Domain object is
        not removed from this list afterwards -- confirm whether intended.
        """
        self._validateDomain(nameOrDomainid)
        domainId = self.toDomainid(nameOrDomainid)
        path = 'domains/{}'.format(domainId)
        self._DELETE(path, isDeserialize=False)
class Record:
    """A single DNS record belonging to a domain.

    Wraps one entry of the API's ``records`` payload in attribute form.
    The GSLB fields are optional in the payload; all other keys are
    required and raise ``KeyError`` when missing.
    """
    def __init__(self, data):
        self.recordId = data['id']
        self.domainId = data['domain_id']
        self.name = data['name']
        self.type = data['type']
        self.data = data['data']
        self.ttl = data['ttl']
        self.priority = data['priority']
        self.description = data['description']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
        # GSLB attributes may be absent, hence .get().
        for optional in ('gslb_check', 'gslb_region', 'gslb_weight'):
            setattr(self, optional, data.get(optional))
class RecordList(DNSAPI, CustomList):
    """List of DNS records for one domain, fetched eagerly on construction."""
    def __init__(self, token, domainId):
        """Fetch every record of *domainId* from the API."""
        super().__init__(token)
        CustomList.__init__(self)
        path = 'domains/{}/records'.format(domainId)
        res = self._GET(path)
        self.domainId = domainId
        self.extend(Record(i) for i in res['records'])
    def _getitem(self, key, record):
        # CustomList hook: records match by id only.
        return key in [record.recordId]
    def _validateRecord(self, recordId):
        # BUG FIX: the original rebound ``recordId`` to the looked-up record
        # before testing it, so a miss raised ``NotFound('record', None)``
        # instead of reporting the id that was actually not found.
        if not self.getRecord(recordId):
            raise error.NotFound('record', recordId)
    def getRecord(self, recordId):
        """Return the cached record with *recordId*, or None."""
        for record in self:
            if record.recordId == recordId:
                return record
    def add(self, **kwargs):
        """Create a record via the API; None-valued kwargs are omitted."""
        path = 'domains/{}/records'.format(self.domainId)
        res = self._POST(path, {k: v for k, v in kwargs.items() if v is not None})
        record = Record(res)
        self.append(record)
        return record
    def update(self, recordId, **kwargs):
        """Update a record via the API and replace the cached copy."""
        self._validateRecord(recordId)
        record = self.getRecord(recordId)
        path = 'domains/{}/records/{}'.format(self.domainId, recordId)
        res = self._PUT(path, {k: v for k, v in kwargs.items() if v is not None})
        self.remove(record)
        record = Record(res)
        self.append(record)
        return record
    def delete(self, recordId):
        """Delete a record via the API and drop it from the cache."""
        self._validateRecord(recordId)
        record = self.getRecord(recordId)
        path = 'domains/{}/records/{}'.format(record.domainId, record.recordId)
        self._DELETE(path, isDeserialize=False)
        self.remove(record)
| Python | 0 | |
5dad4f0e2d9732d7ff4a0feebff332f005cabf01 | Remove foreign keys from deprecated `progress-edx-platform-extensions` (#1874) | common/djangoapps/database_fixups/migrations/0002_remove_foreign_keys_from_progress_extensions.py | common/djangoapps/database_fixups/migrations/0002_remove_foreign_keys_from_progress_extensions.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
"""
The `progress-edx-platform-extensions` has been deprecated in favor of `edx-completion`.
The requirement was removed in the commit linked as (1) below. However its migration (2) had not been reverted.
That migration used `auth_user.id` as the foreign key in its models (3), but Django does not resolve this constraint
between existing tables anymore, because the model has been removed.
Therefore we need to drop the tables related to deprecated application in order to be able to remove users properly.
Because of some performance concerns, deletion is implemented in (4).
This migration drops only foreign keys from deprecated tables.
If ran twice (for any reason), it will raise a custom error for better visibility that these keys do not exist.
(1) https://github.com/edx-solutions/edx-platform/commit/59bf3efe71533de53b60bd979517e889d18a96bb
(2) https://github.com/edx-solutions/progress-edx-platform-extensions/blob/master/progress/migrations/0001_initial.py
(3) https://github.com/edx-solutions/progress-edx-platform-extensions/blob/master/progress/models.py
(4) https://github.com/edx-solutions/edx-platform/pull/1862
"""
class Migration(migrations.Migration):
    """Drops the dangling auth_user foreign keys left on the deprecated
    progress app's tables (see the module docstring for background)."""
    dependencies = [
        ('database_fixups', '0001_initial'),
    ]
    operations = [
        migrations.RunSQL("""
            -- Drop a procedure if it already exists - safety check.
            DROP PROCEDURE IF EXISTS drop_foreign_key_from_table;
            -- We are dropping constraints from 3 tables, so we create a temporary procedure to avoid code repetition.
            CREATE PROCEDURE drop_foreign_key_from_table(given_table VARCHAR(64))
            BEGIN
                -- Find the ID of the foreign key (there is only one per table, otherwise it would fail).
                SET @foreign_key = (
                    SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
                    WHERE TABLE_NAME = given_table AND CONSTRAINT_TYPE = 'FOREIGN KEY'
                );
                IF @foreign_key IS NOT NULL THEN
                    -- Prepare query (MySQL does not allow embedding queries in a standard way here).
                    SET @statement = CONCAT('ALTER TABLE ', given_table, ' DROP FOREIGN KEY ', @foreign_key);
                    PREPARE stmt FROM @statement;
                    EXECUTE stmt;
                    DEALLOCATE PREPARE stmt;
                ELSE
                    -- Raise custom error for having clearer logs in case of a failure.
                    SET @error_message = CONCAT('Cannot find foreign key in ', given_table, ' table.');
                    SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = @error_message;
                END IF;
            END;
            -- Call temporary procedure on relevant tables.
            CALL drop_foreign_key_from_table('progress_coursemodulecompletion');
            CALL drop_foreign_key_from_table('progress_studentprogress');
            CALL drop_foreign_key_from_table('progress_studentprogresshistory');
            -- Clean up.
            DROP PROCEDURE IF EXISTS drop_foreign_key_from_table;
        """)
    ]
| Python | 0 | |
9eb35140a1790625c32773af6b8a2d76699e86c6 | Move MapEntityForm to mapentity (ref #129) | mapentity/forms.py | mapentity/forms.py | from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Div, Button
from crispy_forms.bootstrap import FormActions
class MapEntityForm(forms.ModelForm):
    """Base ModelForm for MapEntity models: a crispy two-column layout with
    the regular model fields on the left and the geometry widget(s) on the
    right, plus Cancel/Save actions.

    Subclasses set ``modelfields`` and ``geomfields`` to tuples of field
    names.
    """
    # Hidden bookkeeping fields so the template/JS can identify the object.
    pk = forms.Field(required=False, widget=forms.Field.hidden_widget)
    model = forms.Field(required=False, widget=forms.Field.hidden_widget)
    # NOTE(review): this FormHelper instance is a class attribute shared by
    # every form instance and is mutated in __init__ (form_action, layout) --
    # confirm this is safe when several forms are built concurrently.
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    modelfields = tuple()
    geomfields = tuple()
    actions = FormActions(
        Button('cancel', _('Cancel'), ),
        Submit('save_changes', _('Save changes'), css_class="btn-primary offset1"),
        css_class="form-actions span11",
    )
    def __init__(self, *args, **kwargs):
        super(MapEntityForm, self).__init__(*args, **kwargs)
        # Generic behaviour
        # Point the form at the update URL for existing objects, the add
        # URL for new ones.
        if self.instance.pk:
            self.helper.form_action = self.instance.get_update_url()
        else:
            self.helper.form_action = self.instance.get_add_url()
        self.fields['pk'].initial = self.instance.pk
        self.fields['model'].initial = self.instance._meta.module_name
        # Hide label for geom :
        for geomfield in self.geomfields:
            self.fields[geomfield].label = False
        # Get fields from subclasses
        fields = ('pk','model') + self.modelfields
        leftpanel = Div(
            *fields,
            css_class="span3"
        )
        rightpanel = Div(
            *self.geomfields,
            css_class="span8"
        )
        # Main form layout
        self.helper.layout = Layout(
            leftpanel,
            rightpanel,
            self.actions
        )
| Python | 0 | |
2a5012f0b74fa025bbc909fd8bfb10aec272d148 | Create pawn-brotherhood.py | home/pawn-brotherhood.py | home/pawn-brotherhood.py | def safe_pawns ( pawns ) :
n = 0
for file , rank in pawns :
if rank < "2" : continue
if file > "a" :
first = chr( ord(file) - 1) + str( int(rank) - 1 )
if first in pawns :
n += 1
continue
if file < "h" :
second = chr( ord(file) + 1) + str( int(rank) - 1 )
if second in pawns :
n += 1
continue
return n
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
| Python | 0.99898 | |
632be2720287425387454fbe2bd9ce1c7eb2cdfb | basic tests | Orange/widgets/data/tests/test_owconcatenate.py | Orange/widgets/data/tests/test_owconcatenate.py | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import numpy as np
from Orange.data import Table
from Orange.widgets.data.owconcatenate import OWConcatenate
from Orange.widgets.tests.base import WidgetTest
class TestOWConcatenate(WidgetTest):
    """Tests for the OWConcatenate widget's output in union/intersection modes."""
    def setUp(self):
        # Fresh widget plus two datasets with disjoint domains.
        self.widget = self.create_widget(OWConcatenate)
        self.iris = Table("iris")
        self.titanic = Table("titanic")
    def test_single_input(self):
        """With a single input on either channel the output equals the input."""
        self.assertIsNone(self.get_output("Data"))
        self.send_signal("Primary Data", self.iris)
        output = self.get_output("Data")
        self.assertEqual(list(output), list(self.iris))
        self.send_signal("Primary Data", None)
        self.assertIsNone(self.get_output("Data"))
        self.send_signal("Additional Data", self.iris)
        output = self.get_output("Data")
        self.assertEqual(list(output), list(self.iris))
        self.send_signal("Additional Data", None)
        self.assertIsNone(self.get_output("Data"))
    def test_two_inputs_union(self):
        """Union mode keeps all rows and all variables, padding with NaN."""
        self.send_signal("Additional Data", self.iris, 0)
        self.send_signal("Additional Data", self.titanic, 1)
        output = self.get_output("Data")
        # needs to contain all instances
        self.assertEqual(len(output), len(self.iris) + len(self.titanic))
        # needs to contain all variables
        outvars = output.domain.variables
        self.assertLess(set(self.iris.domain.variables), set(outvars))
        self.assertLess(set(self.titanic.domain.variables), set(outvars))
        # the first part of the data set is iris, the second part is titanic
        np.testing.assert_equal(self.iris.X, output.X[:len(self.iris), :-3])
        self.assertTrue(np.isnan(output.X[:len(self.iris), -3:]).all())
        np.testing.assert_equal(self.titanic.X, output.X[len(self.iris):, -3:])
        self.assertTrue(np.isnan(output.X[len(self.iris):, :-3]).all())
    def test_two_inputs_intersection(self):
        """Intersection mode keeps all rows but only shared variables."""
        self.send_signal("Additional Data", self.iris, 0)
        self.send_signal("Additional Data", self.titanic, 1)
        # Switch the merge-mode radio buttons to "intersection".
        self.widget.controls.merge_type.buttons[1].click()
        output = self.get_output("Data")
        # needs to contain all instances
        self.assertEqual(len(output), len(self.iris) + len(self.titanic))
        # no common variables
        outvars = output.domain.variables
        self.assertEqual(0, len(outvars))
| Python | 0.999568 | |
ef026ce3b4bf7fc50499ce5ecb688c02bbc77544 | Add outline for orbital maneuver class | orbital/maneuver.py | orbital/maneuver.py | class Maneuver:
def __init__(self):
pass
@classmethod
def raise_apocenter_by(cls, delta, orbit):
pass
@classmethod
def change_apocenter_to(cls, apocenter, orbit):
pass
@classmethod
def lower_apocenter_by(cls, delta, orbit):
pass
@classmethod
def raise_pericenter_by(cls, delta, orbit):
pass
@classmethod
def change_pericenter_to(cls, pericenter, orbit):
pass
@classmethod
def lower_pericenter_by(cls, delta, orbit):
pass
@classmethod
def hohmann_transfer(cls):
# how to specify new orbit?
# - new semimajor axix/radius/altitude
pass
def bielliptic_transfer(cls):
pass
| Python | 0 | |
c191959db6b1a14d527ec41f910682fd017421ee | fix for handling spaces in sys.executable and in sut_path (issue 166) | doc/quickstart/testlibs/LoginLibrary.py | doc/quickstart/testlibs/LoginLibrary.py | import os
import sys
class LoginLibrary:
    """Robot Framework keyword library driving the login.py demo SUT.

    Each keyword shells out to the SUT via os.popen and stores the status
    it prints, which ``status_should_be`` later verifies.
    """
    def __init__(self):
        sut_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                '..', 'sut', 'login.py')
        # Both the interpreter and the SUT path are quoted so locations
        # containing spaces still work.
        self._command_prefix = '"%s" "%s" ' % (sys.executable, sut_path)
        self._status = ''

    def create_user(self, username, password):
        self._run_command('create', username, password)

    def change_password(self, username, old_pwd, new_pwd):
        self._run_command('change-password', username, old_pwd, new_pwd)

    def attempt_to_login_with_credentials(self, username, password):
        self._run_command('login', username, password)

    def status_should_be(self, expected_status):
        """Fail unless the last command printed *expected_status*."""
        if expected_status != self._status:
            raise AssertionError("Expected status to be '%s' but was '%s'"
                                 % (expected_status, self._status))

    def _run_command(self, command, *args):
        # Run the SUT command and capture its stripped stdout as the status.
        command = '%s %s %s' % (self._command_prefix, command, ' '.join(args))
        process = os.popen(command)
        self._status = process.read().strip()
        process.close()
| import os
import sys
class LoginLibrary:
    """Robot Framework keyword library driving the login.py demo SUT.

    NOTE(review): this revision builds the command line without quoting
    sys.executable or sut_path, so interpreter or SUT locations containing
    spaces break -- presumably the defect the later revision fixes.
    """
    def __init__(self):
        sut_path = os.path.join(os.path.dirname(__file__),
                                '..', 'sut', 'login.py')
        self._command_prefix = '%s %s ' % (sys.executable, sut_path)
        self._status = ''

    def create_user(self, username, password):
        self._run_command('create', username, password)

    def change_password(self, username, old_pwd, new_pwd):
        self._run_command('change-password', username, old_pwd, new_pwd)

    def attempt_to_login_with_credentials(self, username, password):
        self._run_command('login', username, password)

    def status_should_be(self, expected_status):
        """Fail unless the last command printed *expected_status*."""
        if expected_status != self._status:
            raise AssertionError("Expected status to be '%s' but was '%s'"
                                 % (expected_status, self._status))

    def _run_command(self, command, *args):
        # Run the SUT command and capture its stripped stdout as the status.
        command = '%s %s %s' % (self._command_prefix, command, ' '.join(args))
        process = os.popen(command)
        self._status = process.read().strip()
        process.close()
| Python | 0 |
ede05f2196dc7e96df01176f20b39772ac26e1ae | add python/logviewer.py | python/logviewer.py | python/logviewer.py | #!/usr/bin/python3
import io, os, re, sys
from http import HTTPStatus, server
FILE = None
INDEX = """<!DOCTYPE html>
<meta charset="utf-8">
<title>Log Viewer</title>
<script>
var logBox = null;
var lastOffset = 0;
function initialize() {
logBox = document.getElementById('log');
lastOffset = 0;
update();
}
function update() {
fetch('/get?offset=' + lastOffset).then(function(response) {
if (response.ok) {
return response.text();
}
}).then(function(text) {
lastOffset += text.length;
logBox.value += text; // FIXME: escape
logBox.scrollTop = logBox.scrollHeight; // Scroll to bottom
setTimeout(update, 3000);
});
}
</script>
<body onLoad="initialize();">
<textarea id="log" wrap="off" cols="120" rows="50" readonly="readonly">
</textarea>
"""
# INDEX = None # Dev mode
class HTTPRequestHandler(server.BaseHTTPRequestHandler):
    """Serves the viewer page at / and raw log-file chunks at /get."""
    def do_GET(self):
        if self.path == '/':
            self.send_OK("text/html", INDEX.encode())
        elif self.path.startswith('/get?'):
            # TODO: convert query string to a dict
            # /get?offset=N[&length=M] -> bytes [N, N+M) of the log file;
            # length defaults to -1, i.e. read to EOF.
            m = re.search('offset=(\\d+)', self.path)
            offset = int(m.group(1)) if m else 0
            m = re.search('length=(\\d+)', self.path)
            length = int(m.group(1)) if m else -1
            FILE.seek(offset)
            body = FILE.read(length)
            # NOTE(review): offsets here are byte offsets, but the page's
            # JS advances lastOffset by text.length (characters); these
            # diverge for multi-byte UTF-8 logs -- confirm logs are ASCII.
            self.send_OK("text/plain", body)
        else:
            self.send_error(HTTPStatus.NOT_FOUND, "File not found")
    def send_OK(self, content_type, body):
        # Write a complete 200 response carrying the given body bytes.
        self.send_response(HTTPStatus.OK)
        self.send_header("Content-Type", content_type)
        self.send_header('Content-Length', int(len(body)))
        self.end_headers()
        self.wfile.write(body)
def main(argv):
    """Usage: logviewer.py LOGFILE -- serve LOGFILE over HTTP.

    Opens the log in binary mode and hands control to http.server's test
    helper with our request handler.
    """
    global FILE, INDEX
    FILE = open(argv[1], 'rb')
    if not INDEX:
        # Dev mode: INDEX was cleared, so load the page template from the
        # sibling .html file next to this script.
        INDEX = open(os.path.splitext(argv[0])[0] + '.html').read()
    server.test(HandlerClass=HTTPRequestHandler)

if __name__ == '__main__':
    main(sys.argv)
| Python | 0.000003 | |
3cbb02ebb1ba195f373c4b9238a49c30039f821e | revert changes | python/lottosend.py | python/lottosend.py | import json
import urllib2
class LottosendSDK:
    """Thin client for the Lottosend partner HTTP API.

    Written for Python 2 (uses ``urllib2``).  ``token`` and the endpoint
    URLs must be assigned after construction, before any call is made.
    """
    def __init__(self):
        self.token = ''
        self.lottosend_api = ''
        self.results_api = ''
        self.auto_login_url = ''

    def _headers(self):
        """Headers shared by every API call (token auth + JSON)."""
        return {
            "Authorization": 'Token token=%s' % self.token,
            "Content-Type": "application/json",
            "Accept": "*/*"
        }

    # signup user in lottosend system
    def signupViaApi(self,first_name, last_name, prefix, phone, email, address, country, passwd, a_aid):
        """Sign a user up; returns the raw response body.

        NOTE(review): ``prefix`` is accepted but never sent in the payload
        -- confirm whether it should be included.
        """
        # (The original also had a dead ``params = dict()`` that was
        # immediately overwritten; removed.)
        params = {
            'web_user': {
                'email': email,
                'first_name': first_name,
                'last_name': last_name,
                'phone': phone,
                'password': passwd,
                'country': country,
                'address': address,
                'aid': a_aid
            }
        }
        req = urllib2.Request(self.lottosend_api,
                              headers=self._headers(),
                              data=json.dumps(params))
        return urllib2.urlopen(req).read()

    # obtain user token to resign-in
    def obtainToken(self,id):
        """Obtain a re-sign-in token for the web user with the given id."""
        req = urllib2.Request('%s/%s/token'%(self.lottosend_api,id),
                              headers=self._headers())
        return urllib2.urlopen(req).read()

    # get all user info
    def getUsersInfo(self):
        """Fetch info about all users (full sync)."""
        req = urllib2.Request('%s/?last_synced_timestamp=1'%self.lottosend_api,
                              headers=self._headers())
        return urllib2.urlopen(req).read()

    # get user transactions
    def getUsersTransactions(self):
        """Fetch all user transactions (full sync)."""
        req = urllib2.Request('%s/transactions/?last_synced_timestamp=1'%self.lottosend_api,
                              headers=self._headers())
        return urllib2.urlopen(req).read()
| Python | 0.000001 | |
490af74a5b52d8014a8c3e13cfa6f015a4927cf4 | add a merge migration to ensure the two lead nodes don't cause a crash during a deploy | accelerator/migrations/0021_merge_20181011_1153.py | accelerator/migrations/0021_merge_20181011_1153.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-11 15:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration joining the two parallel 0020 leaf nodes so the
    migration graph has a single leaf again (prevents deploy-time
    conflicting-migrations errors)."""
    dependencies = [
        ('accelerator', '0020_criterion_verbose_names'),
        ('accelerator', '0020_remove_is_open_from_program_family'),
    ]
    operations = [
    ]
| Python | 0 | |
ba25fafa6b4572f1b7c8c7a901f5f7b75753c3c6 | Add Exercise 8.7. | Kane1985/Chapter4/Ex8.7.py | Kane1985/Chapter4/Ex8.7.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 8.7 from Kane 1985.
"""
from __future__ import division
from collections import OrderedDict
from sympy import diff, solve, simplify, symbols
from sympy import pi, sin, cos
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics import MechanicsStrPrinter
def msprint(expr):
    """Return *expr* rendered with sympy's mechanics string printer."""
    return MechanicsStrPrinter().doprint(expr)
def subs(x, *args, **kwargs):
    """Apply ``x.subs(*args, **kwargs)`` where possible.

    Objects exposing a ``subs`` method are substituted directly; iterables
    without one are mapped element-wise (lazily, via ``map``); anything
    else is returned unchanged.
    """
    if hasattr(x, 'subs'):
        return x.subs(*args, **kwargs)
    if hasattr(x, '__iter__'):
        return map(lambda item: subs(item, *args, **kwargs), x)
    return x
def partial_velocities(system, generalized_speeds, frame,
                       kde_map=None, constraint_map=None, express_frame=None):
    """Return {point_or_frame: OrderedDict{u_r: partial velocity}} for each
    member of *system*.

    Points contribute translational velocities and reference frames angular
    velocities, both taken in *frame*.  ``kde_map`` / ``constraint_map`` are
    substitution dicts (e.g. solved kinematic differential equations)
    applied before differentiating with respect to each generalized speed.
    """
    partials = {}
    if express_frame is None:
        express_frame = frame

    for p in system:
        if isinstance(p, Point):
            v = p.vel(frame)
        elif isinstance(p, ReferenceFrame):
            v = p.ang_vel_in(frame)
        # NOTE(review): if p is neither a Point nor a ReferenceFrame, the
        # ``v`` from the previous iteration is silently reused -- confirm
        # whether that is intended or should raise.
        if kde_map is not None:
            v = v.subs(kde_map)
        if constraint_map is not None:
            v = v.subs(constraint_map)

        v_r_p = OrderedDict((u, v.diff(u, express_frame))
                            for u in generalized_speeds)
        partials[p] = v_r_p
    return partials
def generalized_active_forces(partials, force_pairs):
    """Return (Flist, ulist): the generalized active forces F_r and the
    generalized speeds they correspond to.

    partials -- dict mapping each point/frame to an OrderedDict of
        {generalized speed: partial velocity}, as produced by
        partial_velocities().
    force_pairs -- iterable of (point_or_frame, force_or_torque) pairs.
    """
    # use the same frame used in calculating partial velocities
    # BUG FIX: ``partials.values()[0]`` is Python-2-only -- dict views are
    # not indexable on Python 3.  next(iter(...)) works on both.
    v = next(iter(partials.values()))  # partial velocities of the first item
    ulist = list(v.keys())  # list of generalized speeds in case user wants it

    Flist = [0] * len(ulist)
    for p, f in force_pairs:
        for i, u in enumerate(ulist):
            if partials[p][u] and f:
                Flist[i] += dot(partials[p][u], f)
    return Flist, ulist
## --- Declare symbols ---
# Define the system with 6 generalized speeds as follows:
q1, q2, q3 = dynamicsymbols('q1:4')
q1d, q2d, q3d = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
g, m, L, t = symbols('g m L t')
Q, R, S = symbols('Q R S')

# --- ReferenceFrames ---
N = ReferenceFrame('N')

# --- Define Points and set their velocities ---
# Simplify the system to 7 points, where each point is the aggregations of
# rods that are parallel horizontally.
pO = Point('O')
pO.set_vel(N, 0)
# Chain joints P1..P6 from the origin; each link is half a rod length from
# the previous point, with angles q1, q2, q3 along the way.
pP1 = pO.locatenew('P1', L/2*(cos(q1)*N.x + sin(q1)*N.y))
pP2 = pP1.locatenew('P2', L/2*(cos(q1)*N.x + sin(q1)*N.y))
pP3 = pP2.locatenew('P3', L/2*(cos(q2)*N.x - sin(q2)*N.y))
pP4 = pP3.locatenew('P4', L/2*(cos(q2)*N.x - sin(q2)*N.y))
pP5 = pP4.locatenew('P5', L/2*(cos(q3)*N.x + sin(q3)*N.y))
pP6 = pP5.locatenew('P6', L/2*(cos(q3)*N.x + sin(q3)*N.y))
# Velocities follow from differentiating each position in frame N.
for p in [pP1, pP2, pP3, pP4, pP5, pP6]:
    p.set_vel(N, p.pos_from(pO).diff(t, N))

## --- Define kinematic differential equations/pseudo-generalized speeds ---
kde = [u1 - L*q1d, u2 - L*q2d, u3 - L*q3d]
kde_map = solve(kde, [q1d, q2d, q3d])

## --- Define contact/distance forces ---
forces = [(pP1, 6*m*g*N.x),
          (pP2, S*N.y + 5*m*g*N.x),
          (pP3, 6*m*g*N.x),
          (pP4, -Q*N.y + 5*m*g*N.x),
          (pP5, 6*m*g*N.x),
          (pP6, R*N.y + 5*m*g*N.x)]

partials = partial_velocities([pP1, pP2, pP3, pP4, pP5, pP6], [u1, u2, u3], N, kde_map)
Fr, _ = generalized_active_forces(partials, forces)

print("Generalized active forces:")
for i, f in enumerate(Fr, 1):
    print("F{0} = {1}".format(i, simplify(f)))
| Python | 0.000005 | |
46cef615f9d10279ea4907a542a87e4af22b37cd | Add A* pathfinding algorithm to utilities. | enactiveagents/utilities/pathfinding.py | enactiveagents/utilities/pathfinding.py | """
Module containing pathfinding utilities.
"""
import model
import Queue
class Pathfinding(object):
    """A* path-finding over the world grid.

    Positions are used as dict keys, so they are assumed hashable and
    comparable for equality.  A step to any of the 8 neighbouring cells
    costs 1.
    """

    @staticmethod
    def get_neighbours(world, position):
        """
        Get all traversable neighbours of a given position (cell): the
        8-connected cells that lie inside the world bounds and contain no
        collidable entity.

        :param world: The world
        :param position: The given position (cell)
        """
        neighbours = []
        for dx in [-1, 0, 1]:
            for dy in [-1, 0, 1]:
                if dx == 0 and dy == 0:
                    continue

                if (position.get_x() + dx < 0
                    or position.get_y() + dy < 0
                    or position.get_x() + dx >= world.get_width()
                    or position.get_y() + dy >= world.get_height()):
                    continue

                new_position = model.world.Position(position)
                new_position.add((dx, dy))

                add = True
                entities = world.get_entities_at(new_position)
                for entity in entities:
                    if entity.collidable():
                        add = False
                        break
                if add:
                    neighbours.append(new_position)
        return neighbours

    @staticmethod
    def heuristic(start, goal):
        """
        Calculate the heuristic cost to get from start to the goal.

        NOTE(review): this is the Manhattan distance; with diagonal moves
        costing 1 (8-connected neighbours) it can overestimate the true
        cost, making the heuristic inadmissible and paths potentially
        suboptimal -- confirm whether Chebyshev distance was intended.

        :param start: The starting position
        :param goal: The goal position
        """
        return abs(start.get_x() - goal.get_x()) + abs(start.get_y() - goal.get_y())

    @staticmethod
    def reconstruct_path(backtrack, goal):
        """Follow the backtrack links from the goal.  The returned list runs
        from the goal back towards the start (start itself excluded)."""
        path = []
        current = goal
        while backtrack[current] != None:
            path.append(current)
            current = backtrack[current]
        return path

    @staticmethod
    def find_path(world, start, goal):
        """
        Implements the A* algorithm to find a path from the start to the goal.

        BUG FIX: the original called ``priority_queue.put(item, priority)``,
        but the second positional argument of ``Queue.PriorityQueue.put`` is
        ``block``, not a priority, so nodes were expanded in insertion order
        rather than best-first.  Entries are now (priority, tie, item)
        tuples; the increasing tie counter keeps tuple comparison from ever
        reaching the (possibly unorderable) Position objects.

        :param world: The world
        :param start: The starting position
        :param goal: The goal position
        :return: tuple (path as produced by reconstruct_path, cost to goal);
            raises KeyError if the goal is unreachable.
        """
        priority_queue = Queue.PriorityQueue()
        tie = 0
        priority_queue.put((0, tie, start))

        backtrack = {}
        cost_to = {}

        backtrack[start] = None
        cost_to[start] = 0

        while not priority_queue.empty():
            current = priority_queue.get()[2]

            if current == goal:
                # The goal has been found, so stop searching
                break

            for neighbour in Pathfinding.get_neighbours(world, current):
                cost_to_neighbour = cost_to[current] + 1
                if neighbour not in cost_to or cost_to_neighbour < cost_to[neighbour]:
                    cost_to[neighbour] = cost_to_neighbour
                    backtrack[neighbour] = current
                    priority = cost_to_neighbour + Pathfinding.heuristic(neighbour, goal)
                    tie += 1
                    priority_queue.put((priority, tie, neighbour))

        return (Pathfinding.reconstruct_path(backtrack, goal), cost_to[goal])
0ba701bd4459273df726e33709ae0e441bd4a767 | migrate username field to 150 chars | email_auth/migrations/0003_django110.py | email_auth/migrations/0003_django110.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-27 09:08
from __future__ import unicode_literals
from django import VERSION
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('email_auth', '0002_auto_20160327_1119'),
]
operations = []
if VERSION >= (1, 10):
import django.contrib.auth.validators
operations.append(migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
))
| Python | 0.000002 | |
ccb3d15521c89c52119e2df35c07d379b4f23940 | Add tkinter app | demo/tkinter/hello-tkinter.py | demo/tkinter/hello-tkinter.py | import tkinter as tk
class App(tk.Frame):
    '''
    A demo application.
    The App is a subclass of tk.Frame.
    tk.Frame is a widget that lets you 'pack in' other widgets.
    This way we can add in more than one widget to our application.
    '''
    def __init__(self, parent=None):
        # NOTE(review): the parent=None default would crash below at
        # parent.geometry(); callers must pass a real Tk widget.
        self.parent = parent
        # Attach our App to a parent widget
        super().__init__(parent)
        # Set the parent widget size
        parent.geometry('500x500')
        # Place our App on the screen
        self.pack()
        # Add in the rest of our widgets.  Creation order matters: pack()
        # lays widgets out in the order these calls run.
        self.add_hello_label()
        self.add_button()
        self.add_slider()
        self.add_counter()
        self.add_entry()
    def add_hello_label(self):
        '''
        Adds a Label to the bottom of our frame.
        '''
        # We set the Label text and font here
        self.hello = tk.Label(self, text='Hello, World!', font=('Times New Roman', 20))
        # This tells tkinter where to place the Label
        self.hello.pack(side='bottom')
    def add_button(self):
        '''
        Adds a Button to the top of our frame.
        '''
        # We set the button text and command here
        self.change = tk.Button(self, text='Change the text', command=self.change_text)
        # pack() defaults as pack(side='top')
        self.change.pack()
    def change_text(self):
        '''
        A command to change the Label's properties.
        '''
        # Properties of a widget are accessed like a dictionary
        self.hello['text'] = 'This is changed text!'
        self.hello['fg'] = 'white'
        self.hello['bg'] = 'black'
    def add_slider(self):
        '''
        Adds a slider to the top of the frame.
        '''
        self.slider = tk.Scale(self,
                               # Define the minimum and maximum slider values
                               from_=10,
                               to=30,
                               # The default is a vertical slider
                               orient=tk.HORIZONTAL,
                               # The command gets called every time the slider is moved
                               command=self.scale_text)
        # Set the sliders initial value
        self.slider.set(20)
        self.slider.pack()
    def scale_text(self, val):
        '''
        Changes the font size of our label.
        '''
        # Font size is not a property of the label, so we have to
        # use the config() method
        self.hello.config(font=('Times New Roman', val))
    def inc(self, event):
        # Key-binding callback: increment the counter label by one.
        self.number['text'] = str(1+int(self.number['text']))
    def dec(self, event):
        # Key-binding callback: decrement the counter label by one.
        self.number['text'] = str(-1+int(self.number['text']))
    def add_counter(self):
        '''
        Creates a label whose value can be updated with arrow keys.
        '''
        self.number = tk.Label(self, text='0', font=(None, 20))
        self.number.pack(side='bottom')
        # Root handles keyboard entry here
        self.parent.bind('<Up>', self.inc)
        self.parent.bind('<Down>', self.dec)
    def clear_entry(self, event):
        '''
        Empties the entry box and prints out its contents
        '''
        print(self.entry.get())
        self.entry.delete(0, 'end')
    def add_entry(self):
        '''
        Creates an entry box.
        '''
        self.entry_label = tk.Label(self, text='Input')
        self.entry_label.pack(side='left')
        self.entry = tk.Entry(self, bd=5)
        self.entry.pack(side='right')
        # Submit text when we hit Enter
        self.entry.bind('<Return>', self.clear_entry)
# An empty root widget to build our application from
root = tk.Tk()
# Create our app and attach it to the root
app = App(parent=root)
# Run the app: mainloop() blocks until the window is closed.
app.mainloop()
| Python | 0 | |
ed20a93e917cfdddc5cd49cc6446b6e80fb4573d | Migrate symbtr uuid field to django type | makam/migrations/0007_auto_20150812_1615.py | makam/migrations/0007_auto_20150812_1615.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    dependencies = [
        ('makam', '0006_auto_20150727_1631'),
    ]
    # Three-step conversion of Symbtr.uuid to Django's native UUIDField:
    # 1. relax the django-extensions UUIDField (stored as text),
    # 2. cast the column to a real uuid type with raw SQL (the `::uuid`
    #    cast is PostgreSQL syntax),
    # 3. swap in models.UUIDField with an index.
    operations = [
        migrations.AlterField(
            model_name='symbtr',
            name='uuid',
            field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
        ),
        migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
        migrations.AlterField(
            model_name='symbtr',
            name='uuid',
            field=models.UUIDField(db_index=True),
        ),
    ]
| Python | 0.000001 | |
e142530eef5754d4314d97f0d9e144f348d3909a | add docs_create_missing_stubs | maintenance/docs_create_missing_stubs.py | maintenance/docs_create_missing_stubs.py | import os
import subprocess
# hardcoded paths
HUNTER_DIR='..'
PACKAGES_DIR=os.path.join(HUNTER_DIR, 'cmake/projects')
DOCS_PKG_DIR=os.path.join(HUNTER_DIR, 'docs', 'packages', 'pkg')
# get all documented entries (one .rst file per package)
docs_filenames = [x for x in os.listdir(DOCS_PKG_DIR) if x.endswith('.rst')]
docs_entries = [x[:-4] for x in docs_filenames]
# get all hunter package entries
pkg_entries = [x for x in os.listdir(PACKAGES_DIR) if os.path.isdir(os.path.join(PACKAGES_DIR, x))]
pkg_entries_lower = [x.lower() for x in pkg_entries]
# packages both in hunter and the docs
pkg_match = [x for x in pkg_entries if x in docs_entries]
# packages only in hunter
pkg_only_hunter = [x for x in pkg_entries if x not in pkg_match]
# output directories
packages_dir = 'packages'
tmp_dir = 'packages/tmp'
only_hunter_dir = 'packages/only_hunter'
# create if not exist
for d in [packages_dir, tmp_dir, only_hunter_dir]:
    if not os.path.exists(d):
        os.mkdir(d)
# header for rst files
header_format_string = """.. spelling::
    {}
.. _pkg.{}:
{}
{}
"""
# create dummy entries for packages only in hunter
for entry in pkg_only_hunter:
    # BUG FIX: the original also computed `source_md` from an undefined
    # WIKI_DIR constant (NameError on the first iteration) and an unused
    # `tmp_rst` path; both were dead assignments and have been removed.
    target_rst = os.path.join(only_hunter_dir, entry + '.rst')
    underscores = "=" * len(entry)
    header = header_format_string.format(entry, entry, entry, underscores)
    #print(header)
    with open(target_rst, 'w') as f:
        f.write(header)
print("pkg_match entries: ", len(pkg_match))
print("pkg_only_hunter entries: ", len(pkg_only_hunter))
52c50ca6e4c5d2ee75300617c5da118fb1136e76 | Add custom plot style contour_image. | mplstyles/plots.py | mplstyles/plots.py | from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x, y, Z, cmap=None, vmax=None, vmin=None,
                  interpolation='nearest', contour_labelsize=9,
                  contour_opts=None, imshow_opts=None, clegendlabels=None,
                  label=False):
	"""Draw ``Z`` over the (x, y) grid as an image with contour lines on top.

	:param x, y: 1-D coordinate arrays for the grid axes.
	:param Z: 2-D data; transposed before plotting so that the first axis
	          corresponds to x (assumes ``Z[i, j]`` is the value at
	          ``(x[i], y[j])`` -- TODO confirm against callers).
	:param cmap: colormap; defaults to reversed Blues.
	:param contour_opts, imshow_opts: extra keyword arguments forwarded to
	          ``ax.contour`` / ``ax.imshow``.
	:param clegendlabels: optional legend labels, one per contour level.
	:param label: when True, draw in-line contour labels.
	:return: tuple ``(imshow artist, contour set)``.
	"""
	# Mutable default arguments ({} / []) are shared between calls; use
	# None sentinels instead (behaviour is unchanged for all callers).
	contour_opts = {} if contour_opts is None else contour_opts
	imshow_opts = {} if imshow_opts is None else imshow_opts
	clegendlabels = [] if clegendlabels is None else clegendlabels
	ax = plt.gca()
	# Half a grid step per axis: pads the imshow extent so each sample is
	# centred on its grid point.
	x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
	y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
	extent = (x[0], x[-1], y[0], y[-1])
	extent_delta = (x[0]-x_delta, x[-1]+x_delta, y[0]-y_delta, y[-1]+y_delta)
	ax.set_xlim(x[0], x[-1])
	ax.set_ylim(y[0], y[-1])
	if cmap is None:
		cmap = colormap.reverse(cm.Blues)
	Z = Z.transpose()
	cs = ax.imshow(Z, interpolation=interpolation, origin='lower',
	               aspect='auto', extent=extent_delta, cmap=cmap,
	               vmax=vmax, vmin=vmin, **imshow_opts)
	# Draw contours
	X, Y = np.meshgrid(x, y)
	CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts)
	# Label contours
	if label:
		ax.clabel(CS, fontsize=contour_labelsize)
	# Show contours in legend if desired
	if len(clegendlabels) > 0:
		for i in range(len(clegendlabels)):
			CS.collections[i].set_label(clegendlabels[i])
	return cs, CS
a3e06625c1e16a17c65aefc6bb570d769ec9f56a | Test for bot_update.py. | tests/bot_update_test.py | tests/bot_update_test.py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import json
import os
from subprocess import Popen, PIPE
import sys
import tempfile
import threading
import unittest
# Paths are derived relative to this test file: <build>/tests/ -> <build>.
BUILD_DIR = os.path.realpath(os.path.join(
    os.path.dirname(__file__), '..'))
BOT_UPDATE_PATH = os.path.join(BUILD_DIR, 'scripts', 'slave', 'bot_update.py')
SLAVE_DIR = os.path.join(BUILD_DIR, 'slave')
# Load chromium_utils directly from its source path (Python 2 `imp` API).
chromium_utils = imp.load_source(
    'chromium_utils',
    os.path.join(BUILD_DIR, 'scripts', 'common', 'chromium_utils.py'))
class BotUpdateTest(unittest.TestCase):
  """Integration test that runs bot_update.py as a subprocess against a
  remote test git host (Python 2 module, 2-space Chromium indentation)."""
  # TODO(szager): Maybe replace this with a local temporary gerrit instance.
  GIT_HOST = 'https://t3st-chr0m3.googlesource.com'
  def setUp(self):
    # NOTE(review): lstrip() strips *characters* from the set '__main__.',
    # not the literal prefix; this only works while the class name does not
    # begin with one of those characters -- consider replace('__main__.', '').
    prefix = self.id().lstrip('__main__.')
    testname = prefix.split('.')[-1]
    # Each test gets its own scratch work/build directory under SLAVE_DIR.
    self.workdir = tempfile.mkdtemp(dir=SLAVE_DIR, prefix=prefix)
    self.builddir = os.path.join(self.workdir, 'build')
    os.mkdir(self.builddir)
    # Base bot_update.py command line; tests append their own flags.
    self.bu_cmd = [
        sys.executable, BOT_UPDATE_PATH, '--force',
        '--output_json', os.path.join(self.builddir, 'out.json'),
        '--master', '%s_master' % testname,
        '--builder_name', '%s_builder' % testname,
        '--slave_name', '%s_slave' % testname ]
  def tearDown(self):
    chromium_utils.RemoveDirectory(self.workdir)
  @staticmethod
  def _subproc_thread_main(cmd, cwd):
    # Runs in a worker thread; results are stashed on the thread object so
    # _subproc can read them after join().
    thr = threading.current_thread()
    thr.p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=cwd)
    (stdout, stderr) = thr.p.communicate()
    thr.stdout = stdout
    thr.stderr = stderr
  def _subproc(self, cmd, cwd, timeout=15):
    # Run `cmd` with a timeout by driving it from a daemon thread; the
    # subprocess is terminated (and the test failed) if it overruns.
    thr = threading.Thread(
        target=self._subproc_thread_main, args=(cmd, cwd))
    thr.daemon = True
    thr.start()
    thr.join(timeout)
    if thr.isAlive():
      thr.p.terminate()
      self.fail('A subprocess timed out after %d seconds' % timeout)
    return (thr.p.returncode, thr.stdout, thr.stderr)
  @staticmethod
  def _dump_subproc(cmd, cwd, status, stdout, stderr):
    # Pretty-print a failed subprocess invocation for debugging.
    sep = ('#' * 80) + '\n'
    print sep, 'Subprocess failed with status %d.\n' % status
    print cmd, '\n\n... in %s\n' % cwd
    print sep, '# stdout\n', sep, stdout, '\n'
    print sep, '# stderr\n', sep, stderr, '\n', sep
  @staticmethod
  def _get_files(d):
    # Return all file paths under `d`, relative to `d`, skipping .git dirs.
    result = []
    for dirpath, dirnames, filenames in os.walk(d):
      for f in filenames:
        result.append(
            os.path.join(dirpath.replace(d, '').lstrip('/'), f))
      try:
        dirnames.remove('.git')
      except ValueError:
        pass
    return result
  def test_001_simple(self):
    solution = { 'name': 'top',
                 'url': '%s/BotUpdateTest/test_001_top.git' % self.GIT_HOST,
                 'deps_file': 'DEPS' }
    gclient_spec = 'solutions=[%r]' % solution
    self.bu_cmd.extend([
        '--post-flag-day',
        '--specs', gclient_spec,
        '--revision', '91ea82d7125be47db12ccb973a2c6574eca0f342'])
    status, stdout, stderr = self._subproc(self.bu_cmd, self.builddir)
    if status != 0:
      self._dump_subproc(self.bu_cmd, self.builddir, status, stdout, stderr)
    self.assertEqual(status, 0)
    # The checkout plus both DEPS-declared dependencies must be present.
    expected_files = [
      'DEPS',
      'file.txt',
      'ext/dep1/file.txt',
      'ext/dep2/file.txt',
    ]
    topdir = os.path.join(self.builddir, 'top')
    self.assertItemsEqual(expected_files, self._get_files(topdir))
    expected_json = {
      'root': 'top',
      'properties': {},
      'did_run': True,
      'patch_root': None
    }
    with open(os.path.join(self.builddir, 'out.json')) as fh:
      actual_json = json.load(fh)
    self.assertDictContainsSubset(expected_json, actual_json)
if __name__ == '__main__':
  unittest.main()
| Python | 0.000003 | |
e2a0a27e853e1e8c8913e9851d2a7aa0fb18b3ee | add exception test | tests/exceptions_test.py | tests/exceptions_test.py | # -*- coding: utf-8 -
#
# Copyright (c) 2008 (c) Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import unittest
class ResourceTestCase(unittest.TestCase):
    """Checks that restkit can switch its exception base class between
    simple exceptions and webob WSGI exceptions at runtime."""
    def testForceException(self):
        import webob.exc
        from restkit.errors import use_simple_exception, use_wsgi_exception
        # Statement order matters: use_simple_exception() presumably rebinds
        # restkit.errors.ResourceError, and the import below must run *after*
        # it to pick up the rebound class -- confirm against restkit.errors.
        use_simple_exception()
        from restkit.errors import ResourceError
        self.assert_(issubclass(ResourceError, webob.exc.WSGIHTTPException) == False)
        # Restore the default (webob-based) exceptions for other tests.
        use_wsgi_exception()
    def testWebobException(self):
        import webob.exc
        from restkit.errors import ResourceError
        # By default ResourceError derives from webob's WSGIHTTPException.
        self.assert_(issubclass(ResourceError, webob.exc.WSGIHTTPException) == True)
if __name__ == '__main__':
    unittest.main()
9231511307631ad92b896941607c4e5f3c7704ce | Create new script for attaching and releasing the gripper's compressor glove. | cs473_baxter/scripts/glove.py | cs473_baxter/scripts/glove.py | #!/usr/bin/env python
import argparse
import rospy
import baxter_interface
class Glove():
def __init__(self, gripper):
self.gripper = Gripper(gripper)
# Verify robot is enabled
print "Getting robot state..."
self._rs = baxter_interface.RobotEnable()
self._init_state = self._rs.state().enabled
print "Enabling robot..."
self._rs.enable()
print "Running. Ctrl-c to quit"
def grip_glove(self):
self.gripper.calibrate()
self.gripper.open()
# set moving force
# set holding force
# prompt for glove
# grip glove
def release_glove(self):
self.gripper.open()
def clean_shutdown(self):
print "\nExiting glove routine..."
if not self._init_state and self._rs.state().enabled:
print "Disabling robot..."
self._rs.disable()
def main():
	"""Parse the command line and run the grip or release routine."""
	arg_fmt = argparse.RawDescriptionHelpFormatter
	parser = argparse.ArgumentParser(formatter_class=arg_fmt,
		description=main.__doc__)
	parser.add_argument(
		'-g', '--grip', choices=['grip', 'release'], required=True,
		help="grip or release glove"
	)
	args = parser.parse_args(rospy.myargv()[1:])
	g = Glove('right')
	# register shutdown callback
	rospy.on_shutdown(g.clean_shutdown)
	# BUG FIX: the original used `args.grip is 'grip'`, an identity test.
	# Strings parsed from argv are not guaranteed to be interned, so the
	# comparison could be False for an equal string; compare by value.
	if args.grip == 'grip':
		g.grip_glove()
	else:
		g.release_glove()
if __name__ == '__main__':
	main()
| Python | 0 | |
bc2a707ea12716612422959b107b72c84d9dc946 | add test for dump_table_to_json() | tests/test_dump_table.py | tests/test_dump_table.py | import scraperwiki
import unittest
from dc_base_scrapers.common import dump_table_to_json
class DumpTableTests(unittest.TestCase):
    """Verifies dump_table_to_json() is insensitive to both column order in
    the schema and insertion order of the rows."""
    def test_dump_table(self):
        # create tables with same columns in different order
        scraperwiki.sqlite.execute("""CREATE TABLE foo (
            b TEXT,
            a INT,
            c TEXT
        );""")
        scraperwiki.sqlite.execute("""CREATE TABLE bar (
            c TEXT,
            b TEXT,
            a INT
        );""")
        # insert same content differently ordered
        foo_records = [
            {'a': 2, 'b': 'foo', 'c': 'foo'},
            {'a': 1, 'b': 'foo', 'c': 'foo'},
            {'a': 3, 'b': 'foo', 'c': 'foo'},
        ]
        for rec in foo_records:
            scraperwiki.sqlite.save(unique_keys='a', table_name='foo', data=rec)
        scraperwiki.sqlite.commit_transactions()
        bar_records = [
            {'a': 2, 'b': 'foo', 'c': 'foo'},
            {'a': 3, 'b': 'foo', 'c': 'foo'},
            {'a': 1, 'b': 'foo', 'c': 'foo'},
        ]
        for rec in bar_records:
            scraperwiki.sqlite.save(unique_keys='a', table_name='bar', data=rec)
        scraperwiki.sqlite.commit_transactions()
        # check that json representation is consistent: dumping either table
        # (keyed on 'a') must produce identical JSON.
        foo_json = dump_table_to_json('foo', 'a')
        bar_json = dump_table_to_json('bar', 'a')
        self.assertEqual(foo_json, bar_json)
| Python | 0.000001 | |
5207d3c91d64170d783388a064334e495b3b562c | Add a new test for the latest RegexLexer change, multiple new states including '#pop'. | tests/test_regexlexer.py | tests/test_regexlexer.py | # -*- coding: utf-8 -*-
"""
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
from pygments.token import Text
from pygments.lexer import RegexLexer
class TestLexer(RegexLexer):
    """Test tuple state transitions including #pop."""
    # For the input 'abcde': 'a' enters 'rag'; 'b' pushes 'rag' again via
    # '#push'; 'c' pops once and enters 'beer'; 'd' uses the
    # ('#pop', '#pop') tuple to unwind both remaining states back to
    # 'root', where 'e' finally matches.
    tokens = {
        'root': [
            ('a', Text.Root, 'rag'),
            ('e', Text.Root),
        ],
        'beer': [
            ('d', Text.Beer, ('#pop', '#pop')),
        ],
        'rag': [
            ('b', Text.Rag, '#push'),
            ('c', Text.Rag, ('#pop', 'beer')),
        ],
    }
class TupleTransTest(unittest.TestCase):
    """Tokenizing 'abcde' must walk root -> rag -> rag -> beer and the
    ('#pop', '#pop') tuple must unwind back to root for the final 'e'."""
    def test(self):
        lx = TestLexer()
        toks = list(lx.get_tokens_unprocessed('abcde'))
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(toks,
            [(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
             (3, Text.Beer, 'd'), (4, Text.Root, 'e')])
| Python | 0.000007 | |
68afd4f71e1448017a7ed4775d7e70a26ff7c91b | add tests for new validation logic | tests/test_validation.py | tests/test_validation.py | from __future__ import unicode_literals
import copy
import pytest
from jsonschema.exceptions import ValidationError
from scrapi.linter import NormalizedDocument
class TestValidation(object):
    """Tests for NormalizedDocument schema validation and cleaning.

    All three tests share the module-level `to_be_validated` fixture defined
    below this class (resolved at call time, so the later definition is
    fine).
    """
    def test_validate_with_clean(self):
        # clean=True is expected to drop empty tags/subjects and the
        # all-empty otherProperties entry from the fixture.
        expected = {
            "description": "This is a test",
            "contributors": [
                {
                    "name": "Test Testerson Jr",
                    "givenName": "Test",
                    "familyName": "Testerson",
                    "additionalName": "",
                    "ORCID": None,
                    "email": ""
                }
            ],
            'title': '',
            'subjects': ['Math'],
            'uris': {
                "canonicalUri": "http://example.com"
            },
            "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
            "shareProperties": {
                "source": "test"
            }
        }
        doc = NormalizedDocument(to_be_validated, clean=True)
        assert doc.attributes == expected
    def test_validate(self):
        # Without cleaning, the document must come back unchanged.
        expected = {
            "description": "This is a test",
            "contributors": [
                {
                    "name": "Test Testerson Jr",
                    "givenName": "Test",
                    "familyName": "Testerson",
                    "additionalName": "",
                    "ORCID": None,
                    "email": ""
                }
            ],
            'title': '',
            'tags': ['', '', ''],
            'subjects': ['', 'Math'],
            'uris': {
                "canonicalUri": "http://example.com"
            },
            "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
            "shareProperties": {
                "source": "test"
            },
            "otherProperties": [
                {
                    "name": "Empty2",
                    "properties": {
                        "Empty2": None
                    }
                }
            ]
        }
        doc = NormalizedDocument(to_be_validated)
        assert doc.attributes == expected
    def test_validate_fails(self):
        # A malformed providerUpdatedDateTime must raise, both when
        # validating eagerly and when validation is deferred to validate().
        to_be_tested = copy.deepcopy(to_be_validated)
        to_be_tested['providerUpdatedDateTime'] = 'Yesterday'
        with pytest.raises(ValidationError) as e:
            doc = NormalizedDocument(to_be_tested)
        with pytest.raises(ValidationError) as e:
            doc = NormalizedDocument(to_be_tested, validate=False)
            doc.validate()
# Shared fixture: a schema-valid document that deliberately contains empty
# tags/subjects and an all-empty otherProperties entry, so the clean=True
# test has something to strip.
to_be_validated = {
    "description": "This is a test",
    "contributors": [
        {
            "name": "Test Testerson Jr",
            "givenName": "Test",
            "familyName": "Testerson",
            "additionalName": "",
            "ORCID": None,
            "email": ""
        }
    ],
    'title': '',
    'tags': ['', '', ''],
    'subjects': ['', 'Math'],
    'uris': {
        "canonicalUri": "http://example.com"
    },
    "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
    "shareProperties": {
        "source": "test"
    },
    "otherProperties": [
        {
            "name": "Empty2",
            "properties": {
                "Empty2": None
            }
        }
    ]
}
9445433b54fcbd7f56617fff853b761107bc94cc | Test add | a.py | a.py | """
Comment
"""
# Python 2 print statement: writes the literal string "apa" to stdout.
print "apa"
| Python | 0 | |
c1b34a71306af1c38f305981dc1d50135b2887d8 | add the missing new executor.py file | asyncio/executor.py | asyncio/executor.py | from .log import logger
__all__ = (
    'CancelledError', 'TimeoutError',
    'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
    )

# Argument for default thread pool executor creation.
_MAX_WORKERS = 5

try:
    import concurrent.futures
    import concurrent.futures._base
except ImportError:
    # concurrent.futures is unavailable: provide a minimal synchronous
    # stand-in exposing the same names.
    FIRST_COMPLETED = 'FIRST_COMPLETED'
    FIRST_EXCEPTION = 'FIRST_EXCEPTION'
    ALL_COMPLETED = 'ALL_COMPLETED'

    class Future(object):
        """Synchronous Future: runs the callback immediately and records
        either its result or the exception it raised."""
        def __init__(self, callback, args):
            try:
                self._result = callback(*args)
                self._exception = None
            except Exception as err:
                # BUG FIX: the original re-raised here, making the two
                # assignments below unreachable and letting the exception
                # escape the constructor instead of being reported through
                # exception()/result() as the Future contract requires.
                self._result = None
                self._exception = err
            self.callbacks = []

        def cancelled(self):
            # The callback has already run, so cancellation is impossible.
            return False

        def done(self):
            # Always done: the callback ran synchronously in __init__.
            return True

        def exception(self):
            return self._exception

        def result(self):
            if self._exception is not None:
                raise self._exception
            else:
                return self._result

        def add_done_callback(self, callback):
            # The future is already done; invoke the callback immediately.
            callback(self)

    class Error(Exception):
        """Base class for all future-related exceptions."""
        pass

    class CancelledError(Error):
        """The Future was cancelled."""
        pass

    class TimeoutError(Error):
        """The operation exceeded the given deadline."""
        pass

    class SynchronousExecutor:
        """
        Synchronous executor: submit() blocks until it gets the result.
        """
        def submit(self, callback, *args):
            return Future(callback, args)

        def shutdown(self, wait):
            pass

    def get_default_executor():
        logger.error("concurrent.futures module is missing: "
                     "use a synchrounous executor as fallback!")
        return SynchronousExecutor()
else:
    # Re-export the real concurrent.futures objects under the same names.
    FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
    FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
    ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
    Future = concurrent.futures.Future
    Error = concurrent.futures._base.Error
    CancelledError = concurrent.futures.CancelledError
    TimeoutError = concurrent.futures.TimeoutError

    def get_default_executor():
        return concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
| Python | 0.000004 | |
fad65e68b3fcfa736ba5d6e62fbe0588100dc153 | Create gdax-myTrades-pagination.py | examples/py/gdax-myTrades-pagination.py | examples/py/gdax-myTrades-pagination.py | # -*- coding: utf-8 -*-
import os
import sys
# Make the repository's python/ package importable when this example is run
# from its examples/py/ directory (three levels up from this file).
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
'''
Example snippet to traverse GDAX / CoinBase Pro pagination.
Useful for reaching back more than 100 myTrades, the same works
for fetchClosedOrders
'''
import ccxt
exchange = ccxt.gdax({
    "apiKey": "123456",
    "secret": "/abcdefghijklmnop/w==",
    "password": "987654321",
    "enableRateLimit": True
})
#use sandbox url
exchange.urls['api'] = exchange.urls['test']
# Pagination cursor; empty until the first response carries a 'cb-after'
# header.  NOTE(review): the very first request therefore sends
# params={'': ''} -- presumably the exchange ignores the empty key; confirm.
param_key=''
param_value=''
allMyTrades: list = []
while True:
    myTrades = exchange.fetchMyTrades(symbol='BTC/USD', params={param_key: param_value})
    # Handle gdax with pagination ...
    # Keep following the 'cb-after' cursor until a response no longer
    # provides one, accumulating every page of trades.
    if exchange.last_response_headers._store.get('cb-after'):
        param_key = 'after'
        param_value = exchange.last_response_headers._store['cb-after'][1]
        allMyTrades.extend(myTrades)
    else:
        allMyTrades.extend(myTrades)
        break
for trade in allMyTrades:
    print(trade)
| Python | 0 | |
6cc59b5ad1b70e0b303680d9e58c8d8158bec1e6 | Create solution.py | hackerrank/algorithms/implementation/easy/sock_merchant/py/solution.py | hackerrank/algorithms/implementation/easy/sock_merchant/py/solution.py | #!/bin/python3
import sys
import collections

# First stdin line holds the sock count; the value itself is not needed,
# but the line must be consumed before the colour list can be read.
sock_count = int(input().strip())
# Second line: space-separated sock colours.
colors = map(int, input().strip().split(' '))
# Every colour contributes floor(occurrences / 2) complete pairs.
total_pairs = 0
for occurrences in collections.Counter(colors).values():
    total_pairs += occurrences // 2
print(total_pairs)
| Python | 0.000018 | |
bb37514e110892a8a896b43173fa6288ec1685d4 | Add script to count the number of times a boolean key occurs and also optionally emit a new YAML file containing only results where the key matches True, False or None. | analysis/count_boolean_key.py | analysis/count_boolean_key.py | #!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
    """Partition a YAML list of results by the value of a boolean top-level
    key, print the counts, and optionally write each partition to a file.

    :param args: command-line arguments (excluding the program name)
    :return: process exit code -- 0 on success, 1 on malformed input
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('bool_key', help='top level key of boolean type')
    parser.add_argument('result_yml', type=argparse.FileType('r'), help='Input YAML file')
    parser.add_argument('--write-true', dest='write_true', default=None, help='Write results that have bool_key set to true as a YAML file')
    parser.add_argument('--write-false', dest='write_false', default=None, help='Write results that have bool_key set to false as a YAML file')
    parser.add_argument('--write-none', dest='write_none', default=None, help='Write results that have bool_key set to null (None) as a YAML file')
    pargs = parser.parse_args(args)
    logging.info('Loading YAML file')
    results = yaml.load(pargs.result_yml, Loader=Loader)
    logging.info('Loading complete')
    assert isinstance(results, list)
    trueList = [ ]
    falseList = [ ]
    noneList = [ ]
    for r in results:
        if not pargs.bool_key in r:
            logging.error('Key {} not in result'.format(pargs.bool_key))
            return 1
        value = r[pargs.bool_key]
        if (not isinstance(value, bool)) and value != None:
            # BUG FIX: the original omitted the .format() call here and
            # logged the literal placeholder '{}' instead of the key name.
            logging.error('Key {} does not map to boolean or None'.format(pargs.bool_key))
            return 1
        if value == True:
            trueList.append(r)
        elif value == False:
            falseList.append(r)
        elif value == None:
            noneList.append(r)
        else:
            # Unreachable: the type guard above admits only bool or None.
            logging.error('unreachable!')
            return 1
    # print results
    print("Total {} keys: {}".format(pargs.bool_key, len(trueList) + len(falseList) + len(noneList)))
    print("# of True: {}".format(len(trueList)))
    print("# of not True {} ({} false, {} None)".format( len(falseList) + len(noneList), len(falseList) , len(noneList)))
    writeFilteredResults(pargs.write_true, trueList, pargs.bool_key)
    writeFilteredResults(pargs.write_false, falseList, pargs.bool_key)
    writeFilteredResults(pargs.write_none, noneList, pargs.bool_key)
    return 0
def writeFilteredResults(fileName, listToUse, key):
    """Serialise `listToUse` to `fileName` as YAML.

    No-op when no file name was given; refuses to overwrite an existing
    file and skips writing entirely when the result list is empty.
    """
    if fileName == None:  # original used equality (not identity); preserved
        return
    if os.path.exists(fileName):
        logging.error('Refusing to overwrite {}'.format(fileName))
        sys.exit(1)
    if len(listToUse) == 0:
        logging.info('Result list is empty not writing')
        return
    with open(fileName, 'w') as f:
        logging.info('Writing results with "{}" key set to "{}" to file {}'.format(key, str(listToUse[0][key]) ,fileName))
        dumped = yaml.dump(listToUse, Dumper=Dumper, default_flow_style=False)
        f.write(dumped)
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| Python | 0.000005 | |
bf01ea13c046d711939c1bb0aaedf9fbbc7c638d | Add initial systemd module | salt/modules/systemd.py | salt/modules/systemd.py | '''
Provide the service module for systemd
'''
def __virtual__():
    '''
    Only work on systems which default to systemd
    '''
    # Expose these functions as the 'service' virtual module, but only on
    # Fedora releases newer than 15; everywhere else the module is skipped.
    if __grains__['os'] != 'Fedora':
        return False
    return 'service' if __grains__['osrelease'] > 15 else False
def start(name):
    '''
    Start the specified service with systemd

    CLI Example::

        salt '*' service.start <service name>
    '''
    # A zero systemctl return code means the start succeeded.
    retcode = __salt__['cmd.retcode']('systemctl start {0}.service'.format(name))
    return retcode == 0
def stop(name):
    '''
    Stop the specified service with systemd

    CLI Example::

        salt '*' service.stop <service name>
    '''
    # A zero systemctl return code means the stop succeeded.
    retcode = __salt__['cmd.retcode']('systemctl stop {0}.service'.format(name))
    return retcode == 0
def restart(name):
    '''
    Restart the specified service with systemd
    CLI Example::
        salt '*' service.restart <service name>
    '''
    cmd = 'systemctl restart {0}.service'.format(name)
    return not __salt__['cmd.retcode'](cmd)
def status(name):
    '''
    Return the status for a service via systemd, returns the PID if the service
    is running or an empty string if the service is not running
    '''
    # BUG FIX (two defects in the original):
    # 1. it ran `systemctl restart` -- restarting the unit just to read its
    #    status -- instead of `systemctl status`;
    # 2. the awk braces were unescaped, so .format() raised on the
    #    '{print $3}' field.  They are doubled here so str.format() emits
    #    literal braces.
    cmd = ("systemctl status {0}.service | grep 'Main PID'"
           " | awk '{{print $3}}'").format(name)
    return __salt__['cmd.run'](cmd).strip()
| Python | 0.000001 | |
b1738d70e3a90e7bf27c9eeccb25b09403b74f1a | Add transport factory | devicehive/transport.py | devicehive/transport.py | def init(name, data_format, data_format_options, handler, handler_options):
transport_class_name = '%sTransport' % name.title()
transport_module = __import__('devicehive.transports.%s_transport' % name,
fromlist=[transport_class_name])
return getattr(transport_module, transport_class_name)(data_format,
data_format_options,
handler,
handler_options)
| Python | 0.000004 | |
9ba1dd92919fb37862e6e94bf55cc25e7be3b009 | add co.py | co.py | co.py | #!/bin/env python3
import functools
def coroutine(f):
    """Decorator that turns a generator function into a coroutine factory.

    Calling the wrapped function builds the generator and immediately
    advances it to its first ``yield`` ("priming"), so the caller can
    ``send()`` into it right away.
    """
    @functools.wraps(f)
    def primed(*args, **kwargs):
        gen = f(*args, **kwargs)
        next(gen)
        return gen
    return primed
@coroutine
def simple_coroutine():
    # Primed by @coroutine: runs up to the first `yield` on creation.
    print('Setting up the coroutine')
    try:
        while True:
            item = yield
            print('Got item: %r' % item)
    except GeneratorExit:
        # Raised inside the coroutine by close(); swallowing it ends cleanly.
        print('Normal exit')
    except Exception as e:
        # Raised into the coroutine via throw(); report and re-raise.
        print('Exception exit: %r' % e)
        raise
    finally:
        print('Any exit')
# --- Demo: drive the primed coroutine through its life cycle. ---
print('Creating simple coroutine')
active_coroutine = simple_coroutine()
print()
print('Sending spam')
active_coroutine.send('spam')
print()
print('Close the coroutine')
# close() raises GeneratorExit inside the coroutine ("Normal exit" branch).
active_coroutine.close()
print()
print('Creating simple coroutine')
active_coroutine = simple_coroutine()
print()
print('Sending eggs')
active_coroutine.send('eggs')
print()
print('Throwing runtime error')
# The coroutine prints the exception and re-raises, so the RuntimeError
# propagates here and the final print() below never executes.
active_coroutine.throw(RuntimeError, 'Oops...')
print()
| Python | 0.000044 | |
02844d3a2ed329a02afaaf8dc1ad07407768a68b | Create app.py | app.py | app.py | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from flask import Flask
from flask import request
import requests
app = Flask(__name__)
def get_allergies():
    """Fetch today's allergy report from saallergy.info and format it as
    'Allergies for <date>: <allergen> - <level> (<count>) | ...'.

    :return: the formatted report string
    """
    URL = 'http://saallergy.info/today'
    HEADERS = {'accept': 'application/json'}
    r = requests.get(URL, headers=HEADERS)
    data = r.json()
    date = data['results'][0]['date']
    # BUG FIX: the original appended ' | ' after every entry and then
    # rstrip-ped ' ' followed by '|', which still left a trailing space.
    # Joining the entries produces a cleanly terminated string.
    entries = ['%s - %s (%s)' % (a['allergen'], a['level'], a['count'])
               for a in data['results']]
    return 'Allergies for %s: %s' % (date, ' | '.join(entries))
@app.route("/allergies")
def allergies():
    """Flask view returning today's allergy report as plain text."""
    return get_allergies()
if __name__ == "__main__":
    app.run(host='0.0.0.0')
| Python | 0.000003 | |
43d73b7bdc8b38b3e2e583a0321936ab80c0f4e0 | Add bot.py | bot.py | bot.py | import praw
# NOTE(review): this uses the legacy praw API (get_subreddit / get_new);
# confirm the installed praw version still supports it.
r = praw.Reddit('/u/powderblock Glasses Bot')
# Print the URLs of the five newest submissions across all of reddit.
for post in r.get_subreddit('all').get_new(limit=5):
    print(str(post.url))
| Python | 0 | |
42389c93d11de00b50b08fcd1eca74fbe3941365 | Create banner-generator.py | banner-generator.py | banner-generator.py | #!/usr/bin/python
#####################################################
# grabs a banner image from flaming text
# and saves it to the project directory as banner.png
#####################################################
import urllib
import random
# Pick two random words and join them with '+' (a '+' reads as a space in
# a URL query string).
word_file = "words.txt"
WORDS = open(word_file).read().splitlines()
word1 = random.choice(WORDS) + '+' + random.choice(WORDS)
# BUG FIX: the original concatenated an undefined name `mytext` (NameError
# at runtime); the phrase built above (`word1`) is what belongs in the
# query string.
myurl = "http://www.flamingtext.com/net-fu/proxy_form.cgi?imageoutput=true&script=dance-logo&text="+word1
urllib.urlretrieve(myurl, "banner.png")
| Python | 0.000001 | |
45b789010409e4e2e2afc88cb776c8b70e7768ec | Add unit test for DakotaBase | dakota/tests/test_dakota_base.py | dakota/tests/test_dakota_base.py | #!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
    """Called before any tests are performed."""
    print('\n*** DakotaBase tests')
def teardown_module():
    """Called after all tests have completed."""
    pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
    """Test whether DakotaBase fails to instantiate."""
    # The assignment is intentionally unused; nose's @raises only needs the
    # constructor call to execute (and raise TypeError).
    d = DakotaBase()
| Python | 0 | |
b0ec56421c65c744c62c98850b69719fa6c351a1 | Update base.py | docs/examples/pyramid/base.py | docs/examples/pyramid/base.py |
import json
from pyramid.response import Response as PyramidResponse
from oauth2.web import Response
from oauth2.error import OAuthInvalidError, \
ClientNotFoundError, OAuthInvalidNoRedirectError, UnsupportedGrantError, ParameterMissingError
from oauth2.client_authenticator import ClientAuthenticator, request_body
from oauth2.tokengenerator import Uuid4
class Request():
    """
    Contains data of the current HTTP request.

    `env` is presumably a Pyramid request object (it exposes .method,
    .json_body, .registry and .headers) -- confirm against callers.
    """
    def __init__(self, env):
        self.method = env.method
        self.params = env.json_body
        self.registry = env.registry
        # BUG FIX: the original assigned env.registry here (copy/paste
        # slip), so Request.headers never held the HTTP headers.
        self.headers = env.headers
    def post_param(self, name):
        """Return the named field from the JSON body, or None if absent."""
        return self.params.get(name)
class BaseAuthController(object):
    """
    Base pyramid controller wiring the python-oauth2 provider pieces
    (client store, token store, authenticator, grant handlers) together.

    Subclasses must implement `_get_token_store` and `_get_client_store`.
    """
    def __init__(self, request, site_adapter):
        self.request = Request(request)
        self.site_adapter = site_adapter
        self.token_generator = Uuid4()
        self.client_store = self._get_client_store()
        self.access_token_store = self._get_token_store()
        self.client_authenticator = ClientAuthenticator(
            client_store=self.client_store,
            source=request_body
        )
        self.grant_types = []

    @classmethod
    def _get_token_store(cls):
        """Return the access-token store. Must be overridden."""
        # BUG FIX: the original body was the bare expression
        # `NotImplementedError`, a no-op -- a subclass that forgot to
        # override this silently got a None store instead of failing loudly.
        raise NotImplementedError

    @classmethod
    def _get_client_store(cls):
        """Return the client store. Must be overridden."""
        raise NotImplementedError

    def add_grant(self, grant):
        """
        Adds a Grant that the provider should support.

        :param grant: An instance of a class that extends
                      :class:`oauth2.grant.GrantHandlerFactory`
        """
        if hasattr(grant, "expires_in"):
            self.token_generator.expires_in[grant.grant_type] = grant.expires_in

        if hasattr(grant, "refresh_expires_in"):
            self.token_generator.refresh_expires_in = grant.refresh_expires_in

        self.grant_types.append(grant)

    def _determine_grant_type(self, request):
        # First grant factory that recognises the request wins.
        for grant in self.grant_types:
            grant_handler = grant(request, self)
            if grant_handler is not None:
                return grant_handler
        raise UnsupportedGrantError

    def authenticate(self):
        """Run the matching grant handler and wrap its output for pyramid."""
        response = Response()
        grant_type = self._determine_grant_type(self.request)
        grant_type.read_validate_params(self.request)
        grant_type.process(self.request, response, {})
        return PyramidResponse(body=response.body,
                               status=response.status_code,
                               content_type="application/json")
| __author__ = 'Bhoomit'
import json
from pyramid.response import Response as PyramidResponse
from oauth2.web import Response
from oauth2.error import OAuthInvalidError, \
ClientNotFoundError, OAuthInvalidNoRedirectError, UnsupportedGrantError, ParameterMissingError
from oauth2.client_authenticator import ClientAuthenticator, request_body
from oauth2.tokengenerator import Uuid4
class Request():
"""
Contains data of the current HTTP request.
"""
def __init__(self, env):
self.method = env.method
self.params = env.json_body
self.registry = env.registry
self.headers = env.registry
def post_param(self, name):
return self.params.get(name)
class BaseAuthController(object):
def __init__(self, request, site_adapter):
self.request = Request(request)
self.site_adapter = site_adapter
self.token_generator=Uuid4()
self.client_store = self._get_client_store()
self.access_token_store = self._get_token_store()
self.client_authenticator = ClientAuthenticator(
client_store=self.client_store,
source=request_body
)
self.grant_types = [];
@classmethod
def _get_token_store(cls):
NotImplementedError
@classmethod
def _get_client_store(cls):
NotImplementedError
def add_grant(self, grant):
"""
Adds a Grant that the provider should support.
:param grant: An instance of a class that extends
:class:`oauth2.grant.GrantHandlerFactory`
"""
if hasattr(grant, "expires_in"):
self.token_generator.expires_in[grant.grant_type] = grant.expires_in
if hasattr(grant, "refresh_expires_in"):
self.token_generator.refresh_expires_in = grant.refresh_expires_in
self.grant_types.append(grant)
def _determine_grant_type(self, request):
for grant in self.grant_types:
grant_handler = grant(request, self)
if grant_handler is not None:
return grant_handler
raise UnsupportedGrantError
def authenticate(self):
response = Response()
grant_type = self._determine_grant_type(self.request)
grant_type.read_validate_params(self.request)
grant_type.process(self.request, response, {})
return PyramidResponse(body=response.body, status=response.status_code, content_type="application/json") | Python | 0.000001 |
debca45d27e414d09c4814bec14d49b22e166274 | Add tool to process familyname and style data. | nototools/check_familyname_and_styles.py | nototools/check_familyname_and_styles.py | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process a family description file.
You can check the file, and generate a list of font file names from it.
This list can be passed to noto_names to generate the name data.
The file is a list of Noto family names (see noto_fonts.py) interspersed with
definitions of what WWS style combinations apply to that file. See
_get_stylenames() for the format. Each style definition applies to each
following family until the next style definition."""
import argparse
import re
from nototools import noto_fonts
_style_re = re.compile(r'--\s+(.*)\s+--')
_extended_style_re = re.compile(r'^([TRBH]+)(?:/([CR]+)(?:/([RI]+))?)?$')
# Below we use the longest names we intend, so that the noto_names code can
# identify which families need extra short abbreviations. The style of
# abbreviation is based on the longest names in the family.
_WEIGHT_NAMES = {
'T': 'Thin',
'R': 'Regular',
'B': 'Bold',
'H': 'ExtraBold' # Nee 'Heavy'. Not 'Black' because 'ExtraBold' is longer.
}
_WIDTH_NAMES = {
'C': 'SemiCondensed', # We use this since it is longer. We don't expect to
# use ExtraCondensed.
'R': ''
}
_ITALIC_NAMES = {
'I': 'Italic',
'R': '',
}
def _get_stylenames(styles):
"""Returns the list of style names for the encoded styles. These are the
(master-ish) style names encoded as weights / widths/ italic, where weights
is any of 'T', 'R', 'B', or 'H', widths any of 'C' or 'R', and italic 'I'.
If there's not an italic then the italic is omitted, if there's only
regular width and no italic then widths are omitted."""
m = _extended_style_re.match(styles)
assert m
weights = m.group(1)
widths = m.group(2) or 'R'
slopes = m.group(3) or 'R'
# print '%s: %s, %s, %s' % (styles, weights, widths, slopes)
names = []
for wd in widths:
width_name = _WIDTH_NAMES[wd]
for wt in weights:
weight_name = _WEIGHT_NAMES[wt]
for it in slopes:
italic_name = _ITALIC_NAMES[it]
final_weight_name = weight_name
if wt == 'R' and (width_name or italic_name):
final_weight_name = ''
names.append(width_name + final_weight_name + italic_name)
return names
def check_familyname(name, styles):
    """Validate that `name` parses as a Noto family; print its WWS id.

    Returns False (after reporting) when noto_fonts cannot parse the name.
    """
    notofont = noto_fonts.get_noto_font('unhinted/' + name + '-Regular.ttf')
    if not notofont:
        print 'Error: could not parse', name
        return False
    print name, noto_fonts.noto_font_to_wws_family_id(notofont), styles
    return True
def generate_family_filenames(name, styles):
    """Name is the family name portion of a Noto filename.  Styles is the
    encoding of the styles, see _get_stylenames."""
    return [name + '-' + stylename + '.ttf'
            for stylename in _get_stylenames(styles)]
def _for_all_familynames(namefile, fn):
    """Call fn passing the family name and style descriptor for
    all families in namefile. '#' is a comment to eol, blank lines are
    ignored."""
    # Styles default to 'RB' until the first '-- <styles> --' marker line;
    # each marker applies to every family name that follows it.
    styles = 'RB'
    with open(namefile, 'r') as f:
        for name in f:
            ix = name.find('#')
            if ix >= 0:
                name = name[:ix]
            name = name.strip()
            if not name:
                continue
            m = _style_re.match(name)
            if m:
                styles = m.group(1)
                continue
            # A leading '-' here would mean a malformed style marker line.
            assert name[0] != '-'
            fn(name, styles)
def check_familynames(namefile):
    """Return True when every family listed in namefile passes validation."""
    results = [True]  # one-element list so the closure can update it (py2)
    def _collect(name, styles):
        ok = check_familyname(name, styles)
        results[0] = results[0] and ok
    _for_all_familynames(namefile, _collect)
    return results[0]
def generate_filenames(namefile, outfile):
    """Write the font file names for every family in namefile to outfile,
    or to stdout when outfile is falsy."""
    namelist = []
    def fn(name, styles):
        namelist.extend(generate_family_filenames(name, styles))
    _for_all_familynames(namefile, fn)
    allnames = '\n'.join(namelist)
    if outfile:
        with open(outfile, 'w') as f:
            f.write(allnames)
            f.write('\n')
    else:
        print allnames
def main():
    """Command-line driver: check the name/style data and/or emit filenames."""
    DEFAULT_NAMEDATA = 'familyname_and_styles.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--familynamedata', help='file containing family name/style data'
        ' (default %s)' % DEFAULT_NAMEDATA, metavar='file',
        default=DEFAULT_NAMEDATA)
    parser.add_argument(
        '-c', '--check', help='check family name/style data', action='store_true')
    parser.add_argument(
        '-w', '--write', help='write filenames, default stdout', nargs='?',
        const='stdout', metavar='file')
    args = parser.parse_args()

    if args.check:
        passed = check_familynames(args.familynamedata)
        if not passed:
            print 'Check failed, some files had errors.'
            # Do not emit filenames from data that failed validation.
            return
        print 'Check succeeded.'

    if args.write:
        # '-w' with no value means stdout (const='stdout').
        outfile = None if args.write == 'stdout' else args.write
        if not outfile and args.check:
            print
        generate_filenames(args.familynamedata, outfile)
        if outfile:
            print 'Wrote', outfile
if __name__ == '__main__':
main()
| Python | 0 | |
b7b85f84d5bfd46961dd914ef06d8e288529d06a | Add `oeis` modulo to interact with OEIS online encyclopedia. | python-libs/oeis.py | python-libs/oeis.py |
from itertools import count
#________________________________________________________________________
def promote_id_anchors(text):
    """
    Return a copy of `text` where every OEIS sequence id (`A` followed by
    six digits) is wrapped in an HTML anchor pointing at oeis.org.
    """
    import re
    pattern = '(?P<id>A\d{6,6})'
    replacement = r'<a href="http://oeis.org/\g<id>">\g<id></a>'
    return re.sub(pattern, replacement, text)
def merge_splitted_text(lst):
    """
    Return a new list where each "(Start)" ... "(End)" run in `lst` is
    joined into a single entry (markers stripped, pieces separated by
    "\n - "); all other entries are copied through unchanged.
    """
    merged = []
    i = 0
    while i < len(lst):
        if '(Start)' in lst[i] or '(start)' in lst[i]:
            j = i + 1
            while j < len(lst) and not ('(End)' in lst[j] or '(end)' in lst[j]):
                j += 1
            joiner = "\n - "
            # Include the closing line unless it is exactly the bare marker.
            # BUG FIX: guard j against running off the end -- the original
            # indexed lst[j] unconditionally and raised IndexError whenever
            # a "(Start)" run was never closed before the list ended.
            stop = j if j >= len(lst) or lst[j] == '(End)' else j + 1
            merged.append(joiner.join(lst[i:stop])
                          .replace('(Start)', '').replace('(start)', '')
                          .replace('(End)', '').replace('(end)', ''))
            i = j + 1
        else:
            merged.append(lst[i])
            i += 1
    return merged
#________________________________________________________________________
class head_builder:
    """Render the header line (linked id, name, author) for one result."""

    def __call__(self, doc):
        template = (r"<div align='center'><b><a href='http://oeis.org/A{:06d}'>"
                    r"A{:06d}</a></b>: <i>{}</i><br></div>")
        head = template.format(doc['number'], doc['number'], doc['name'])
        return head + "\n\nby {}".format(doc['author'])
class keyword_builder:
    """Render the keyword line for one result."""

    def __call__(self, doc):
        keywords = doc['keyword']
        return "\n_Keywords_: `{}`".format(keywords)
class data_builder:
    """Render the first `upper_limit` terms of a sequence as a LaTeX array."""

    def __init__(self, upper_limit):
        # Maximum number of leading terms to show.
        self.upper_limit = upper_limit

    def __call__(self, doc):
        seq = doc['data'].split(',')[:self.upper_limit]
        # NOTE(review): `nat` runs from the sequence offset up to len(seq),
        # so for a non-zero offset the header row has fewer entries than the
        # data row -- confirm whether that misalignment is intended.
        array_template = r'''
$$
\begin{env}{{c|{nel}}}
n & {nat} \\
\hline
{id}(n) & {seq}
\end{env}
$$
'''.format(env="{array}",
           nel='c'*len(seq),
           nat=" & ".join([str(i) for i in range(int(doc['offset'].split(',')[0]), len(seq))]),
           id="A{:06d}".format(doc['number']),
           seq = " & ".join(seq))

        data = "\n_Data_:\n{}".format(array_template)
        return data
class content_builder:
    """Filter and link-annotate a list of OEIS content lines.

    `filter_pred(index, lowercased_line)` decides which lines survive.
    """

    def __init__(self, filter_pred):
        self.filter_pred = filter_pred

    def __call__(self, content):
        lines = [promote_id_anchors(line)
                 for line in merge_splitted_text(content)]
        return [line for i, line in enumerate(lines)
                if self.filter_pred(i, line.lower())]
class comment_builder(content_builder):
    """Render the _Comments_ section, or '' when missing or fully filtered."""

    def __call__(self, doc):
        if 'comment' not in doc:
            return ""
        kept = super(comment_builder, self).__call__(doc['comment'])
        if not kept:
            return ""
        return "\n_Comments_:\n - " + "\n - ".join(kept)
class formula_builder(content_builder):
    """Render the _Formulae_ section, or '' when missing or fully filtered."""

    def __call__(self, doc):
        if 'formula' not in doc:
            return ""
        kept = super(formula_builder, self).__call__(doc['formula'])
        if not kept:
            return ""
        return "\n_Formulae_:\n - " + "\n - ".join(kept)
class xref_builder(content_builder):
    """Render the _Cross references_ section, or '' when missing/filtered."""

    def __call__(self, doc):
        if 'xref' not in doc:
            return ""
        kept = super(xref_builder, self).__call__(doc['xref'])
        if not kept:
            return ""
        return "\n_Cross references_:\n - " + "\n - ".join(kept)
class link_builder(content_builder):
    """Render the _Links_ section, or '' when missing or fully filtered."""

    def __call__(self, doc):
        if 'link' not in doc:
            return ""
        kept = super(link_builder, self).__call__(doc['link'])
        if not kept:
            return ""
        return "\n_Links_:\n - " + "\n - ".join(kept)
class reference_builder(content_builder):
    """Render the _References_ section, or '' when missing or fully filtered."""

    def __call__(self, doc):
        if 'reference' not in doc:
            return ""
        kept = super(reference_builder, self).__call__(doc['reference'])
        if not kept:
            return ""
        return "\n_References_:\n - " + "\n - ".join(kept)
#________________________________________________________________________
def pretty_print(doc,
                 head=None,
                 keyword=None,
                 data_upper_limit=20,
                 comment=lambda i, c: True,
                 formula=lambda i, c: True,
                 xref=lambda i, c: True,
                 link=lambda i, c: "broken link" not in c,
                 reference=lambda i, c: True,):
    """
    Render one OEIS result document as a Markdown/LaTeX description.

    The `comment`/`formula`/`xref`/`link`/`reference` arguments are
    predicates `(index, lowercased_line) -> bool` that select which lines
    of each section to keep; `data_upper_limit` caps the data table size.
    """
    # NOTE(review): `head` and `keyword` are accepted but never used --
    # presumably kept for signature symmetry; confirm before removing.
    builders = [head_builder(),
                keyword_builder(),
                data_builder(upper_limit=data_upper_limit),
                comment_builder(filter_pred=comment),
                formula_builder(filter_pred=formula),
                xref_builder(filter_pred=xref),
                link_builder(filter_pred=link),
                reference_builder(filter_pred=reference)]
    descr = "\n".join([builder(doc) for builder in builders])
    return descr
def oeis_search(id=None, seq=None, query="", start=0,):
    """
    Query the OEIS search endpoint and return a callable that renders the
    results as IPython Markdown.

    Exactly one of `id` (sequence number), `seq` (iterable of terms) or
    `query` (free text) is used, checked in that order; `start` pages
    through the results.
    """
    # the following imports are too specific to appear at the top of the module.
    from requests import get
    from IPython.display import Markdown

    payload = {"fmt": "json", "start": start}
    # NOTE: `id` intentionally shadows the builtin; kept for API stability.
    if id: payload.update({"q": "id:A{:06d}".format(id)})
    elif seq: payload.update({"q": ", ".join(map(str,seq))})
    else: payload.update({"fmt": "json", "q": query})

    try:
        doc_result = get("https://oeis.org/search", params=payload,)
        doc = doc_result.json()
    except:
        # Deliberate best-effort: any network/JSON failure yields an
        # "error" renderer instead of raising inside the notebook.
        return lambda **pp_kwds: Markdown("<hr>__Connection Error__<hr>")

    def searchable(**pp_kwds):
        # pp_kwds are forwarded to pretty_print (section filters, limits).
        results_description = "_Results for query: <a href='{url}'>{url}</a>_<br><hr>".format(url=doc_result.url)
        inner_results = [pretty_print(result, **pp_kwds) for result in doc['results']]
        return Markdown(results_description + "\n<hr>".join(inner_results))

    return searchable
| Python | 0 | |
661766d003c85ded302052119bf54f0ae972b9fb | Fix site ID fallback when testing. | mezzanine/utils/sites.py | mezzanine/utils/sites.py | from __future__ import unicode_literals
import os
import sys
from django.contrib.sites.models import Site
from mezzanine.conf import settings
from mezzanine.core.request import current_request
def current_site_id():
    """
    Responsible for determining the current ``Site`` instance to use
    when retrieving data for any ``SiteRelated`` models. If a request
    is available, and the site can be determined from it, we store the
    site against the request for subsequent retrievals. Otherwise the
    order of checks is as follows:

      - ``site_id`` in session. Used in the admin so that admin users
        can switch sites and stay on the same domain for the admin.
      - host for the current request matched to the domain of the site
        instance.
      - ``MEZZANINE_SITE_ID`` environment variable, so management
        commands or anything else outside of a request can specify a
        site.
      - ``SITE_ID`` setting.

    """
    from mezzanine.utils.cache import cache_installed, cache_get, cache_set
    request = current_request()
    site_id = getattr(request, "site_id", None)
    if request and not site_id:
        site_id = request.session.get("site_id", None)
        if not site_id:
            domain = request.get_host().lower()
            if cache_installed():
                # Don't use Mezzanine's cache_key_prefix here, since it
                # uses this very function we're in right now to create a
                # per-site cache key.
                bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
                cache_key = "%s.site_id.%s" % bits
                site_id = cache_get(cache_key)
            if not site_id:
                try:
                    site = Site.objects.get(domain__iexact=domain)
                except Site.DoesNotExist:
                    pass
                else:
                    site_id = site.id
                    if cache_installed():
                        cache_set(cache_key, site_id)
    # A site resolved from the session/domain is pinned on the request so
    # subsequent lookups in the same request skip the work above.
    if request and site_id:
        request.site_id = site_id
    if not site_id:
        site_id = os.environ.get("MEZZANINE_SITE_ID", settings.SITE_ID)
    # The env/SITE_ID fallback is only cached on the request outside of
    # tests, so tests can vary the site id between lookups.
    if request and site_id and not getattr(settings, "TESTING", False):
        request.site_id = site_id
    return site_id
def has_site_permission(user):
    """
    Checks if a staff user has staff-level access for the current site.
    The actual permission lookup occurs in ``SitePermissionMiddleware``
    which then marks the request with the ``has_site_permission`` flag,
    so that we only query the db once per request, so this function
    serves as the entry point for everything else to check access. We
    also fall back to an ``is_staff`` check if the middleware is not
    installed, to ease migration.
    """
    mw = "mezzanine.core.middleware.SitePermissionMiddleware"
    if mw not in settings.MIDDLEWARE_CLASSES:
        from warnings import warn
        warn(mw + " missing from settings.MIDDLEWARE_CLASSES - per site"
             "permissions not applied")
        # Fallback path: behave like plain Django staff access.
        return user.is_staff and user.is_active
    # Flag is set per-request by the middleware; default to False if absent.
    return getattr(user, "has_site_permission", False)
def host_theme_path(request):
    """
    Returns the directory of the theme associated with the given host.
    """
    request_host = request.get_host().split(":")[0].lower()
    for host, theme in settings.HOST_THEMES:
        if host.lower() != request_host:
            continue
        try:
            __import__(theme)
            module = sys.modules[theme]
        except ImportError:
            pass
        else:
            return os.path.dirname(os.path.abspath(module.__file__))
    return ""
def templates_for_host(request, templates):
    """
    Given a template name (or list of them), returns the template names
    as a list; when the host has a theme, each name is preceded by its
    theme-prefixed variant.
    """
    if not isinstance(templates, (list, tuple)):
        templates = [templates]
    theme_dir = host_theme_path(request)
    if not theme_dir:
        return templates
    prefixed = []
    for template in templates:
        prefixed.append("%s/templates/%s" % (theme_dir, template))
        prefixed.append(template)
    return prefixed
| from __future__ import unicode_literals
import os
import sys
from django.contrib.sites.models import Site
from mezzanine.conf import settings
from mezzanine.core.request import current_request
def current_site_id():
"""
Responsible for determining the current ``Site`` instance to use
when retrieving data for any ``SiteRelated`` models. If a request
is available, and the site can be determined from it, we store the
site against the request for subsequent retrievals. Otherwise the
order of checks is as follows:
- ``site_id`` in session. Used in the admin so that admin users
can switch sites and stay on the same domain for the admin.
- host for the current request matched to the domain of the site
instance.
- ``MEZZANINE_SITE_ID`` environment variable, so management
commands or anything else outside of a request can specify a
site.
- ``SITE_ID`` setting.
"""
from mezzanine.utils.cache import cache_installed, cache_get, cache_set
request = current_request()
site_id = getattr(request, "site_id", None)
if request and not site_id:
site_id = request.session.get("site_id", None)
if not site_id:
domain = request.get_host().lower()
if cache_installed():
# Don't use Mezzanine's cache_key_prefix here, since it
# uses this very function we're in right now to create a
# per-site cache key.
bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
cache_key = "%s.site_id.%s" % bits
site_id = cache_get(cache_key)
if not site_id:
try:
site = Site.objects.get(domain__iexact=domain)
except Site.DoesNotExist:
pass
else:
site_id = site.id
if cache_installed():
cache_set(cache_key, site_id)
if not site_id:
site_id = os.environ.get("MEZZANINE_SITE_ID", settings.SITE_ID)
if request and site_id:
request.site_id = site_id
return site_id
def has_site_permission(user):
"""
Checks if a staff user has staff-level access for the current site.
The actual permission lookup occurs in ``SitePermissionMiddleware``
which then marks the request with the ``has_site_permission`` flag,
so that we only query the db once per request, so this function
serves as the entry point for everything else to check access. We
also fall back to an ``is_staff`` check if the middleware is not
installed, to ease migration.
"""
mw = "mezzanine.core.middleware.SitePermissionMiddleware"
if mw not in settings.MIDDLEWARE_CLASSES:
from warnings import warn
warn(mw + " missing from settings.MIDDLEWARE_CLASSES - per site"
"permissions not applied")
return user.is_staff and user.is_active
return getattr(user, "has_site_permission", False)
def host_theme_path(request):
"""
Returns the directory of the theme associated with the given host.
"""
for (host, theme) in settings.HOST_THEMES:
if host.lower() == request.get_host().split(":")[0].lower():
try:
__import__(theme)
module = sys.modules[theme]
except ImportError:
pass
else:
return os.path.dirname(os.path.abspath(module.__file__))
return ""
def templates_for_host(request, templates):
"""
Given a template name (or list of them), returns the template names
as a list, with each name prefixed with the device directory
inserted into the front of the list.
"""
if not isinstance(templates, (list, tuple)):
templates = [templates]
theme_dir = host_theme_path(request)
host_templates = []
if theme_dir:
for template in templates:
host_templates.append("%s/templates/%s" % (theme_dir, template))
host_templates.append(template)
return host_templates
return templates
| Python | 0 |
ba50883881d3e652c1175489e16c3c5839807feb | add new: GWinstek GDS-840S, RS-232 connection. | serial/serialGDS840S.py | serial/serialGDS840S.py | #!/usr/bin/env python
# RS-232 serial support for GW Instek GDS-840S Digital Storage Oscilloscope
# http://www.gwinstek.com/html/en/DownloadFile.asp?sn=255&uid=&lv=
# Filename: 82DS-82000IA.pdf
import serial
# Values set on unit manually (but these are standard settings)
# NOTE(review): the port path '/dev/ttyUSB0' is hardcoded -- adjust per host.
ser = serial.Serial('/dev/ttyUSB0',baudrate=38400, bytesize=8, stopbits=1, \
    parity=serial.PARITY_NONE, timeout=3)
ser.open()
def sendCmd(handler, command):
    """Send one command string to the scope, newline-terminated."""
    payload = "%s\n" % (command)
    handler.write(payload)
def recvCmd(handler):
    """Read one response line from the scope, stripped of CR/LF padding."""
    response = handler.readline()
    return response.strip()
# Query the instrument id (*IDN?) and the measured frequency, then close.
sendCmd(ser, "*IDN?")
id = ser.readline()  # NOTE: shadows the `id` builtin
print id
#~ sendCmd(ser, ":AUToset")
sendCmd(ser, ":MEASure:FREQuency?")
freq = recvCmd(ser)
print freq
ser.close()
| Python | 0 | |
e07b970568971bab8b46e847b68d7a6b4bd93539 | Transform binary classifiers into multi class | milk/supervised/multi.py | milk/supervised/multi.py | # -*- coding: utf-8 -*-
# Copyright (C) 2008, Luís Pedro Coelho <lpc@cmu.edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy
import random
class one_against_rest(object):
    '''
    Implements one vs. rest classification strategy to transform
    a binary classifier into a multi-class classifier.

    classifier = one_against_rest(base)

    base must be a callable object that returns a fresh binary classifier.

    Example
    -------

        multi = one_against_rest(milk.supervised.simple_svm)
        multi.train(training_features, labels)
        print multi.apply(testing_features)

    See Also
    --------
    one_against_one
    '''
    def __init__(self, base):
        self.classifiers = None
        self.base = base
        self.is_multi_class = True
        # BUG FIX: the original line was the bare expression `self.trained`,
        # which raised AttributeError on construction.
        self.trained = False

    def train(self, features, labels):
        '''Train one binary classifier per class (class i vs. everything else).'''
        labels, self.names = normaliselabels(labels)
        self.nclasses = labels.max() + 1
        self.classifiers = []
        # BUG FIX: the original iterated over `self._nclasses`, which is
        # never assigned anywhere; `range` works on Python 2 and 3.
        for i in range(self.nclasses):
            s = self.base()
            s.train(features, labels == i)
            self.classifiers.append(s)
        self.trained = True

    def apply(self, feats):
        '''Classify a sequence of objects.'''
        return [self.apply_one(f) for f in feats]

    def apply_one(self, feats):
        '''Classify one single object and return its label.'''
        assert self.trained
        vals = numpy.array([c.apply_one(feats) for c in self.classifiers])
        idxs, = numpy.where(vals == 1)
        if len(idxs) == 1:
            label = idxs[0]
        elif len(idxs) == 0:
            # No classifier claimed the sample: pick a class at random.
            # BUG FIX: randint(0, nclasses) is inclusive at both ends and
            # could index one past the end of self.names; randrange
            # excludes the upper bound.
            label = random.randrange(self.nclasses)
        else:
            # Several classifiers claimed it: break the tie at random.
            label = random.choice(idxs)
        return self.names[label]
class one_against_one(object):
    '''
    Implements one vs. one classification strategy to transform
    a binary classifier into a multi-class classifier.

    classifier = one_against_one(base)

    base must be a callable object that returns a fresh binary classifier.

    Example
    -------

        multi = one_against_one(milk.supervised.simple_svm)
        multi.train(training_features, labels)
        print multi.apply(testing_features)

    See Also
    --------
    one_against_rest
    '''
    def __init__(self, base):
        # BUG FIX: the original stored `self._classifiers`/`self._base`, but
        # train() reads `self.classifiers`/`self.base` -- use one spelling
        # (the non-underscored one, matching one_against_rest).
        self.classifiers = None
        self.base = base
        self.is_multi_class = True
        self.trained = False

    def train(self, features, labels):
        '''
        one_against_one.train(features, labels)

        Train one binary classifier for every unordered pair of classes.
        '''
        labels, self.names = normaliselabels(labels)
        self.nclasses = labels.max() + 1
        self.classifiers = [[None for _ in range(self.nclasses)]
                            for _ in range(self.nclasses)]
        for i in range(self.nclasses):
            # BUG FIX: the original read `self.nclasse` (typo), which would
            # raise AttributeError as soon as training started.
            for j in range(i + 1, self.nclasses):
                s = self.base()
                idxs = (labels == i) | (labels == j)
                s.train(features[idxs], labels[idxs] == i)
                self.classifiers[i][j] = s
        self.trained = True

    def apply(self, feats):
        '''
        one_against_one.apply(objs)

        Apply the learned classifier to a sequence of objects.

        See also
        --------
        apply_one
        '''
        return [self.apply_one(f) for f in feats]

    def apply_one(self, feats):
        '''
        one_against_one.apply_one(obj)

        Classify one single object by majority vote of the pair classifiers.

        See also
        --------
        apply
        '''
        assert self.trained
        nc = self.nclasses
        # BUG FIX: `zeros` was unqualified (NameError); this file imports numpy.
        votes = numpy.zeros(nc)
        for i in range(nc):
            for j in range(i + 1, nc):
                # Consistency fix: the per-sample entry point is apply_one()
                # (as used in one_against_rest); the original called .apply().
                c = self.classifiers[i][j].apply_one(feats)
                if c:
                    votes[i] += 1
                else:
                    votes[j] += 1
        return self.names[votes.argmax(0)]
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
| Python | 0.999998 | |
096087b4fddf9bc2644bcbb71834fcfc5985558c | add flash order | scripts/flash-order.py | scripts/flash-order.py | #!/usr/bin/python3
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
# Placeholder demo credentials -- replace with real PubNub keys before use.
pnconfig = PNConfiguration()
pnconfig.subscribe_key = "my_subkey"
pnconfig.publish_key = "my_pubkey"
pnconfig.ssl = False  # NOTE(review): TLS disabled -- confirm this is intended
pubnub = PubNub(pnconfig)
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNOperationType, PNStatusCategory
class MySubscribeCallback(SubscribeCallback):
    """Skeleton PubNub listener: every status/presence/message branch is a
    documented no-op, to be filled in by the application."""

    def status(self, pubnub, status):
        pass
        # The status object returned is always related to subscribe but could contain
        # information about subscribe, heartbeat, or errors
        # use the operationType to switch on different options
        if status.operation == PNOperationType.PNSubscribeOperation \
                or status.operation == PNOperationType.PNUnsubscribeOperation:
            if status.category == PNStatusCategory.PNConnectedCategory:
                pass
                # This is expected for a subscribe, this means there is no error or issue whatsoever
            elif status.category == PNStatusCategory.PNReconnectedCategory:
                pass
                # This usually occurs if subscribe temporarily fails but reconnects. This means
                # there was an error but there is no longer any issue
            elif status.category == PNStatusCategory.PNDisconnectedCategory:
                pass
                # This is the expected category for an unsubscribe. This means there
                # was no error in unsubscribing from everything
            elif status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
                pass
                # This is usually an issue with the internet connection, this is an error, handle
                # appropriately retry will be called automatically
            elif status.category == PNStatusCategory.PNAccessDeniedCategory:
                pass
                # This means that PAM does allow this client to subscribe to this
                # channel and channel group configuration. This is another explicit error
            else:
                pass
                # This is usually an issue with the internet connection, this is an error, handle appropriately
                # retry will be called automatically
        # NOTE(review): this branch repeats PNSubscribeOperation, which the
        # first `if` already matched, so it is unreachable; given the
        # heartbeat comments below it was presumably meant to test
        # PNOperationType.PNHeartbeatOperation -- confirm before changing.
        elif status.operation == PNOperationType.PNSubscribeOperation:
            # Heartbeat operations can in fact have errors, so it is important to check first for an error.
            # For more information on how to configure heartbeat notifications through the status
            # PNObjectEventListener callback, consult <link to the PNCONFIGURATION heartbeart config>
            if status.is_error():
                pass
                # There was an error with the heartbeat operation, handle here
            else:
                pass
                # Heartbeat operation was successful
        else:
            pass
            # Encountered unknown status type

    def presence(self, pubnub, presence):
        pass  # handle incoming presence data

    def message(self, pubnub, message):
        pass  # handle incoming messages
pubnub.add_listener(MySubscribeCallback())
| Python | 0 | |
7dee9be2022bdf481bc5bc6766684058fd9d44e5 | add script for generating the manifest for a given package | scripts/genmanifest.py | scripts/genmanifest.py | #!/usr/bin/python
#
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import os
import sys
# Make the developer checkouts importable; paths are hardcoded to ~/hg.
sys.path.insert(0, os.environ['HOME'] + '/hg/rpath-xmllib')
sys.path.insert(0, os.environ['HOME'] + '/hg/conary')
sys.path.insert(0, os.environ['HOME'] + '/hg/mirrorball')

from conary.lib import util
sys.excepthook = util.genExcepthook()

from updatebot import bot, config, log

log.addRootLogger()
cfg = config.UpdateBotConfig()
cfg.read(os.environ['HOME'] + '/hg/mirrorball/config/centos/updatebotrc')
obj = bot.Bot(cfg)
obj._populatePkgSource()

# Usage: genmanifest.py <package-name>
pkgName = sys.argv[1]
# NOTE(review): these call private updatebot APIs (leading underscore);
# confirm they are stable enough for a standalone script.
srcPkg = obj._updater._getPackagesToImport(pkgName)
manifest = obj._updater._getManifestFromPkgSource(srcPkg)

print '\n'.join(manifest)
| Python | 0.000002 | |
0ca7d4a20c8a65e45ddb7c61ca72c0e6c464a80e | Create template_redacted entry for templates created by migration | migrations/versions/0296_template_redacted_fix.py | migrations/versions/0296_template_redacted_fix.py | """
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
    # Backfill template_redacted for templates created before this table
    # existed: one row per template, redaction off, attributed to the
    # template's creator.
    op.execute("""
        INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
        SELECT templates.id, FALSE, now(), templates.created_by_id
        FROM templates
        WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
        ;
    """)
def downgrade():
    # Intentionally a no-op: presumably the backfilled rows cannot be told
    # apart from rows created later, so they are left in place.
    pass
| Python | 0 | |
a38f18b8c51ad83b5c4b92853fa5640137131ad9 | imprime sequencia de gtins, calculando dígito verificador | script/gera_gtin.py | script/gera_gtin.py | from gtin import GTIN
# Fixed GS1 prefix/company parts of the GTIN; only the product code varies.
country = 789  # NOTE(review): presumably the Brazil GS1 prefix -- confirm
company = 96188
product = 7251
quant = 127

for incr in range(quant):
    # Concatenate the leading digits; GTIN(raw=...) is expected to supply
    # the check digit when rendered -- see the gtin package docs.
    numero_gtin = '{}{}{}'.format(country, company, product+incr)
    print(str(GTIN(raw=numero_gtin)))
| Python | 0.000001 | |
2611476df6f362cd59e4aad38a243fc8f6cbf8a8 | Purge da página de palestra quando salva palestrante | devincachu/purger.py | devincachu/purger.py | # -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
    """Register cache purges for flatpages and talk pages.

    Each talk page is purged whenever a Palestra or a Palestrante is
    saved or deleted, so speaker edits also refresh the talk page.
    """
    flatpages = models.FlatPage.objects.all()
    for f in flatpages:
        roan.purge(f.url).on_save(models.FlatPage)

    palestras = pmodels.Palestra.objects.all()
    for p in palestras:
        # Hoisted: the original recomputed this URL four times per talk.
        # Assumes the getter is side-effect free -- TODO confirm.
        url = p.get_absolute_url_and_link_title()['url']
        roan.purge(url).on_save(pmodels.Palestra)
        roan.purge(url).on_delete(pmodels.Palestra)
        roan.purge(url).on_save(pmodels.Palestrante)
        roan.purge(url).on_delete(pmodels.Palestrante)
| # -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
| Python | 0 |
a01d471bc8bd171de9301b428466f7ccba93872b | Revert "Made the scheduling of Queue.get more fair." | diesel/util/queue.py | diesel/util/queue.py | from time import time
from uuid import uuid4
from collections import deque
from diesel import wait, fire, sleep, first
class QueueEmpty(Exception): pass
class QueueTimeout(Exception): pass
class Queue(object):
def __init__(self):
self.wait_id = uuid4().hex
self.inp = deque()
def put(self, i=None):
self.inp.append(i)
fire(self.wait_id)
def get(self, waiting=True, timeout=None):
start = time()
while not self.inp and waiting:
if timeout:
remaining = timeout - (time() - start)
if remaining <= 0:
raise QueueTimeout()
else:
first(waits=[self.wait_id], sleep=remaining)
else:
wait(self.wait_id)
if self.inp:
return self.inp.popleft()
elif not waiting:
raise QueueEmpty()
def __iter__(self):
return self
def next(self):
return self.get()
@property
def is_empty(self):
return not bool(self.inp)
if __name__ == '__main__':
from diesel import Application, Loop, sleep
app = Application()
queue = Queue()
def worker():
sleep(0.25)
queue.put(1)
queue.put(2)
def consumer_no_wait():
try:
queue.get(waiting=False)
except QueueEmpty:
pass
else:
assert False
def consumer_timeout():
try:
queue.get(timeout=0.1)
except QueueTimeout:
pass
else:
assert False
def consumer(expected):
val = queue.get()
assert expected == val, '%s != %s' % (expected, val)
if queue.is_empty:
print 'success!'
app.halt()
app.add_loop(Loop(worker))
app.add_loop(Loop(consumer_no_wait))
app.add_loop(Loop(consumer_timeout))
app.add_loop(Loop(lambda: consumer(1)))
app.add_loop(Loop(lambda: consumer(2)))
app.run()
| from time import time
from uuid import uuid4
from collections import deque
from diesel import wait, fire, sleep, first
class QueueEmpty(Exception): pass
class QueueTimeout(Exception): pass
class Queue(object):
def __init__(self):
self.inp = deque()
self.waiters = deque()
def put(self, i=None):
self.inp.append(i)
if self.waiters:
wait_id = self.waiters.popleft()
fire(wait_id)
def get(self, waiting=True, timeout=None):
start = time()
if not self.inp and waiting:
wait_id = uuid4().hex
self.waiters.append(wait_id)
while not self.inp and waiting:
if timeout:
remaining = timeout - (time() - start)
if remaining <= 0:
raise QueueTimeout()
else:
first(waits=[wait_id], sleep=remaining)
else:
wait(wait_id)
if self.inp:
return self.inp.popleft()
elif not waiting:
raise QueueEmpty()
def __iter__(self):
return self
def next(self):
return self.get()
@property
def is_empty(self):
return not bool(self.inp)
if __name__ == '__main__':
from diesel import Application, Loop, sleep
app = Application()
queue = Queue()
def worker():
sleep(0.25)
queue.put(1)
queue.put(2)
def consumer_no_wait():
try:
queue.get(waiting=False)
except QueueEmpty:
pass
else:
assert False
def consumer_timeout():
try:
queue.get(timeout=0.1)
except QueueTimeout:
pass
else:
assert False
def consumer(expected):
val = queue.get()
assert expected == val, '%s != %s' % (expected, val)
if queue.is_empty:
print 'success!'
app.halt()
app.add_loop(Loop(worker))
app.add_loop(Loop(consumer_no_wait))
app.add_loop(Loop(consumer_timeout))
app.add_loop(Loop(lambda: consumer(1)))
app.add_loop(Loop(lambda: consumer(2)))
app.run()
| Python | 0 |
b8cd3912bbe67381829f70ec0f1d94e590632387 | Create BLASTvouchers.py | BLASTvouchers.py | BLASTvouchers.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 9 23:13:07 2017
BLASTvouchers.py
Will BLAST contigs made by MIRA against most recent Invertebrate db.
Modification: 11Apr17, now requires csv file list of fasta files to BLAST (because BLAST mod requires fasta input)
@author: tcampbell
"""
import os
import signal
import pandas as pd
import re
import mysql.connector
import sqlalchemy
import numpy as np
from MIRA_templateFilewriter import mysqlconnect
from IonZipSplitv3 import chunkdf #need to make sure this gets updated with new versions.
import mysql
import multiprocessing
from Bio.Blast import NCBIXML
import argparse
def BLASTvouchers(df, directory, assemdir, outdir):#this doesn't seem to be working with the Bio version of BLAST. should try with os.system BLAST cmd.
x = 0
x+=1
outfile = re.search(r'.*\/([A-Za-z0-9]*\_[0-9]{2}[A-H])_d_results/.*', str(df['FileName']))
if os.path.exists(directory + outdir) == False:
os.system('mkdir ' + directory + outdir )
outf = directory + outdir + str(outfile.group(1)) + '.xml'
print outf
os.system('/home/tcampbell/BLASTdb/ncbi-blast-2.6.0+/bin/blastn -query ' + str(df['FileName']) + ' -db /home/tcampbell/BLASTdb/MLML_Coarb_nosemi_10Apr17.fa -evalue 0.01 -outfmt 5 -out ' + outf)
Return None
def applyblast(df):
processid = os.getpid()
print processid
df[0].apply(BLASTvouchers, axis=1, args=(df[1], df[2],df[3] ))
os.kill(processid, signal.SIGTERM)
return None
def parsxml(filein):
result_handle = open(filein)
blast_records = NCBIXML.parse(result_handle)
assembly = 0
for blast_record in blast_records:
count = 0
assembly += 1
print "Assembly Sequence:", assembly
for alignment in blast_record.alignments[:3]:
for hsp in alignment.hsps:
print('****Alignment****')
print('sequence:', alignment.title)
print('e value:', hsp.expect)
print('length:', alignment.length)
print('score:', hsp.score)
print('alignment length:', hsp.align_length)
print('identity:',hsp.identities)
pctID = float(hsp.identities) / float(hsp.align_length) * 100
print 'Percent ID:', pctID
print('Query Start:', hsp.query_start)
count +=1
print "BLAST alignment Num", count
return blast_records
def main():
#===============PARSE ARGS===========================
parser = argparse.ArgumentParser(prog='IonZipSplitv3', usage='Insert Assembled Contigs into MySQL database', description=
'Insert Assembled Sequences into DB', conflict_handler='error', add_help=True)
parser.add_argument('--SequenceFiles', type = str, nargs = '+', help = 'File names of Sequencing Runs to assemble')
parser.add_argument('--Out', type = str, help = 'Output Directory, e.g. if /home/user/Documents/out/ then out/')
parser.add_argument('--DataDir', type=str, help = 'The directory where your data are found. e.g. /home/user/Documents/')
parser.add_argument('--AssemblyDir', type = str, help = 'Name of the subdirectory with assembly folders')
parser.add_argument('--Cores', type = int, default = 56, help = 'Number of processing cores to use. Default is 56.')
args = parser.parse_args()
#===============SQL Connect====================================================
connect = mysqlconnect('/home/tcampbell/scripts/Testdict.txt') #gets the connection info from config file
exec(connect) #executes declaration of variable using string from previous step
cnx = mysql.connector.connect(user=connect['User'], password= connect['Pass'], host= connect['IP'], database=connect['DB'])
engine = sqlalchemy.create_engine('mysql://' + connect['User'] + ':' + connect['Pass']+ '@' + connect['IP'] +'/'+ connect['DB']) # is used?
cursor = cnx.cursor()
#======================Begin Main=====================================
fastas = pd.read_csv(args.DataDir + args.AssemblyDir + 'fastalist.csv', header = None)#Fastalist is made in AssembledSequenceProcess.py
fastas.columns = ['FileName']
chuckfas, Cores = chunkdf(fastas, args.Cores)
d = [args.DataDir, args.AssemblyDir, args.Out]
#comment for debug 5/4/17
chuckfas = [i + d for i in chuckfas]
pool = multiprocessing.Pool(processes=Cores)
re = pool.map(applyblast, chuckfas)
# re = pool.apply(assemble,args = (chunkeddf), axis=1)
pool.close()
####END Main Guts######
cursor.close()
cnx.close()
return None
if __name__ == '__main__':
main()
| Python | 0 | |
4fe11f89c008909dd21451ac0e23dce86de7c849 | Add profiling dev script. | dev_scripts/profile_structure.py | dev_scripts/profile_structure.py | #!/usr/bin/env python
from pymatgen.io.vaspio import Poscar
import cProfile
import pstats
import os
p = Poscar.from_file("../test_files/POSCAR.LiFePO4", check_for_POTCAR=False)
s = p.structure
def test():
nn = s.get_sites_in_sphere([0, 0, 0], 20)
print len(nn)
cProfile.run('test()', 'testprof')
p = pstats.Stats('testprof')
p.sort_stats('cumulative').print_stats(20)
os.remove("testprof")
| Python | 0.000098 | |
aa8a87eabd97406c91b0474dc6018b618101b503 | add the code | monkey_sockets_socks5.py | monkey_sockets_socks5.py | #!/usr/bin/env python
# monkey patch socks5 support into sockets
import os
import socket
import struct
def _split_proxy(uri, port):
split_auth = uri.split("@")
if uri == "":
split_uri = []
elif len(split_auth) == 2:
split_first = split_auth[0].split(":")
split_second = split_auth[1].split(":")
if len(split_first) == 3:
split_uri = [int(split_first[0])] + split_first[1:] + [split_second[0], int(split_second[1])]
else:
split_uri = [int(split_first[0])] + split_first[1:] + [""] + [split_second[0], int(split_second[1])]
else:
split_small = split_auth[0].split(":")
split_uri = [int(split_small[0])] + [""] + [""] + [split_small[1]] + [int(split_small[2])]
if len(split_uri) != 5:
split_uri = None
elif split_uri[0] != port:
split_uri = None
return split_uri
# CAVEATS:
# only supports ipv4
# only supports socks5
# user/pass auth has not been tested
# if socks_proxy env variable is set, all socket connections on that port will use it
class Socks5Socket(socket.socket):
def connect(self, address):
# socks_proxy=<DESTPORT:>[username[:password]@]<PROXYHOST:><PROXYPORT>
socks_proxy = _split_proxy(os.getenv("socks_proxy",""), address[1])
if not socks_proxy:
true_socket.connect(self, address)
else:
# print "{socks_host}:{socks_port} -> {remote_host}:{remote_port}".format(socks_host=socks_proxy[3], socks_port=socks_proxy[4], remote_host=address[0], remote_port=address[1])
true_socket.connect(self, (socks_proxy[3], socks_proxy[4]))
auth_methods_available = 1
auth_methods = [0x00]
if socks_proxy[1]:
auth_methods_available += 1
auth_methods.append(0x02)
# greet the socks server
msg = struct.pack("!BB",0x05,auth_methods_available)
for auth_method in auth_methods:
msg += struct.pack("!B", auth_method)
# print msg.encode("hex")
self.send(msg)
resp = self.recv(2)
# print resp.encode("hex")
(version, auth_method) = struct.unpack("!BB", resp)
# authorize to the socks server
if auth_method == 0x00:
pass
elif auth_method == 0x02:
# TODO: test this :/
msg = struct.pack("!BBsBs", 0x01, len(socks_proxy[1]), socks_proxy[1], len(socks_proxy[2]), socks_proxy[2])
# print msg.encode("hex")
self.send(msg)
resp = self.recv(2)
# print resp.encode("hex")
(version, status) = struct.unpack("!BB", resp)
if status != 0:
self.close()
raise Exception("socks authorization failed")
else:
raise Exception("no acceptable socks authorization available")
# set connection to tcp/ip stream, ipv4
ipb = [int(b) for b in address[0].split(".")]
msg = struct.pack("!B B B B BBBB H",0x05,0x01,0x00,0x01,ipb[0],ipb[1],ipb[2],ipb[3],address[1])
# print msg.encode("hex")
self.send(msg)
resp = self.recv(10)
# print resp.encode("hex")
(version, status) = struct.unpack("!B B 8x", resp)
if status != 0:
self.close()
raise Exception("socks connection failed, error: " + status)
true_socket = socket.socket
socket.socket = Socks5Socket
| Python | 0.000265 | |
ea30b49012af2003049f4b1b7deeecb1232c7513 | Create permutations.py | permutations.py | permutations.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Permutations
#Problem level: 4 kyu
from itertools import groupby, permutations as perm
def permutations(string):
return [k for k,_ in groupby(sorted([''.join(comb) for comb in perm(string)]))]
| Python | 0.000202 | |
eb943bb45695472483352978060a94e0d48b5e4a | Add scatterplot | plot/scatter.py | plot/scatter.py | import matplotlib.pyplot as plt
def plot_scatter(x, y, ax=None, color=None, alpha=None, size=None, labels=None, title="Scatterplot", figsize=(10,6)):
# TODO: Add x, and y labels
# TODO: grid
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
fig.suptitle(title, fontsize=15)
else:
fig = ax.get_figure()
ax.scatter(x=x, y=y, c=color, alpha=alpha, s=size)
# LABEL - each of the points
if labels is not None:
for xx, yy, label in zip(x, y, labels):
plt.annotate(label, xy=(xx, yy), xytext=(7, 0),
textcoords='offset points',
ha='left', va='center')
return fig, ax
| Python | 0.000006 | |
02f207269f7d2773919e520e04ab8f9261357d4b | Add isup plugin | plugins/isup.py | plugins/isup.py | import requests
import urllib.parse
class Plugin:
def __call__(self, bot):
bot.on_respond(r"is (.*) (up|down)(\?)?", self.on_respond)
bot.on_respond(r"isup (.*)$", self.on_respond)
bot.on_help("isup", self.on_help)
def on_respond(self, bot, msg, reply):
url = "http://isitup.org/" + urllib.parse.quote(msg["match"].group(1)) + ".json"
headers = { "User-Agent": "SmartBot" }
res = requests.get(url, headers=headers).json()
if res["status_code"] == 1:
reply("{0} looks up for me.".format(res["domain"]))
else:
reply("{0} looks down for me.".format(res["domain"]))
def on_help(self, bot, msg, reply):
reply("Syntax: is <domain> up|down")
| Python | 0.000001 | |
8f2d22d05912711a6f2a771860124a378fd73b98 | add test cases | tests/chainer_tests/training_tests/triggers_tests/test_once_trigger.py | tests/chainer_tests/training_tests/triggers_tests/test_once_trigger.py | from __future__ import division
import numpy as np
import random
import six
import tempfile
import unittest
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
def get_expected(num):
return [i == 0 for i in six.moves.range(num)]
@testing.parameterize(
# iteration
{
'iter_per_epoch': 5, 'interval': (2, 'iteration'), 'resume': 4,
'expected': get_expected(7)},
# basic epoch
{
'iter_per_epoch': 1, 'interval': (3, 'epoch'), 'resume': 4,
'expected': get_expected(7)},
# fractional epoch
{
'iter_per_epoch': 2, 'interval': (1.5, 'epoch'), 'resume': 4,
'expected': get_expected(7)},
# unaligned epoch
{
'iter_per_epoch': 2.5, 'interval': (1, 'epoch'), 'resume': 3,
'expected': get_expected(7)},
# tiny epoch
{
'iter_per_epoch': 0.5, 'interval': (1, 'epoch'), 'resume': 4,
'expected': get_expected(7)},
)
class TestOnceTrigger(unittest.TestCase):
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(*self.interval)
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
self.assertEqual(trigger(trainer), expected)
trainer.updater.update()
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(*self.interval)
accumulated = False
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
trainer.updater.update()
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
def test_resumed_trigger_backward_compat(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
# old version does not save anything
np.savez(f, dummy=0)
trigger = training.triggers.OnceTrigger(*self.interval)
with testing.assert_warns(UserWarning):
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
testing.run_module(__name__, __file__)
| Python | 0.000011 | |
8c6b412e01e81a7c062ba8234ebafc6fca61651c | Add shovel test.quick for sanity check before pushing | shovel/test.py | shovel/test.py | # coding: utf-8
from __future__ import absolute_import, division, print_function
import subprocess
from collections import OrderedDict
from shovel import task
@task
def quick():
failed = OrderedDict.fromkeys(
['test', 'docs', 'spelling', 'doc8', 'flake8'], False)
failed['tests'] = bool(subprocess.call(['py.test', 'astrodynamics/']))
failed['docs'] = bool(subprocess.call(
['sphinx-build', '-W', '-b', 'html', 'docs', 'docs/_build/html']))
failed['spelling'] = bool(subprocess.call([
'sphinx-build', '-W', '-b', 'spelling', 'docs', 'docs/_build/html']))
failed['doc8'] = bool(subprocess.call(['doc8', 'docs']))
failed['flake8'] = bool(subprocess.call(['flake8']))
print('\nSummary:')
for k, v in failed.items():
print('{:8s}: {}'.format(k, 'Fail' if v else 'Pass'))
| Python | 0 | |
c645cb3402f7017550d78795298f042a39bb238b | add gae_fluidinfo for simple async requests on appengine | gae_fluidinfo.py | gae_fluidinfo.py | # -*- coding: utf-8 -*-
"""
A very thin wrapper on top of the Fluidinfo RESTful API
Copyright (c) 2009-2010 Seo Sanghyeon, Nicholas Tollervey and others
See README, AUTHORS and LICENSE for more information
"""
import sys
import urllib
import types
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
from google.appengine.api import urlfetch
# There are currently two instances of Fluidinfo. MAIN is the default standard
# instance and SANDBOX is a scratch version for testing purposes. Data in
# SANDBOX can (and will) be blown away.
MAIN = 'https://fluiddb.fluidinfo.com'
SANDBOX = 'https://sandbox.fluidinfo.com'
instance = MAIN
ITERABLE_TYPES = set((list, tuple))
SERIALIZABLE_TYPES = set((types.NoneType, bool, int, float, str, unicode, list,
tuple))
global_headers = {
'Accept': '*/*',
}
def login(username, password):
    """
    Store a Basic-auth 'Authorization' header built from the credentials;
    every subsequent request will send it.
    """
    credentials = username + ':' + password
    global_headers['Authorization'] = 'Basic ' + credentials.encode('base64').strip()
def logout():
    """
    Forget any stored 'Authorization' header so later calls go out anonymous.
    """
    global_headers.pop('Authorization', None)
def get(path, body=None, mime=None, tags=[], custom_headers={},
        async=False, **kw):
    """
    Convenience method for fluidinfo.call('GET', ...); returns the HTTP
    response, or the in-flight rpc when async=True.
    """
    return call('GET', path, body, mime, tags, custom_headers, async, **kw)


def post(path, body=None, mime=None, tags=[], custom_headers={},
         async=False, **kw):
    """
    Convenience method for fluidinfo.call('POST', ...); returns the HTTP
    response, or the in-flight rpc when async=True.
    """
    return call('POST', path, body, mime, tags, custom_headers, async, **kw)


def put(path, body=None, mime=None, tags=[], custom_headers={},
        async=False, **kw):
    """
    Convenience method for fluidinfo.call('PUT', ...); returns the HTTP
    response, or the in-flight rpc when async=True.
    """
    return call('PUT', path, body, mime, tags, custom_headers, async, **kw)


def delete(path, body=None, mime=None, tags=[], custom_headers={},
           async=False, **kw):
    """
    Convenience method for fluidinfo.call('DELETE', ...); returns the HTTP
    response, or the in-flight rpc when async=True.
    """
    return call('DELETE', path, body, mime, tags, custom_headers, async, **kw)


def head(path, body=None, mime=None, tags=[], custom_headers={},
         async=False, **kw):
    """
    Convenience method for fluidinfo.call('HEAD', ...); returns the HTTP
    response, or the in-flight rpc when async=True.
    """
    return call('HEAD', path, body, mime, tags, custom_headers, async, **kw)
def call(method, path, body=None, mime=None, tags=[], custom_headers={},
    async=False, **kw):
    """
    Makes a call to Fluidinfo

    method = HTTP verb. e.g. PUT, POST, GET, DELETE or HEAD
    path = Path appended to the instance to locate the resource in Fluidinfo
        this can be either a string OR a list of path elements.
    body = The request body (a dictionary will be translated to json,
        primitive types will also be jsonified)
    mime = The mime-type for the body of the request - will override the
        jsonification of primitive types
    tags = The list of tags to return if the request is to values
    headers = A dictionary containing additional headers to send in the request
    **kw = Query-string arguments to be appended to the URL

    Returns the fetched response, or the in-flight rpc when async=True.
    NOTE(review): `async` became a reserved word in Python 3.7; this module
    is Python 2 only as written (encode('base64'), urllib.quote).
    """
    # build the URL
    url = build_url(path)
    if kw:
        url = url + '?' + urllib.urlencode(kw)
    if tags and path.startswith('/values'):
        # /values based requests must have a tags list to append to the
        # url args (which are passed in as **kw), so append them so everything
        # gets urlencoded correctly below
        url = url + '&' + urllib.urlencode([('tag', tag) for tag in tags])
    # set the headers
    headers = global_headers.copy()
    if custom_headers:
        headers.update(custom_headers)
    # make sure the path is a string for the following elif check for PUT
    # based requests
    if isinstance(path, list):
        path = '/'+'/'.join(path)
    # Make sure the correct content-type header is sent
    if isinstance(body, dict):
        # jsonify dicts
        headers['content-type'] = 'application/json'
        body = json.dumps(body)
    elif method.upper() == 'PUT' and (
        path.startswith('/objects/') or path.startswith('/about')):
        # A PUT to an "/objects/" or "/about/" resource means that we're
        # handling tag-values. Make sure we handle primitive/opaque value types
        # properly.
        if mime:
            # opaque value (just set the mime type)
            headers['content-type'] = mime
        elif isprimitive(body):
            # primitive values need to be json-ified and have the correct
            # content-type set
            headers['content-type'] = 'application/vnd.fluiddb.value+json'
            body = json.dumps(body)
        else:
            # No way to work out what content-type to send to Fluidinfo so
            # bail out.
            raise TypeError("You must supply a mime-type")
    # Fire the request through App Engine's async urlfetch machinery.
    rpc = urlfetch.create_rpc()
    urlfetch.make_fetch_call(rpc, url, body, method, headers)
    if async:
        return rpc
    return result(rpc)
def result(rpc):
    """
    Resolve an async urlfetch rpc into its response, JSON-decoding the body
    in place when the content-type says so; returns None on fetch failure.
    """
    try:
        response = rpc.get_result()
        json_types = ('application/json', 'application/vnd.fluiddb.value+json')
        if response.content and response.headers['content-type'] in json_types:
            response.content = json.loads(response.content)
        return response
    except urlfetch.Error:
        return None
def isprimitive(body):
    """
    Given the body of a request will return a boolean to indicate if the
    value is a primitive value type.

    See:

    http://doc.fluidinfo.com/fluidDB/api/tag-values.html

    &

    http://bit.ly/hmrMzT

    For an explanation of the difference between primitive and opaque
    values.
    """
    kind = type(body)
    if kind not in SERIALIZABLE_TYPES:
        return False
    if kind in ITERABLE_TYPES:
        # Lists/tuples count as primitive only when every element is a string.
        return all(isinstance(x, basestring) for x in body)
    return True
def build_url(path):
    """
    Given a path that is either a string or list of path elements, will return
    the correct URL
    """
    if isinstance(path, list):
        quoted = [urllib.quote(element, safe='') for element in path]
        return instance + '/' + '/'.join(quoted)
    return instance + urllib.quote(path)
| Python | 0 | |
a4f5769944fc8190ec119ecf9ab5716bb7c38026 | Use python interface to build features | scripts/create_layer.py | scripts/create_layer.py | #!/usr/bin/env python3
# Written for Python 3.6
# Ideas:
# - Main function that converts 1 GeoJSON/shapefile to a vector tile layer with TerriaMap config and test files
# - Use asycnio
from contextlib import contextmanager
import os
import sys
from osgeo import ogr, osr
@contextmanager
def stdout_redirected(to):
'''
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
fd = sys.stdout.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
_redirect_stdout(to=to)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
def request_input(caption, default):
'Request input from the user, returning default if no input is given'
response = input('\x1b[94m' + '{} '.format(caption) + ('({}): '.format(default) if default else '') + '\x1b[0m')
return response if response != '' else default
def unique_with_prop(data_source, layername, prop):
'Determine if the property prop uniquely identifies every feature in the given layer of the DataSource'
layer = data_source.ExecuteSQL('SELECT COUNT(DISTINCT {1}) / COUNT(*) AS allunique FROM {0}'.format(layername, prop), dialect='SQLITE') # If one enjoys SQL attacking one's own files, then go ahead
return bool(layer.GetFeature(0).GetField('allunique'))
def create_layer(geometry_file):
'''
Create a vector tile layer with a given geometry file complete with mbtiles, server config,
TerriaMap files and test csvs
'''
data_source = ogr.Open(geometry_file)
if data_source is None:
print('{} could not be opened by ogr'.format(geometry_file))
return
layers = [data_source.GetLayerByIndex(i).GetName() for i in range(data_source.GetLayerCount())]
print('File has the following layers: {}'.format(', '.join(layers)))
layer_name = request_input('Which layer should be used?', layers[0] if len(layers) == 1 else '')
if layer_name not in layers:
print('Layer {} is not in file {}'.format(layer_name, geometry_file))
return
layer = data_source.GetLayerByName(layer_name)
generate_tiles_to = int(request_input('What zoom level should tiles be generated to?', 12))
layer_defn = layer.GetLayerDefn()
attributes = [layer_defn.GetFieldDefn(i).name for i in range(layer_defn.GetFieldCount())]
print('Attributes in file: {}'.format(', '.join(attributes)))
has_fid = yes_no_to_bool(request_input('Is there an FID attribute?', 'n'), False)
# Ask for the current FID attribute if there is one, otherwise add an FID and use that
fid_attribute = request_input('Which attribute should be used as an FID?', 'FID') if has_fid else 'FID'
server_url = request_input('Where is the vector tile server hosted?', 'http://localhost:8000/{}/{{z}}/{{x}}/{{y}}.pbf'.format(layername))
regionMapping_entries = OrderedDict()
regionMapping_entry_name = request_input('Name another regionMapping.json entry (leave blank to finish)', '')
while regionMapping_entry_name != '':
o = OrderedDict()
o['layerName'] = layer_name # Set in addRegionMap.js
o['server'] = server_url
o['regionProp'] = request_input('Which attribute should be used as the region property?', '')
# Test this property
if not unique_with_prop(shapefile, layername, o['regionProp']):
o['disambigProp'] = request_input('The given region property does not uniquely define every region. Which attribute should be used to disambiguate region matching?', '')
o['disambigRegionId'] = request_input('Which regionMapping definition does this disambiguation property come from?', '')
else:
print('The given region property uniquely defines each region.')
o['aliases'] = request_input('What aliases should this be available under? Separate aliases with a comma and space', '').split(', ')
o['description'] = description
regionMapping_entries[regionMapping_entry_name] = o
regionMapping_entry_name = request_input('Name another regionMapping.json entry (leave blank to finish)', '')
# Generate config files and tiles
# Start tippecanoe
p = subprocess.Popen(['tippecanoe', '-q', '-f', '-P', '-pp', '-pS', '-l', 'test', '-z', '12', '-d', '20', '-o', 'test.mbtiles'], stdin=subprocess.PIPE)
# Redirect C library stdout output to tippecanoe
with stdout_redirected(to=p.stdin):
# Create a GeoJSON file that gets output to stdout (and redirected to tippecanoe)
driver = ogr.GetDriverByName('GeoJSON')
dest_srs = osr.SpatialReference()
dest_srs.ImportFromEPSG(4326)
out_ds = driver.CreateDataSource('/vsistdout/')
# New polygon layer
out_layer = out_ds.CreateLayer(layer_name, dest_srs, geom_type=ogr.wkbMultiPolygon)
# Add an fid attribute if needed
if not has_fid:
fid_field = ogr.FieldDefn(fid_attribute, ogr.OFTInteger)
out_layer.CreateField(fid_field)
# Mirror inpput fields in output
for i in range(layer_defn.GetFieldCount()):
field_defn = layer_defn.GetFieldDefn(i)
out_layer.CreateField(field_defn)
# Get the output layer's feature definition
out_layer_defn = out_layer.GetLayerDefn()
# Iterate over features and add them to the output layer
for fid, feature in enumerate(layer):
out_feature = ogr.Feature(out_layer_defn)
# Set fields
for i in range(out_layer_defn.GetFieldCount()):
field_defn = out_layer_defn.GetFieldDefn(i)
field_name = field_defn.GetName()
if not has_fid and field_name == fid_attribute:
# Set FID
out_feature.SetField(out_layer.GetFieldDefn(i).GetNameRef(), fid)
else:
# Copy field value
out_feature.SetField(out_layer.GetFieldDefn(i).GetNameRef(), feature.GetField(layer_defn.GetFieldIndex(out_layer.GetFieldDefn(i).name)))
# Set geometry
geom = inFeature.GetGeometryRef()
out_feature.SetGeometry(geom.Clone())
# Add new feature to output layer
out_layer.CreateFeature(out_feature)
out_feature = None
data_source = None
out_ds = None
p.stdin.close()
p.wait()
| Python | 0 | |
cdcc45eb6982e68415632a8bcfbc5e7596e0a1cf | add resize_logos.py | scripts/resize_logos.py | scripts/resize_logos.py | #!/usr/bin/env python
import os
import os.path as op
from PIL import Image
dirPath = op.abspath('./logos')
out_dir = op.join(dirPath, 'resize')
if not op.exists(out_dir):
os.mkdir(out_dir)
supported_formats = ['png', 'gif', 'jpg']
for img_file in os.listdir(dirPath):
if img_file[-3:] not in supported_formats:
print('Extension for file {} not supported, skipped.'.format(img_file))
continue
print(img_file)
img_name = img_file[:-4]
print(img_name)
fpath = os.path.join(dirPath, img_file)
outPath = os.path.join(out_dir, img_name)
img = Image.open(fpath)
if img.mode == "CMYK":
img = img.convert("RGB")
img.thumbnail((190, 90), Image.ANTIALIAS)
img_w, img_h = img.size
background = Image.new('RGBA', (190, 90), (255, 255, 255, 255))
bg_w, bg_h = background.size
offset = int((bg_w - img_w) / 2), int((bg_h - img_h) / 2)
background.paste(img, offset)
background.save(outPath+"_thumb.png")
| Python | 0.000001 | |
ff994f8bfd7642fc95694d511a1cec81d0ba8f4d | fix bugs | plstackapi/planetstack/api/sites.py | plstackapi/planetstack/api/sites.py | from plstackapi.openstack.client import OpenStackClient
from plstackapi.openstack.driver import OpenStackDriver
from plstackapi.planetstack.api.auth import auth_check
from plstackapi.planetstack.models import Site
def add_site(auth, **fields):
driver = OpenStackDriver(client = auth_check(auth))
site = Site(**fields)
nova_fields = {'tenant_name': fields['login_base'],
'description': fields['name'],
'enabled': fields['enabled']}
tenant = driver.create_tenant(**nova_fields)
site.tenant_id=tenant.id
site.save()
return role
def update_site(auth, tenant_id, **fields):
    """Update the Site identified by its OpenStack tenant id.

    Returns the updated Site, or None when no matching site exists.
    """
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(tenant_id=tenant_id)
    if not sites:
        return None
    # Bug fix: was `Site[0]` (indexing the model class -> TypeError);
    # the first element of the queryset was intended.
    site = sites[0]
    nova_fields = {}
    # Bug fix: the guard checked 'description' but read fields['name'],
    # which raised KeyError whenever 'description' came without 'name'.
    # As in add_site, the site name maps to the tenant description.
    if 'name' in fields:
        nova_fields['description'] = fields['name']
    if 'enabled' in fields:
        nova_fields['enabled'] = fields['enabled']
    # NOTE(review): nova_fields is built but never pushed to the driver;
    # presumably a driver update-tenant call is still missing here.
    # Bug fix: `updtae` typo. TODO confirm the Site model exposes an
    # instance-level update() (Django querysets do, plain instances don't).
    site.update(**fields)
    return site
def delete_site(auth, filter=None):
    """Delete every Site matching `filter` along with its OpenStack tenant.

    Returns 1 (legacy PLCAPI-style success code).
    """
    driver = OpenStackDriver(client = auth_check(auth))
    # Mutable-default fix: `filter={}` shared one dict across all calls.
    # (The name `filter` shadows the builtin but is kept for keyword callers.)
    sites = Site.objects.filter(**(filter or {}))
    for site in sites:
        # Remove the backing tenant first, then the local record.
        driver.delete_tenant({'id': site.tenant_id})
        site.delete()
    return 1
def get_sites(auth, filter=None):
    """Return the Sites matching `filter` (all sites when omitted).

    auth_check() is invoked purely for its authentication side effect;
    its return value is not needed here (the original bound it to an
    unused local).
    """
    auth_check(auth)
    # Mutable-default fix: `filter={}` shared one dict across all calls.
    return Site.objects.filter(**(filter or {}))
| from plstackapi.openstack.client import OpenStackClient
from plstackapi.openstack.driver import OpenStackDriver
from plstackapi.planetstack.api.auth import auth_check
from plstackapi.planetstack.models import Site
def add_site(auth, **fields):
    """Create a Site record backed by a new OpenStack tenant.

    Returns the saved Site instance.
    """
    driver = OpenStackDriver(client = auth_check(auth))
    site = Site(**fields)
    # Syntax fix: the original read `fields['name',` (unclosed subscript),
    # which made the whole module fail to import.
    nova_fields = {'tenant_name': fields['login_base'],
                   'description': fields['name'],
                   'enabled': fields['enabled']}
    tenant = driver.create_tenant(**nova_fields)
    site.tenant_id = tenant.id
    site.save()
    # Bug fix: `return role` referenced an undefined name (NameError).
    return site
def update_site(auth, tenant_id, **fields):
    """Update the Site identified by its OpenStack tenant id.

    Returns the updated Site, or None when no matching site exists.
    """
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(tenant_id=tenant_id)
    if not sites:
        return None
    # Bug fix: was `Site[0]` (indexing the model class -> TypeError).
    site = sites[0]
    nova_fields = {}
    # Bug fix: the guard checked 'description' but read fields['name'];
    # the site name maps to the tenant description, as in add_site.
    if 'name' in fields:
        nova_fields['description'] = fields['name']
    if 'enabled' in fields:
        nova_fields['enabled'] = fields['enabled']
    # NOTE(review): nova_fields is built but never pushed to the driver.
    # Bug fix: `updtae` typo. TODO confirm the Site model exposes an
    # instance-level update() (Django querysets do, plain instances don't).
    site.update(**fields)
    return site
def delete_site(auth, filter=None):
    """Delete every Site matching `filter` along with its OpenStack tenant.

    Returns 1 (legacy PLCAPI-style success code).
    """
    driver = OpenStackDriver(client = auth_check(auth))
    # Mutable-default fix: `filter={}` shared one dict across all calls.
    sites = Site.objects.filter(**(filter or {}))
    for site in sites:
        # Remove the backing tenant first, then the local record.
        driver.delete_tenant({'id': site.tenant_id})
        site.delete()
    return 1
def get_sites(auth, filter=None):
    """Return the Sites matching `filter` (all sites when omitted).

    auth_check() is called purely for its authentication side effect.
    """
    auth_check(auth)
    # Mutable-default fix: `filter={}` shared one dict across all calls.
    return Site.objects.filter(**(filter or {}))
| Python | 0.000001 |
1be4972ca39408b8d4770b5722642996908c9a70 | add 5-for.py | python/5-for.py | python/5-for.py | #!/usr/bin/env python
for letter in 'python':
print "Current character is ", letter
fruits = ['banana', 'apple', 'mango']
for fruit in fruits:
print "Current fruit is ", fruit
for index in range(len(fruits)):
print "Current fruit is ", fruits[index]
print "Good bye!"
| Python | 0.003482 | |
4cef0dc3af25ec4c781ed04b28d425374f793702 | add socket comm class | socket_communication.py | socket_communication.py | #!/usr/bin/env python
import socket
class SocketCommunication:
    """Minimal UDP sender: open() connects, communicate() sends, close() closes.

    Generalized: the receiver host/port are now constructor parameters;
    the defaults preserve the original hard-coded address, so existing
    no-argument callers behave exactly as before.
    """

    def __init__(self, host='192.168.1.4', port=3000):
        self.RECEIVER_HOST = host  # The remote host
        self.PORT = port           # The same port as used by the server

    def open(self):
        """Create the UDP socket and record its default destination."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # connect() on a datagram socket just fixes the peer address,
        # letting communicate() use send() instead of sendto().
        self.sock.connect((self.RECEIVER_HOST, self.PORT))

    def close(self):
        """Release the socket created by open()."""
        self.sock.close()

    def communicate(self, data):
        """Send one datagram (bytes) to the configured receiver."""
        self.sock.send(data)
def main():
    """Script entry point; demo still unwritten (TODO: add example)."""
    pass
if __name__ == "__main__":
    # Only run the demo when executed directly, not when imported.
    main()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.