commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
bd5e4dc55341e6ec98bf17211d7c3c6fdb99a3b1 | Use country_template in test_extra_params | tests/core/test_extra_params.py | tests/core/test_extra_params.py | # -*- coding: utf-8 -*-
from openfisca_core import periods
from openfisca_core.columns import IntCol, BoolCol
from openfisca_core.periods import MONTH
from openfisca_core.variables import Variable
from openfisca_country_template import CountryTaxBenefitSystem
from openfisca_country_template.entities import Person
from openfisca_core.tools import assert_near
from openfisca_core.base_functions import requested_period_last_value
class formula_1(Variable):
column = IntCol
entity = Person
definition_period = MONTH
def function(self, simulation, period):
return simulation.calculate('formula_3', period, extra_params = [0])
class formula_2(Variable):
column = IntCol
entity = Person
definition_period = MONTH
def function(self, simulation, period):
return simulation.calculate('formula_3', period, extra_params = [1])
class formula_3(Variable):
column = IntCol
entity = Person
definition_period = MONTH
def function(self, simulation, period, choice):
return self.zeros() + choice
class formula_4(Variable):
column = BoolCol
entity = Person
base_function = requested_period_last_value
definition_period = MONTH
def function(self, simulation, period, choice):
return self.zeros() + choice
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = CountryTaxBenefitSystem()
tax_benefit_system.add_variables(formula_1, formula_2, formula_3, formula_4)
reference_period = periods.period(u'2013-01')
simulation = tax_benefit_system.new_scenario().init_from_attributes(
period = reference_period.first_month,
).new_simulation(debug = True)
formula_1_result = simulation.calculate('formula_1', period = reference_period)
formula_2_result = simulation.calculate('formula_2', period = reference_period)
formula_3_holder = simulation.holder_by_name['formula_3']
def test_cache():
assert_near(formula_1_result, [0])
assert_near(formula_2_result, [1])
def test_get_extra_param_names():
assert formula_3_holder.get_extra_param_names(period = None) == ('choice',)
def test_json_conversion():
print(formula_3_holder.to_value_json())
assert str(formula_3_holder.to_value_json()) == \
"{'2013-01': {'{choice: 1}': [1], '{choice: 0}': [0]}}"
def test_base_functions():
assert simulation.calculate('formula_4', '2013-01', extra_params = [0]) == 0
assert simulation.calculate('formula_4', '2013-01', extra_params = [1]) == 1
# With the 'requested_period_last_value' base_function,
# the value on an month can be infered from the year value, without running the function for that month
assert simulation.calculate('formula_4', "2013-04", extra_params = [1]) == 1
| # -*- coding: utf-8 -*-
from openfisca_core import periods
from openfisca_core.columns import IntCol, BoolCol
from openfisca_core.periods import MONTH
from openfisca_core.variables import Variable
import openfisca_dummy_country as dummy_country
from openfisca_dummy_country.entities import Individu
from openfisca_core.tools import assert_near
from openfisca_core.base_functions import requested_period_last_value
class formula_1(Variable):
column = IntCol
entity = Individu
definition_period = MONTH
def function(self, simulation, period):
return simulation.calculate('formula_3', period, extra_params = [0])
class formula_2(Variable):
column = IntCol
entity = Individu
definition_period = MONTH
def function(self, simulation, period):
return simulation.calculate('formula_3', period, extra_params = [1])
class formula_3(Variable):
column = IntCol
entity = Individu
definition_period = MONTH
def function(self, simulation, period, choice):
return self.zeros() + choice
class formula_4(Variable):
column = BoolCol
entity = Individu
base_function = requested_period_last_value
definition_period = MONTH
def function(self, simulation, period, choice):
return self.zeros() + choice
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = dummy_country.DummyTaxBenefitSystem()
tax_benefit_system.add_variables(formula_1, formula_2, formula_3, formula_4)
reference_period = periods.period(u'2013-01')
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period.first_month,
parent1 = dict(),
).new_simulation(debug = True)
formula_1_result = simulation.calculate('formula_1', period = reference_period)
formula_2_result = simulation.calculate('formula_2', period = reference_period)
formula_3_holder = simulation.holder_by_name['formula_3']
def test_cache():
assert_near(formula_1_result, [0])
assert_near(formula_2_result, [1])
def test_get_extra_param_names():
assert formula_3_holder.get_extra_param_names(period = None) == ('choice',)
def test_json_conversion():
print(formula_3_holder.to_value_json())
assert str(formula_3_holder.to_value_json()) == \
"{'2013-01': {'{choice: 1}': [1], '{choice: 0}': [0]}}"
def test_base_functions():
assert simulation.calculate('formula_4', '2013-01', extra_params = [0]) == 0
assert simulation.calculate('formula_4', '2013-01', extra_params = [1]) == 1
# With the 'requested_period_last_value' base_function,
# the value on an month can be infered from the year value, without running the function for that month
assert simulation.calculate('formula_4', "2013-04", extra_params = [1]) == 1
| Python | 0.000031 |
205df82b9eabed709db90c8de3473a883c9c2c1b | reorder tinymce toolbars | wheelcms_axle/settings/settings_tinymce.py | wheelcms_axle/settings/settings_tinymce.py | TINYMCE_DEFAULT_CONFIG = {
'theme': "advanced",
'content_css': '/static/css/wheel_content.css',
'style_formats': [
{ 'title': 'Images'},
{ 'title': 'Original Size Image', 'selector': 'img', 'attributes': {'class': 'img_content_original' }},
{ 'title': 'Thumbnail Image', 'selector': 'img', 'attributes': {'class': 'img_content_thumb' }},
{ 'title': 'Small Image', 'selector': 'img', 'attributes': {'class': 'img_content_small' }},
{ 'title': 'Medium Image', 'selector': 'img', 'attributes': {'class': 'img_content_medium'} },
{ 'title': 'Large Image', 'selector': 'img', 'attributes': {'class': 'img_content_large' }},
#{ 'title': 'Test'},
#{ 'title': "Boohoold", 'inline': 'b' },
],
'relative_urls': False,
'theme_advanced_toolbar_location':'top',
'theme_advanced_resizing':True,
'plugins':'table, paste, wheel_browser',
'table_styles' : "Header 1=header1;Header 2=header2;Header 3=header3",
'table_cell_styles' : "Header 1=header1;Header 2=header2;Header 3=header3;Table Cell=tableCel1",
'table_row_styles' : "Header 1=header1;Header 2=header2;Header 3=header3;Table Row=tableRow1",
'table_cell_limit' : 100,
'table_row_limit' : 5,
'table_col_limit' : 5,
'width':800,
'height':600,
'theme_advanced_buttons1' : "|,undo,redo,|,styleselect,formatselect,|,bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,bullist,numlist,|,outdent,indent,|,sub,sup,|,charmap",
'theme_advanced_buttons2' : "link,unlink,anchor,image,cleanup,code,hr,removeformat,visualaid,|,tablecontrols,|,pastetext,pasteword,selectall",
'paste_auto_cleanup_on_paste' : True
}
| TINYMCE_DEFAULT_CONFIG = {
'theme': "advanced",
'content_css': '/static/css/wheel_content.css',
'style_formats': [
{ 'title': 'Images'},
{ 'title': 'Original Size Image', 'selector': 'img', 'attributes': {'class': 'img_content_original' }},
{ 'title': 'Thumbnail Image', 'selector': 'img', 'attributes': {'class': 'img_content_thumb' }},
{ 'title': 'Small Image', 'selector': 'img', 'attributes': {'class': 'img_content_small' }},
{ 'title': 'Medium Image', 'selector': 'img', 'attributes': {'class': 'img_content_medium'} },
{ 'title': 'Large Image', 'selector': 'img', 'attributes': {'class': 'img_content_large' }},
#{ 'title': 'Test'},
#{ 'title': "Boohoold", 'inline': 'b' },
],
'relative_urls': False,
'theme_advanced_toolbar_location':'top',
'theme_advanced_resizing':True,
'plugins':'table, paste, wheel_browser',
'table_styles' : "Header 1=header1;Header 2=header2;Header 3=header3",
'table_cell_styles' : "Header 1=header1;Header 2=header2;Header 3=header3;Table Cell=tableCel1",
'table_row_styles' : "Header 1=header1;Header 2=header2;Header 3=header3;Table Row=tableRow1",
'table_cell_limit' : 100,
'table_row_limit' : 5,
'table_col_limit' : 5,
'width':800,
'height':600,
'theme_advanced_buttons1' : "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,styleselect,formatselect",
'theme_advanced_buttons2' : "bullist,numlist,|,outdent,indent,|,undo,redo,|,link,unlink,anchor,image,cleanup,help,code,hr,removeformat,visualaid,|,sub,sup,|,charmap",
'theme_advanced_buttons3': "tablecontrols,|,pastetext,pasteword,selectall",
'paste_auto_cleanup_on_paste' : True
}
| Python | 0.000001 |
e9bac7c9d980889cbab098934c392b7514c41a00 | remove printall plugin. | plugins/default.py | plugins/default.py | from alebot import Alebot, Hook, Event
class ConnectionReadyHook(Hook):
"""
This is a hook that can be subclassed in case you want to react
on a irc connection that is ready for commands. It waits for
the end of the motd, or the message that there is no motd.
The :func:`match` function was implemented to listen to the
correct events. You will just have to overwrite the :func`call`
to actually do something.
"""
def match(self, event):
return (event.name == '376' or event.name == '422')
class CommandHook(Hook):
"""
This is a hook that can be subclassed in case you want to react
to a message on a channel or in private. It will react to the
bot's current nickname followed by a colon and the command
specified in the command attribute.
"""
command = None
def match(self, event):
return (event.name == 'PRIVMSG' and event.body == '%s: %s' % (
self.bot.config.get('nick'), self.command))
@Alebot.hook
class SocketConnectedHook(Hook):
"""
As the bot does nothing itself, this plugin takes care of
identifying the bot with the server. Yeah, seriously.
It uses the made up `SOCK_CONNECTED` event that is not even
an actual IRC event..
"""
def match(self, event):
return (event.name == 'SOCK_CONNECTED')
def call(self, event):
print("Socket is ready, logging in.")
self.send_raw("NICK %s" % self.bot.config['nick'])
self.send_raw("USER %s * %s :%s" % (
self.bot.config['ident'],
self.bot.config['ident'],
self.bot.config['realname']
))
@Alebot.hook
class PingPong(Hook):
"""
As the bot does nothing by itself, this plugin takes care of
sending PONGs as answer to pings, as the bot won't even do that.
It matches the `PING` event to do that.
"""
def match(self, event):
return (event.name == 'PING')
def call(self, event):
print('Received ping, sending pong.')
self.send_raw('PONG %s' % event.body)
@Alebot.hook
class JoinOnConnect(ConnectionReadyHook):
"""
Join channels defined in the config file options `channels` on
connection. If there are any definied, if not, it does not
join any channels.
"""
def call(self, event):
print("Joining channels..")
channels = self.bot.config.get('channels', [])
for channel in channels:
self.send_raw('JOIN %s' % channel)
| from alebot import Alebot, Hook, Event
class ConnectionReadyHook(Hook):
"""
This is a hook that can be subclassed in case you want to react
on a irc connection that is ready for commands. It waits for
the end of the motd, or the message that there is no motd.
The :func:`match` function was implemented to listen to the
correct events. You will just have to overwrite the :func`call`
to actually do something.
"""
def match(self, event):
return (event.name == '376' or event.name == '422')
class CommandHook(Hook):
"""
This is a hook that can be subclassed in case you want to react
to a message on a channel or in private. It will react to the
bot's current nickname followed by a colon and the command
specified in the command attribute.
"""
command = None
def match(self, event):
return (event.name == 'PRIVMSG' and event.body == '%s: %s' % (
self.bot.config.get('nick'), self.command))
@Alebot.hook
class SocketConnectedHook(Hook):
"""
As the bot does nothing itself, this plugin takes care of
identifying the bot with the server. Yeah, seriously.
It uses the made up `SOCK_CONNECTED` event that is not even
an actual IRC event..
"""
def match(self, event):
return (event.name == 'SOCK_CONNECTED')
def call(self, event):
print("Socket is ready, logging in.")
self.send_raw("NICK %s" % self.bot.config['nick'])
self.send_raw("USER %s * %s :%s" % (
self.bot.config['ident'],
self.bot.config['ident'],
self.bot.config['realname']
))
@Alebot.hook
class PingPong(Hook):
"""
As the bot does nothing by itself, this plugin takes care of
sending PONGs as answer to pings, as the bot won't even do that.
It matches the `PING` event to do that.
"""
def match(self, event):
return (event.name == 'PING')
def call(self, event):
print('Received ping, sending pong.')
self.send_raw('PONG %s' % event.body)
@Alebot.hook
class JoinOnConnect(ConnectionReadyHook):
"""
Join channels defined in the config file options `channels` on
connection. If there are any definied, if not, it does not
join any channels.
"""
def call(self, event):
print("Joining channels..")
channels = self.bot.config.get('channels', [])
for channel in channels:
self.send_raw('JOIN %s' % channel)
@Alebot.hook
class PrintAll(Hook):
"""
Prints all server input to the terminal.
"""
def match(self, event):
return True
def call(self, event):
print(event.name, event.user, event.target, event.body)
| Python | 0 |
6015cb96e4a35112efcf0ee35e38c88a94a58004 | Add API key to headers | cogs/diffusion.py | cogs/diffusion.py | import asyncio
import json
import backoff
from typing import Any, Literal
from discord import Embed
from discord.ext import commands
from aiohttp import ClientResponseError
from bot import QTBot
from utils.custom_context import CustomContext
class DiffusionError(Exception):
pass
class Diffusion(commands.Cog):
INPUT = {
"input": {
"width": 512,
"height": 512,
"num_outputs": "1",
"guidance_scale": 7.5,
"prompt_strength": 0.8,
"num_inference_steps": 50,
}
}
URL = "https://replicate.com/api/models/stability-ai/stable-diffusion/versions/a9758cbfbd5f3c2094457d996681af52552901775aa2d6dd0b17fd15df959bef/predictions"
HEADERS = {"Content-Type": "application/json"}
def __init__(self, bot: QTBot):
with open("data/apikeys.json") as f:
self.api_key = json.load(f)["stable_diffusion"]
self.HEADERS.update({"Authorization": f"Token {self.api_key}"})
self.bot = bot
@backoff.on_exception(backoff.expo, ClientResponseError, max_tries=3)
async def req(
self,
verb: Literal["GET", "POST"],
url: str = "",
params: dict = {},
headers: dict = {},
data: dict = None,
) -> Any:
resp = await self.bot.aio_session.request(
verb, f"{self.URL}{url}", params=params, headers={**headers, **self.HEADERS}, json=data
)
resp.raise_for_status()
return await resp.json()
async def start_job(self, prompt: str) -> str:
payload = {**self.INPUT, "prompt": prompt}
resp = await self.req("POST", data=payload)
if resp["error"]:
raise DiffusionError(resp["error"])
return resp["uuid"]
async def check_progress(self, id: str) -> str:
total_checks = 0
while True:
resp = (await self.req("GET", f"/{id}"))["prediction"]
if total_checks >= 10:
raise asyncio.TimeoutError("Couldn't get a result after 20 seconds. Aborting.")
if resp["error"]:
raise DiffusionError(resp["error"])
if resp["completed_at"]:
return resp["output"][0]
total_checks += 1
asyncio.sleep(2)
@commands.command(aliases=["diffuse", "sd"])
async def diffusion(self, ctx: CustomContext, *, prompt: str) -> None:
try:
job_id = await self.start_job(prompt)
except DiffusionError as e:
return await ctx.error("API Error", str(e))
except ClientResponseError as e:
return await ctx.error("API Error", f"Received status code `{e.status}`\n{e.message}")
try:
image_url = await self.check_progress(job_id)
except DiffusionError as e:
return await ctx.error("API Error", str(e))
except ClientResponseError as e:
return await ctx.error("API Error", f"Received status code `{e.status}`\n{e.message}")
return await ctx.send(f"{ctx.author.mention}: {prompt}\n{image_url}")
def setup(bot):
bot.add_cog(Diffusion(bot))
| import asyncio
import backoff
from typing import Any, Literal
from discord import Embed
from discord.ext import commands
from aiohttp import ClientResponseError
from bot import QTBot
from utils.custom_context import CustomContext
class DiffusionError(Exception):
pass
class Diffusion(commands.Cog):
INPUT = {
"input": {
"width": 512,
"height": 512,
"num_outputs": "1",
"guidance_scale": 7.5,
"prompt_strength": 0.8,
"num_inference_steps": 50,
}
}
URL = "https://replicate.com/api/models/stability-ai/stable-diffusion/versions/a9758cbfbd5f3c2094457d996681af52552901775aa2d6dd0b17fd15df959bef/predictions"
def __init__(self, bot: QTBot):
self.bot = bot
@backoff.on_exception(backoff.expo, ClientResponseError, max_tries=3)
async def req(
self, verb: Literal["GET", "POST"], url: str = "", params: dict = None, headers: dict = None, data: dict = None
) -> Any:
resp = await self.bot.aio_session.request(verb, f"{self.URL}{url}", params=params, headers=headers, json=data)
resp.raise_for_status()
return await resp.json()
async def start_job(self, prompt: str) -> str:
payload = {**self.INPUT, "prompt": prompt}
resp = await self.req("POST", data=payload)
if resp["error"]:
raise DiffusionError(resp["error"])
return resp["uuid"]
async def check_progress(self, id: str) -> str:
total_checks = 0
while True:
resp = (await self.req("GET", f"/{id}"))["prediction"]
if total_checks >= 10:
raise asyncio.TimeoutError("Couldn't get a result after 20 seconds. Aborting.")
if resp["error"]:
raise DiffusionError(resp["error"])
if resp["completed_at"]:
return resp["output"][0]
total_checks += 1
asyncio.sleep(2)
@commands.command(aliases=["diffuse", "sd"])
async def diffusion(self, ctx: CustomContext, *, prompt: str) -> None:
try:
job_id = await self.start_job(prompt)
except DiffusionError as e:
return await ctx.error("API Error", str(e))
except ClientResponseError as e:
return await ctx.error("API Error", f"Received status code {e.status}\n{e.message}")
try:
image_url = await self.check_progress(job_id)
except DiffusionError as e:
return await ctx.error("API Error", str(e))
except ClientResponseError as e:
return await ctx.error("API Error", f"Received status code {e.status}\n{e.message}")
return await ctx.send(f"{ctx.author.mention}: {prompt}\n{image_url}")
def setup(bot):
bot.add_cog(Diffusion(bot))
| Python | 0 |
1363a09366594602562bcf34a5368c71299c4755 | Add zero in lol-regexp | plugins/lolrate.py | plugins/lolrate.py | from datetime import datetime
from toflib import cmd, Plugin
import re
class TimeSlice():
def __init__(self):
t = datetime.now()
self.date = t.date()
self.hour = t.hour
self.kevins = dict()
self.count = 0
def __str__(self):
return "%s %02dh-%02dh : %d lolz" % ( self.date.strftime("%d %b")
, self.hour
, self.hour+1 % 24
, self.count
)
def __cmp__(self, other):
return cmp ( (self.date, self.hour)
, (other.date, other.hour)
)
def __hash__(self):
return hash(self.date) + hash(self.hour)
def lol(self, nick, count):
self.kevins.setdefault(nick,0)
self.kevins[nick] += count
self.count += count
class PluginLolrate(Plugin):
def __init__(self, bot):
Plugin.__init__(self, bot)
self.lolRate = [TimeSlice()]
bot._mutable_attributes['lolRateDepth'] = int
def handle_msg(self, msg_text, chan, nick):
lulz = len(re.findall("[Ll]+[oO0]+[Ll]+", msg_text))
if lulz > 0:
ts = TimeSlice()
if ts != self.lolRate[0]:
self.lolRate.insert(0,ts)
if len(self.lolRate) > self.bot.lolRateDepth:
self.lolRate.pop()
self.lolRate[0].lol(nick,lulz)
@cmd(0)
def cmd_lulz(self, chan, args):
for lolade in self.lolRate:
self.say(str(lolade))
@cmd(0)
def cmd_kevin(self, chan, args):
kevins = dict()
for lolade in self.lolRate:
for kevin in lolade.kevins.iteritems():
kevins.setdefault(kevin[0],0)
kevins[kevin[0]] += kevin[1]
if len(kevins) > 0:
kevin = max(kevins,key=lambda a: kevins.get(a))
lolades = kevins[kevin]
self.say(str(kevin) + " est le Kevin du moment avec " + str(lolades) + " lolade" + ("s" if lolades > 1 else ""))
else:
self.say("pas de Kevin")
| from datetime import datetime
from toflib import cmd, Plugin
import re
class TimeSlice():
def __init__(self):
t = datetime.now()
self.date = t.date()
self.hour = t.hour
self.kevins = dict()
self.count = 0
def __str__(self):
return "%s %02dh-%02dh : %d lolz" % ( self.date.strftime("%d %b")
, self.hour
, self.hour+1 % 24
, self.count
)
def __cmp__(self, other):
return cmp ( (self.date, self.hour)
, (other.date, other.hour)
)
def __hash__(self):
return hash(self.date) + hash(self.hour)
def lol(self, nick, count):
self.kevins.setdefault(nick,0)
self.kevins[nick] += count
self.count += count
class PluginLolrate(Plugin):
def __init__(self, bot):
Plugin.__init__(self, bot)
self.lolRate = [TimeSlice()]
bot._mutable_attributes['lolRateDepth'] = int
def handle_msg(self, msg_text, chan, nick):
lulz = len(re.findall("[Ll]+[oO]+[Ll]+", msg_text))
if lulz > 0:
ts = TimeSlice()
if ts != self.lolRate[0]:
self.lolRate.insert(0,ts)
if len(self.lolRate) > self.bot.lolRateDepth:
self.lolRate.pop()
self.lolRate[0].lol(nick,lulz)
@cmd(0)
def cmd_lulz(self, chan, args):
for lolade in self.lolRate:
self.say(str(lolade))
@cmd(0)
def cmd_kevin(self, chan, args):
kevins = dict()
for lolade in self.lolRate:
for kevin in lolade.kevins.iteritems():
kevins.setdefault(kevin[0],0)
kevins[kevin[0]] += kevin[1]
if len(kevins) > 0:
kevin = max(kevins,key=lambda a: kevins.get(a))
lolades = kevins[kevin]
self.say(str(kevin) + " est le Kevin du moment avec " + str(lolades) + " lolade" + ("s" if lolades > 1 else ""))
else:
self.say("pas de Kevin")
| Python | 0.999315 |
4d2f3431c587015d6962250fcc4ebcda06f0f988 | Update TOKEN_INCORRECT_CVV error in dummy payment | saleor/payment/gateways/dummy/__init__.py | saleor/payment/gateways/dummy/__init__.py | import uuid
from typing import Optional
from ... import TransactionKind
from ...interface import GatewayConfig, GatewayResponse, PaymentData
TOKEN_PREAUTHORIZE_SUCCESS = "4111111111111112"
TOKEN_PREAUTHORIZE_DECLINE = "4111111111111111"
TOKEN_EXPIRED = "4000000000000069"
TOKEN_INSUFFICIENT_FUNDS = "4000000000009995"
TOKEN_INCORRECT_CVV = "4000000000000127"
TOKEN_DECLINE = "4000000000000002"
PREAUTHORIZED_TOKENS = [TOKEN_PREAUTHORIZE_DECLINE, TOKEN_PREAUTHORIZE_SUCCESS]
TOKEN_VALIDATION_MAPPING = {
TOKEN_EXPIRED: "Card expired",
TOKEN_INSUFFICIENT_FUNDS: "Insufficient funds",
TOKEN_INCORRECT_CVV: "Incorrect CVV",
TOKEN_DECLINE: "Card declined",
TOKEN_PREAUTHORIZE_DECLINE: "Card declined",
}
def dummy_success():
return True
def validate_token(token: Optional[str]):
return TOKEN_VALIDATION_MAPPING.get(token, None) if token else None
def get_client_token(**_):
return str(uuid.uuid4())
def authorize(
payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
success = dummy_success()
error = None
if not success:
error = "Unable to authorize transaction"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.AUTH,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def void(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
error = None
success = dummy_success()
if not success:
error = "Unable to void the transaction."
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.VOID,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def capture(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
"""Perform capture transaction."""
error = validate_token(payment_information.token)
success = not error
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.CAPTURE,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def confirm(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
"""Perform confirm transaction."""
error = None
success = dummy_success()
if not success:
error = "Unable to process capture"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.CAPTURE,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def refund(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
error = None
success = dummy_success()
if not success:
error = "Unable to process refund"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.REFUND,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def process_payment(
payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
"""Process the payment."""
token = payment_information.token
if token in PREAUTHORIZED_TOKENS:
authorize_response = authorize(payment_information, config)
if not config.auto_capture:
return authorize_response
return capture(payment_information, config)
| import uuid
from typing import Optional
from ... import TransactionKind
from ...interface import GatewayConfig, GatewayResponse, PaymentData
TOKEN_PREAUTHORIZE_SUCCESS = "4111111111111112"
TOKEN_PREAUTHORIZE_DECLINE = "4111111111111111"
TOKEN_EXPIRED = "4000000000000069"
TOKEN_INSUFFICIENT_FUNDS = "4000000000009995"
TOKEN_INCORRECT_CVV = "4000000000000127"
TOKEN_DECLINE = "4000000000000002"
PREAUTHORIZED_TOKENS = [TOKEN_PREAUTHORIZE_DECLINE, TOKEN_PREAUTHORIZE_SUCCESS]
TOKEN_VALIDATION_MAPPING = {
TOKEN_EXPIRED: "Card expired",
TOKEN_INSUFFICIENT_FUNDS: "Insufficient funds",
TOKEN_INCORRECT_CVV: "Incorrect cvv",
TOKEN_DECLINE: "Card declined",
TOKEN_PREAUTHORIZE_DECLINE: "Card declined",
}
def dummy_success():
return True
def validate_token(token: Optional[str]):
return TOKEN_VALIDATION_MAPPING.get(token, None) if token else None
def get_client_token(**_):
return str(uuid.uuid4())
def authorize(
payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
success = dummy_success()
error = None
if not success:
error = "Unable to authorize transaction"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.AUTH,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def void(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
error = None
success = dummy_success()
if not success:
error = "Unable to void the transaction."
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.VOID,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def capture(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
"""Perform capture transaction."""
error = validate_token(payment_information.token)
success = not error
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.CAPTURE,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def confirm(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
"""Perform confirm transaction."""
error = None
success = dummy_success()
if not success:
error = "Unable to process capture"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.CAPTURE,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def refund(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
error = None
success = dummy_success()
if not success:
error = "Unable to process refund"
return GatewayResponse(
is_success=success,
action_required=False,
kind=TransactionKind.REFUND,
amount=payment_information.amount,
currency=payment_information.currency,
transaction_id=payment_information.token,
error=error,
)
def process_payment(
payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
"""Process the payment."""
token = payment_information.token
if token in PREAUTHORIZED_TOKENS:
authorize_response = authorize(payment_information, config)
if not config.auto_capture:
return authorize_response
return capture(payment_information, config)
| Python | 0 |
7b77d2569b8056c4c1b184503165a81a426df2e9 | Allow configuring separator in headeranchor | mdownx/headeranchor.py | mdownx/headeranchor.py | """
mdownx.headeranchor
An extension for Python Markdown.
Github header anchors
MIT license.
Copyright (c) 2014 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
from markdown.extensions.headerid import slugify, stashedHTML2text, itertext
LINK = '<a name="user-content-%(id)s" href="#%(id)s" class="headeranchor-link" aria-hidden="true"><span class="headeranchor"></span></a>'
class HeaderAnchorTreeprocessor(Treeprocessor):
def run(self, root):
""" Add header anchors """
for tag in root.getiterator():
if tag.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
if "id" in tag.attrib:
id = tag.get('id')
else:
id = stashedHTML2text(''.join(itertext(tag)), self.md)
id = slugify(id, self.config.get('sep'))
tag.set('id', id)
tag.text = self.markdown.htmlStash.store(
LINK % {"id": id},
safe=True
) + tag.text
return root
class HeaderAnchorExtension(Extension):
def __init__(self, configs):
self.config = {
'sep': ['-', "Separator to use when creating header ids - Default: '-'"]
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
"""Add HeaderAnchorTreeprocessor to Markdown instance"""
self.processor = HeaderAnchorTreeprocessor(md)
self.processor.config = self.getConfigs()
self.processor.md = md
if 'toc' in md.treeprocessors.keys():
insertion = ">toc"
else:
insertion = ">_end"
md.treeprocessors.add("headeranchor", self.processor, insertion)
md.registerExtension(self)
def makeExtension(configs={}):
return HeaderAnchorExtension(configs=configs)
| """
mdownx.headeranchor
An extension for Python Markdown.
Github style tasklists
MIT license.
Copyright (c) 2014 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
from markdown.extensions.headerid import slugify, stashedHTML2text, itertext
LINK = '<a name="user-content-%(id)s" href="#%(id)s" class="headeranchor-link" aria-hidden="true"><span class="headeranchor"></span></a>'
class HeaderAnchorTreeprocessor(Treeprocessor):
    """Treeprocessor that injects an anchor link at the start of each header."""

    def run(self, root):
        """ Add header anchors """
        for tag in root.getiterator():
            if tag.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
                if "id" in tag.attrib:
                    # Reuse an id already assigned (e.g. by the headerid ext).
                    id = tag.get('id')
                else:
                    # Derive an id from the header's visible text, with any
                    # stashed inline HTML converted back to text first.
                    id = stashedHTML2text(''.join(itertext(tag)), self.md)
                    id = slugify(id, '-')
                    tag.set('id', id)
                # Stash the raw anchor HTML so serialization leaves it
                # untouched, then prepend it to the header's text.
                # NOTE(review): this line uses self.markdown while the branch
                # above uses self.md; both are expected to reference the same
                # Markdown instance -- confirm against the markdown version
                # in use.
                tag.text = self.markdown.htmlStash.store(
                    LINK % {"id": id},
                    safe=True
                ) + tag.text
        return root
class HeaderAnchorExtension(Extension):
    """Markdown extension wiring HeaderAnchorTreeprocessor into the pipeline."""

    def extendMarkdown(self, md, md_globals):
        """Add HeaderAnchorTreeprocessor to Markdown instance"""
        processor = HeaderAnchorTreeprocessor(md)
        processor.md = md
        self.processor = processor
        # Run after 'toc' when that treeprocessor is registered, else last.
        position = ">toc" if 'toc' in md.treeprocessors.keys() else ">_end"
        md.treeprocessors.add("headeranchor", processor, position)
        md.registerExtension(self)
def makeExtension(configs=None):
    """Entry point used by the markdown library to construct the extension.

    A None default replaces the original mutable `{}` default argument,
    which would otherwise be a single dict object shared across calls; an
    empty mapping is created per call instead.
    """
    return HeaderAnchorExtension(configs=configs if configs is not None else {})
| Python | 0 |
bfb014b65932902f9cc07bc85c3aadfdb320c438 | add static config | chapter5/growth_studio/settings.py | chapter5/growth_studio/settings.py | """
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Extra source locations the staticfiles finders search during development.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static/'),
)
# Destination directory `collectstatic` copies into for deployment; kept
# under the settings package dir, distinct from the BASE_DIR source above.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
| """
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
| Python | 0 |
a9fd9bcecc5d237d767a2fbb773e0780cce7fa99 | Add combinations of all the python bins. | tests/functional/test_create.py | tests/functional/test_create.py | import os
import sys
import pytest
import scripttest
# True on native Windows and on IronPython hosted by Windows.
IS_WINDOWS = (
    sys.platform.startswith("win") or
    (sys.platform == "cli" and os.name == "nt")
)
IS_26 = sys.version_info[:2] == (2, 6)

# Interpreters to create virtualenvs with: absolute Windows install paths,
# PATH-relative names, and None for the default interpreter (no --python).
# Missing interpreters are expected to make individual parametrized cases
# fail on machines that lack them.
PYTHON_BINS = [
    "C:\\Python27\\python.exe",
    "C:\\Python27-x64\\python.exe",
    "C:\\Python33\\python.exe",
    "C:\\Python33-x64\\python.exe",
    "C:\\Python34\\python.exe",
    "C:\\Python34-x64\\python.exe",
    "C:\\PyPy\\pypy.exe",
    "C:\\PyPy3\\pypy.exe",
    None,
    "python",
    "python2.6",
    "python2.7",
    "python3.2",
    "python3.3",
    "python3.4",
    "pypy",
]
@pytest.yield_fixture
def env(request):
    """Provide a scripttest environment; its scratch dir is wiped on teardown."""
    # NOTE(review): pytest.yield_fixture is deprecated in modern pytest in
    # favour of pytest.fixture -- fine for the pytest versions this suite
    # targets; confirm before upgrading.
    env = scripttest.TestFileEnvironment()
    try:
        yield env
    finally:
        env.clear()
@pytest.mark.parametrize('python', PYTHON_BINS)
def test_create_via_script(env, python):
    """Creating an env via the `virtualenv` script yields the expected files."""
    args = ['virtualenv', 'myenv']
    if python:
        args += ['--python', python]
    result = env.run(*args)
    if IS_WINDOWS:
        expected = [
            'myenv\\Scripts\\activate.bat',
            'myenv\\Scripts\\activate.ps1',
            'myenv\\Scripts\\activate_this.py',
            'myenv\\Scripts\\deactivate.bat',
            'myenv\\Scripts\\pip.exe',
            'myenv\\Scripts\\python.exe',
        ]
    else:
        expected = [
            'myenv/bin/activate.sh',
            'myenv/bin/activate_this.py',
            'myenv/bin/python',
        ]
    for path in expected:
        assert path in result.files_created
@pytest.mark.parametrize('python', PYTHON_BINS)
def test_create_via_module(env, python):
    """Running `python -mvirtualenv` yields the expected files."""
    # Python 2.6 cannot run a package with -m directly; target __main__.
    module = '-mvirtualenv.__main__' if IS_26 else '-mvirtualenv'
    args = ['python', module, 'myenv']
    if python:
        args += ['--python', python]
    result = env.run(*args)
    if IS_WINDOWS:
        expected = [
            'myenv\\Scripts\\activate.bat',
            'myenv\\Scripts\\activate.ps1',
            'myenv\\Scripts\\activate_this.py',
            'myenv\\Scripts\\deactivate.bat',
            'myenv\\Scripts\\pip.exe',
            'myenv\\Scripts\\python.exe',
        ]
    else:
        expected = [
            'myenv/bin/activate.sh',
            'myenv/bin/activate_this.py',
            'myenv/bin/python',
        ]
    for path in expected:
        assert path in result.files_created
| import os
import sys
import pytest
import scripttest
is_windows = (
sys.platform.startswith("win") or
(sys.platform == "cli" and os.name == "nt")
)
is_26 = sys.version_info[:2] == (2, 6)
@pytest.yield_fixture
def env(request):
env = scripttest.TestFileEnvironment()
try:
yield env
finally:
env.clear()
def test_create_via_script(env):
result = env.run('virtualenv', 'myenv')
if is_windows:
assert 'myenv\\Scripts\\activate.bat' in result.files_created
assert 'myenv\\Scripts\\activate.ps1' in result.files_created
assert 'myenv\\Scripts\\activate_this.py' in result.files_created
assert 'myenv\\Scripts\\deactivate.bat' in result.files_created
assert 'myenv\\Scripts\\pip.exe' in result.files_created
assert 'myenv\\Scripts\\python.exe' in result.files_created
else:
assert 'myenv/bin/activate.sh' in result.files_created
assert 'myenv/bin/activate_this.py' in result.files_created
assert 'myenv/bin/python' in result.files_created
def test_create_via_module(env):
result = env.run('python', '-mvirtualenv.__main__' if is_26 else '-mvirtualenv', 'myenv')
if is_windows:
assert 'myenv\\Scripts\\activate.bat' in result.files_created
assert 'myenv\\Scripts\\activate.ps1' in result.files_created
assert 'myenv\\Scripts\\activate_this.py' in result.files_created
assert 'myenv\\Scripts\\deactivate.bat' in result.files_created
assert 'myenv\\Scripts\\pip.exe' in result.files_created
assert 'myenv\\Scripts\\python.exe' in result.files_created
else:
assert 'myenv/bin/activate.sh' in result.files_created
assert 'myenv/bin/activate_this.py' in result.files_created
assert 'myenv/bin/python' in result.files_created
| Python | 0 |
c407d023a59b5863b5890836c17a1aa1208244fa | use local_base_url from webfront to compose the API URL | tests/functional/test_webapi.py | tests/functional/test_webapi.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import requests
from ava.util.tests import AgentTest
SUCCESS = 'success'
ERROR = 'error'
class TestWebAPI(AgentTest):
    """Functional tests for the agent's web API."""

    # Filled in by setUpClass from the running agent's webfront service.
    api_url = ''

    @classmethod
    def setUpClass(cls):
        AgentTest.setUpClass()
        # Compose the API URL from the webfront's local base URL rather than
        # hard-coding host/port, so the test follows the agent configuration.
        # NOTE(review): assumes local_base_url ends with '/' -- confirm.
        webfront = cls.agent.context().lookup('webfront')
        cls.api_url = webfront.local_base_url + 'api'

    def test_ping(self):
        r = requests.get(self.api_url + '/ping')
        assert r.status_code == 200
        data = r.json()
        assert data['status'] == SUCCESS
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import requests
from ava.util.tests import AgentTest
SUCCESS = 'success'
ERROR = 'error'
class TestWebAPI(AgentTest):
api_url = 'http://127.0.0.1:5080/api'
def test_ping(self):
r = requests.get(self.api_url + '/ping')
assert r.status_code == 200
data = r.json()
assert data['status'] == SUCCESS
| Python | 0 |
6028ae4c2c75a29c0a050429f7ab76da129791fd | Improve keosd_auto_launch_test by checking stderr | tests/keosd_auto_launch_test.py | tests/keosd_auto_launch_test.py | #!/usr/bin/env python3
# This script tests that cleos launches keosd automatically when keosd is not
# running yet.
import subprocess
def run_cleos_wallet_command(command: str, no_auto_keosd: bool):
    """Run the given cleos command and return subprocess.CompletedProcess."""
    cleos = './programs/cleos/cleos'
    flags = ['--no-auto-keosd'] if no_auto_keosd else []
    return subprocess.run(
        [cleos] + flags + ['wallet', command],
        check=False,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE)
def stop_keosd():
    """Stop the default keosd instance."""
    # Best effort: the exit status is ignored, so this is a no-op when
    # keosd is not running.
    run_cleos_wallet_command('stop', no_auto_keosd=True)
def keosd_auto_launch_test():
    """Test that keos auto-launching works but can be optionally inhibited."""
    stop_keosd()
    # With auto-launch disabled, cleos cannot reach keosd and must fail.
    result = run_cleos_wallet_command('list', no_auto_keosd=True)
    assert result.returncode != 0
    assert b'Failed to connect to keosd' in result.stderr
    # With auto-launch enabled, cleos spawns keosd and the command succeeds.
    result = run_cleos_wallet_command('list', no_auto_keosd=False)
    assert result.returncode == 0
    assert b'launched' in result.stderr
# Always stop keosd afterwards so no daemon instance is left running,
# even when the test raises.
try:
    keosd_auto_launch_test()
finally:
    stop_keosd()
| #!/usr/bin/env python3
# This script tests that cleos launches keosd automatically when keosd is not
# running yet.
import subprocess
def run_cleos_wallet_command(command: str, no_auto_keosd: bool):
"""Run the given cleos command and return subprocess.CompletedProcess."""
args = ['./programs/cleos/cleos']
if no_auto_keosd:
args.append('--no-auto-keosd')
args += 'wallet', command
return subprocess.run(args,
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def stop_keosd():
"""Stop the default keosd instance."""
run_cleos_wallet_command('stop', no_auto_keosd=True)
def keosd_auto_launch_test():
"""Test that keos auto-launching works but can be optionally inhibited."""
stop_keosd()
# Make sure that when '--no-auto-keosd' is given, keosd is not started by
# cleos.
assert run_cleos_wallet_command('list',
no_auto_keosd=True).returncode != 0
# Verify that keosd auto-launching works.
assert run_cleos_wallet_command('list',
no_auto_keosd=False).returncode == 0
try:
keosd_auto_launch_test()
finally:
stop_keosd()
| Python | 0.000001 |
ae1469e450616ba1ac50958262c2c834b75fc77c | store reference type in lowercase | citeproc/source/bibtex/bibparse.py | citeproc/source/bibtex/bibparse.py | #!/usr/bin/python
#
# Simple, naive bibtex parser
#
# Vassilios Karakoidas (2008) - vassilios.karakoidas@gmail.com
#
# bib categories
#
# @Article{
# @Book{
# @Booklet{
# @InBook{
# @InCollection{
# @InProceedings{
# @Manual{
# @MastersThesis{
# @Misc{
# @PhDThesis{
# @Proceedings{
# @TechReport{
# @Unpublished{
#
import re
import os
from io import StringIO
class BibtexEntry:
    """A single BibTeX entry: its citation key, entry type and field data.

    Attributes:
        key: the citation key (the text between '{' and ',' in the head).
        btype: the entry type (article, book, ...).
        data: mapping of field name -> value; always contains 'filename',
            the path of the .bib file the entry came from.
    """

    def __init__(self, bibfile):
        self.key = ''
        self.data = {}
        self.btype = ''
        self.data['filename'] = bibfile

    def getKey(self, key):
        """Return True if `key` matches this entry's key (case-insensitive)."""
        return key.lower().strip() == self.key.lower()

    def search(self, keywords):
        """Return True if any keyword occurs (case-insensitively) in any field."""
        for word in keywords:
            for value in self.data.values():
                if word.lower() in value.lower():
                    return True
        return False

    def __get_pdf_name(self):
        """Return the path of a readable companion PDF next to the .bib file,
        or None when the entry has no key, the path cannot be derived, or the
        PDF is not readable."""
        if len(self.key) == 0:
            return None
        m = re.match(r'(.+/[^.]+)\.bib', self.data['filename'])
        if m is None:
            return None
        filename = "%s/%s.pdf" % (m.group(1).strip(), self.key.lower())
        # Bug fix: os.access() takes an access mode such as os.R_OK, not the
        # open() flag os.O_RDONLY (whose value 0 only tested existence).
        if os.access(filename, os.R_OK):
            return filename
        return None

    def has_pdf(self):
        """Return True if a readable companion PDF exists for this entry."""
        return self.__get_pdf_name() is not None

    def export(self):
        return self.__str__()

    def totext(self):
        # Stub kept for interface compatibility; renders nothing yet.
        return

    def tohtml(self):
        # Stub kept for interface compatibility; renders nothing yet.
        return

    def __str__(self):
        """Render the entry back as BibTeX (type lowercased, fields titled)."""
        result = StringIO()
        result.write("@%s{%s,\n" % (self.btype.lower().strip(), self.key.strip()))
        for k, v in self.data.items():
            result.write("\t%s = {%s},\n" % (k.title().strip(), v.strip()))
        filename = self.__get_pdf_name()
        if filename is not None:
            result.write("\tpdf-file = {%s},\n" % (filename,))
        result.write('}\n')
        return result.getvalue()
def parse_bib(bibfile):
    """Parse a .bib file into a dict mapping citation key -> BibtexEntry.

    Naive line-based parser: an '@Type{key,' line starts a new entry and
    subsequent `field = {value}` / `field = "value"` lines fill its data.
    Returns an empty dict for a file containing no entries (the original
    crashed with AttributeError in that case), and always closes the file
    thanks to the `with` block.
    """
    bibitems = {}
    re_head = re.compile(r'@([a-zA-Z]+)[ ]*\{[ ]*(.*),')
    re_value = re.compile(r'["{](.+)["}]')
    current = None
    with open(bibfile, "r") as bib_file:
        for line in bib_file:
            mr = re_head.match(line.strip())
            if mr is not None:
                # Head of a new entry: store the previous one, if any.
                if current is not None:
                    bibitems[current.key] = current
                current = BibtexEntry(bibfile)
                current.key = mr.group(2).strip()
                # Store the type lowercased so lookups and output are
                # case-insensitive with respect to the input file.
                current.btype = mr.group(1).strip().lower()
                continue
            # Field lines before the first head line are ignored (the
            # original swallowed them via AttributeError).
            if current is None or '=' not in line:
                continue
            # Split on the first '=' only so values containing '=' survive
            # (the original split('=')[1] truncated such values).
            key, _, raw_value = line.partition('=')
            mr = re_value.search(raw_value.strip())
            if mr is not None:
                current.data[key.strip()] = mr.group(1).strip()
    if current is not None:
        bibitems[current.key] = current
    return bibitems
| #!/usr/bin/python
#
# Simple, naive bibtex parser
#
# Vassilios Karakoidas (2008) - vassilios.karakoidas@gmail.com
#
# bib categories
#
# @Article{
# @Book{
# @Booklet{
# @InBook{
# @InCollection{
# @InProceedings{
# @Manual{
# @MastersThesis{
# @Misc{
# @PhDThesis{
# @Proceedings{
# @TechReport{
# @Unpublished{
#
import re
import os
from io import StringIO
class BibtexEntry:
def __init__(self, bibfile):
self.key = ''
self.data = {}
self.btype = ''
self.data['filename'] = bibfile
def getKey(self, key):
if(key.lower().strip() == self.key.lower()):
return True
return False
def search(self, keywords):
for word in keywords:
for (k, v) in self.data.items():
try:
v.lower().index(word.lower())
return True
except ValueError:
continue
return False
def __get_pdf_name(self):
if len(self.key) == 0:
return None
m = re.match('(.+/[^.]+)\\.bib', self.data['filename'])
if m == None:
return None
filename = "%s/%s.pdf" % ( m.group(1).strip(), self.key.lower() )
if os.access(filename, os.O_RDONLY) == 1:
return filename
return None
def has_pdf(self):
return (self.__get_pdf_name() != None)
def export(self):
return self.__str__()
def totext(self):
return
def tohtml(self):
return
def __str__(self):
result = StringIO()
result.write("@%s{%s,\n" % ( self.btype.lower().strip(), self.key.strip() ))
for k, v in self.data.items():
result.write("\t%s = {%s},\n" % ( k.title().strip(), v.strip() ))
filename = self.__get_pdf_name()
if filename != None:
result.write("\tpdf-file = {%s},\n" % ( filename, ))
result.write('}\n')
return result.getvalue()
def parse_bib(bibfile):
    """Parse a .bib file into a dict mapping citation key -> BibtexEntry."""
    bibitems = {}
    bib_file = open(bibfile, "r")
    re_head = re.compile('@([a-zA-Z]+)[ ]*\{[ ]*(.*),')
    current = None
    for l in bib_file:
        mr = re_head.match(l.strip())
        if mr != None:
            # Head line of a new entry: store the previous entry, if any.
            if current == None:
                current = BibtexEntry(bibfile)
            else:
                bibitems[current.key] = current
                current = BibtexEntry(bibfile)
            current.key = mr.group(2).strip()
            # NOTE(review): btype keeps the input's case here, while
            # BibtexEntry.__str__ lowercases it on output -- confirm whether
            # it should be normalized at parse time instead.
            current.btype = mr.group(1).strip()
            continue
        try:
            # index() raises ValueError for non-field lines; before the first
            # head line current is None, raising AttributeError below -- both
            # are deliberately swallowed to skip the line.
            l.index('=')
            kv_data = l.split('=')
            key = kv_data[0].strip()
            mr = re.search('["{](.+)["}]',kv_data[1].strip())
            if mr != None:
                current.data[key] = mr.group(1).strip()
        except (ValueError, AttributeError):
            continue
    # NOTE(review): raises AttributeError for a file with no entries
    # (current stays None).
    bibitems[current.key] = current
    bib_file.close()
    return bibitems
| Python | 0.000334 |
d43ddab5908a543236a05860fb15658ec154aa5b | Fix import in test | tests/outputs/gstreamer_test.py | tests/outputs/gstreamer_test.py | import multiprocessing
import unittest
from mopidy.outputs.gstreamer import GStreamerOutput
from mopidy.utils.path import path_to_uri
from mopidy.utils.process import pickle_connection
from tests import data_folder, SkipTest
class GStreamerOutputTest(unittest.TestCase):
    """Tests driving GStreamerOutput through its message-queue protocol."""

    def setUp(self):
        self.song_uri = path_to_uri(data_folder('song1.wav'))
        self.output_queue = multiprocessing.Queue()
        self.core_queue = multiprocessing.Queue()
        self.output = GStreamerOutput(self.core_queue, self.output_queue)

    def tearDown(self):
        self.output.destroy()

    def send_recv(self, message):
        """Send `message` to the output and block until its reply arrives."""
        # The output replies over a dedicated pipe; the pickled write end is
        # attached to the message so the other process can answer directly.
        (my_end, other_end) = multiprocessing.Pipe()
        message.update({'reply_to': pickle_connection(other_end)})
        self.output_queue.put(message)
        # poll(None) blocks with no timeout until the reply is available.
        my_end.poll(None)
        return my_end.recv()

    def send(self, message):
        """Send `message` without waiting for a reply."""
        self.output_queue.put(message)

    @SkipTest
    def test_play_uri_existing_file(self):
        message = {'command': 'play_uri', 'uri': self.song_uri}
        self.assertEqual(True, self.send_recv(message))

    @SkipTest
    def test_play_uri_non_existing_file(self):
        message = {'command': 'play_uri', 'uri': self.song_uri + 'bogus'}
        self.assertEqual(False, self.send_recv(message))

    def test_default_get_volume_result(self):
        message = {'command': 'get_volume'}
        self.assertEqual(100, self.send_recv(message))

    def test_set_volume(self):
        self.send({'command': 'set_volume', 'volume': 50})
        self.assertEqual(50, self.send_recv({'command': 'get_volume'}))

    def test_set_volume_to_zero(self):
        self.send({'command': 'set_volume', 'volume': 0})
        self.assertEqual(0, self.send_recv({'command': 'get_volume'}))

    def test_set_volume_to_one_hundred(self):
        self.send({'command': 'set_volume', 'volume': 100})
        self.assertEqual(100, self.send_recv({'command': 'get_volume'}))

    @SkipTest
    def test_set_state(self):
        raise NotImplementedError
| import multiprocessing
import unittest
from mopidy.outputs.gstreamer import GStreamerOutput
from mopidy.process import pickle_connection
from mopidy.utils.path import path_to_uri
from tests import data_folder, SkipTest
class GStreamerOutputTest(unittest.TestCase):
def setUp(self):
self.song_uri = path_to_uri(data_folder('song1.wav'))
self.output_queue = multiprocessing.Queue()
self.core_queue = multiprocessing.Queue()
self.output = GStreamerOutput(self.core_queue, self.output_queue)
def tearDown(self):
self.output.destroy()
def send_recv(self, message):
(my_end, other_end) = multiprocessing.Pipe()
message.update({'reply_to': pickle_connection(other_end)})
self.output_queue.put(message)
my_end.poll(None)
return my_end.recv()
def send(self, message):
self.output_queue.put(message)
@SkipTest
def test_play_uri_existing_file(self):
message = {'command': 'play_uri', 'uri': self.song_uri}
self.assertEqual(True, self.send_recv(message))
@SkipTest
def test_play_uri_non_existing_file(self):
message = {'command': 'play_uri', 'uri': self.song_uri + 'bogus'}
self.assertEqual(False, self.send_recv(message))
def test_default_get_volume_result(self):
message = {'command': 'get_volume'}
self.assertEqual(100, self.send_recv(message))
def test_set_volume(self):
self.send({'command': 'set_volume', 'volume': 50})
self.assertEqual(50, self.send_recv({'command': 'get_volume'}))
def test_set_volume_to_zero(self):
self.send({'command': 'set_volume', 'volume': 0})
self.assertEqual(0, self.send_recv({'command': 'get_volume'}))
def test_set_volume_to_one_hundred(self):
self.send({'command': 'set_volume', 'volume': 100})
self.assertEqual(100, self.send_recv({'command': 'get_volume'}))
@SkipTest
def test_set_state(self):
raise NotImplementedError
| Python | 0.000001 |
010a827abdc891bd79f7474c5ef65b991edf2a1b | Update candidate_party_corrections.py | calaccess_processed/candidate_party_corrections.py | calaccess_processed/candidate_party_corrections.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes correct party affiliation for candidate in specific contests.
"""
corrections = (
# http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/updated-contact-info.pdf # noqa
('WINSTON, ALMA MARIE', 2014, 'PRIMARY', 'GOVERNOR', 'REPUBLICAN'),
# http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/certified-write-in-list.pdf # noqa
('WALLS, JIMELLE L.', 2014, 'PRIMARY', 'GOVERNOR', 'NO PARTY PREFERENCE'),
# http://elections.cdn.sos.ca.gov/statewide-elections/2012-primary/updated-contact-info-cert-list.pdf # noqa
('ESPINOSA, GEBY E.', 2014, 'PRIMARY', 'ASSEMBLY 24', 'DEMOCRATIC'),
# http://elections.cdn.sos.ca.gov/special-elections/2011-sd28/certified-list.pdf
('VALENTINE, ROBERT S.', 2011, 'SPECIAL ELECTION', 'STATE SENATE 28', 'REPUBLICAN'),
# http://cal-access.sos.ca.gov/Campaign/Candidates/Detail.aspx?id=1273672
('WALDRON, MARIE', 2018, 'PRIMARY', 'ASSEMBLY 75', 'REPUBLICAN'),
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes correct party affiliation for candidate in specific contests.
"""
corrections = (
# http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/updated-contact-info.pdf # noqa
('WINSTON, ALMA MARIE', 2014, 'PRIMARY', 'GOVERNOR', 'REPUBLICAN'),
# http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/certified-write-in-list.pdf # noqa
('WALLS, JIMELLE L.', 2014, 'PRIMARY', 'GOVERNOR', 'NO PARTY PREFERENCE'),
# http://elections.cdn.sos.ca.gov/statewide-elections/2012-primary/updated-contact-info-cert-list.pdf # noqa
('ESPINOSA, GEBY E.', 2014, 'PRIMARY', 'ASSEMBLY 24', 'DEMOCRATIC'),
# http://elections.cdn.sos.ca.gov/special-elections/2011-sd28/certified-list.pdf
('VALENTINE, ROBERT S.', 2011, 'SPECIAL ELECTION', 'STATE SENATE 28', 'REPUBLICAN'),
# http://elections.cdn.sos.ca.gov/statewide-elections/2014-general/updated-contact-info.pdf
('KEPHART, GARY', 2014, 'GENERAL', 'STATE SENATE 36', 'DEMOCRATIC'),
)
| Python | 0.000001 |
4674a0ccbba7596cfcfd4cd21e3355b0afaa0c95 | Fix l10n history movements | postatus/status.py | postatus/status.py | import requests
import sys
import time
def format_time(t):
return time.ctime(t)
def format_short_date(t):
return time.strftime('%m/%d', time.gmtime(t))
# ./bin/l10n_status.py --app=feedback --type=history --highlight=es,pt_BR,po,hu,de,gr,fr,it,ru,ja,tr,zh_TW,zh_CN https://input.mozilla.org/static/l10n_completion.json
class Status(object):
SKIP_LOCALES = ['en_US']
def __init__(self, url, app=None, highlight=None):
self.url = url
self.app = app
self.highlight = highlight or []
self.data = []
self.created = None
def get_data(self):
if self.data:
return
resp = requests.get(self.url)
if resp.status_code != 200:
resp.raise_for_status()
self.data = resp.json()
self.created = format_time(self.data[-1]['created'])
def summary(self):
"""Generates summary data of today's state"""
self.get_data()
highlight = self.highlight
last_item = self.data[-1]
output = {}
output['app'] = self.app or 'All'
data = last_item['locales']
if self.app:
get_data = lambda x: x['apps'][self.app]['percent']
else:
get_data = lambda x: x['percent']
items = [item for item in data.items() if item[0] not in highlight]
hitems = [item for item in data.items() if item[0] in highlight]
highlighted = []
if hitems:
for loc, loc_data in sorted(hitems, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
perc = get_data(loc_data)
highlighted.append((loc, perc))
output['highlighted'] = highlighted
locales = []
for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
perc = get_data(loc_data)
locales.append((loc, perc))
output['locales'] = locales
output['created'] = self.created
return output
def _mark_movement(self, data):
"""For each item, converts to a tuple of (movement, item)"""
ret = []
prev_day = None
for i, day in enumerate(data):
if i == 0:
ret.append(('', day))
prev_day = day
continue
if prev_day > day:
item = ('down', day)
elif prev_day < day:
item = ('up', day)
else:
item = ('equal', day)
prev_day = day
ret.append(item)
return ret
def history(self):
self.get_data()
data = self.data
highlight = self.highlight
app = self.app
# Get a list of the locales we'll iterate through
locales = sorted(data[-1]['locales'].keys())
num_days = 14
# Truncate the data to what we want to look at
data = data[-num_days:]
if app:
get_data = lambda x: x['apps'][app]['percent']
else:
get_data = lambda x: x['percent']
hlocales = [loc for loc in locales if loc in highlight]
locales = [loc for loc in locales if loc not in highlight]
output = {}
output['app'] = self.app or 'All'
output['headers'] = [format_short_date(item['created']) for item in data]
output['highlighted'] = sorted(
(loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
for loc in hlocales
)
output['locales'] = sorted(
(loc, self._mark_movement(
get_data(day['locales'][loc]) for day in data))
for loc in locales
)
output['created'] = self.created
return output
| import requests
import sys
import time
def format_time(t):
return time.ctime(t)
def format_short_date(t):
return time.strftime('%m/%d', time.gmtime(t))
# ./bin/l10n_status.py --app=feedback --type=history --highlight=es,pt_BR,po,hu,de,gr,fr,it,ru,ja,tr,zh_TW,zh_CN https://input.mozilla.org/static/l10n_completion.json
class Status(object):
SKIP_LOCALES = ['en_US']
def __init__(self, url, app=None, highlight=None):
self.url = url
self.app = app
self.highlight = highlight or []
self.data = []
self.created = None
def get_data(self):
if self.data:
return
resp = requests.get(self.url)
if resp.status_code != 200:
resp.raise_for_status()
self.data = resp.json()
self.created = format_time(self.data[-1]['created'])
def summary(self):
"""Generates summary data of today's state"""
self.get_data()
highlight = self.highlight
last_item = self.data[-1]
output = {}
output['app'] = self.app or 'All'
data = last_item['locales']
if self.app:
get_data = lambda x: x['apps'][self.app]['percent']
else:
get_data = lambda x: x['percent']
items = [item for item in data.items() if item[0] not in highlight]
hitems = [item for item in data.items() if item[0] in highlight]
highlighted = []
if hitems:
for loc, loc_data in sorted(hitems, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
perc = get_data(loc_data)
highlighted.append((loc, perc))
output['highlighted'] = highlighted
locales = []
for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
perc = get_data(loc_data)
locales.append((loc, perc))
output['locales'] = locales
output['created'] = self.created
return output
def _mark_movement(self, data):
"""For each item, converts to a tuple of (movement, item)"""
ret = []
prev_day = None
for i, day in enumerate(data):
if i == 0:
ret.append(('', day))
continue
if prev_day > day:
item = ('down', day)
elif prev_day < day:
item = ('up', day)
elif day < 100:
item = ('equal', day)
prev_day = day
ret.append(item)
return ret
def history(self):
self.get_data()
data = self.data
highlight = self.highlight
app = self.app
# Get a list of the locales we'll iterate through
locales = sorted(data[-1]['locales'].keys())
num_days = 14
# Truncate the data to what we want to look at
data = data[-num_days:]
if app:
get_data = lambda x: x['apps'][app]['percent']
else:
get_data = lambda x: x['percent']
hlocales = [loc for loc in locales if loc in highlight]
locales = [loc for loc in locales if loc not in highlight]
output = {}
output['app'] = self.app or 'All'
output['headers'] = [format_short_date(item['created']) for item in data]
output['highlighted'] = sorted(
(loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
for loc in hlocales
)
output['locales'] = sorted(
(loc, self._mark_movement(
get_data(day['locales'][loc]) for day in data))
for loc in locales
)
print output['locales']
output['created'] = self.created
return output
| Python | 0.006142 |
cc13845684230565fe8bb94f2877001af67a170d | Update %%powershell help | powershellmagic.py | powershellmagic.py | """IPython magics for Windows PowerShell.
"""
__version__ = '0.1'
import atexit
import os
from subprocess import Popen, PIPE
import sys
import tempfile
from IPython.core.magic import (cell_magic, Magics, magics_class)
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring)
@magics_class
class PowerShellMagics(Magics):
    """IPython magics class for Windows PowerShell."""

    # This class is patterned after
    # IPython.core.magics.script.ScriptMagics.

    def __init__(self, shell=None):
        super(PowerShellMagics, self).__init__(shell=shell)
        # One reusable .ps1 file per magics instance, removed at exit.
        self._cell_file_name = self._powershell_tempfile()

    def _powershell_tempfile(self):
        """Create the reusable .ps1 temp file and schedule its removal."""
        tf = tempfile.NamedTemporaryFile(suffix='.ps1', delete=False)
        # Bug fix: close the handle immediately -- on Windows a still-open
        # NamedTemporaryFile cannot be reopened by the later open(..., 'w')
        # in powershell().
        tf.close()
        atexit.register(self._delete_powershell_tempfile)
        return tf.name

    def _delete_powershell_tempfile(self):
        os.remove(self._cell_file_name)

    @magic_arguments()
    @argument(
        '--out',
        type=str,
        help="Redirect cell stdout to a variable."
    )
    @argument(
        '--err',
        type=str,
        help="Redirect cell stderr to a variable."
    )
    @cell_magic
    def powershell(self, line, cell):
        """Use Windows PowerShell to execute an IPython cell.

        An example:

            In [1]: %%powershell
               ...: foreach ($i in 1..3) {
               ...:     $i
               ...: }
               ...:
            1
            2
            3
        """
        # This function is patterned after
        # IPython.core.magics.ScriptMagics.shebang.
        args = parse_argstring(self.powershell, line)
        with open(self._cell_file_name, mode='w') as f:
            f.write(cell)
        # Bug fix: pass the command as an argument list. The original built
        # one string and split() it, which broke whenever the temp-file path
        # contained spaces (e.g. a user name with a space).
        cmd = ['PowerShell', '-ExecutionPolicy', 'RemoteSigned',
               '-File', self._cell_file_name]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        out, err = p.communicate()
        out = out.decode()
        err = err.decode()
        # --out/--err redirect the streams into user namespace variables;
        # otherwise they are forwarded to this process's stdout/stderr.
        if args.out:
            self.shell.user_ns[args.out] = out
        else:
            sys.stdout.write(out)
            sys.stdout.flush()
        if args.err:
            self.shell.user_ns[args.err] = err
        else:
            sys.stderr.write(err)
            sys.stderr.flush()
def load_ipython_extension(ip):
"""Load PowerShellMagics extension"""
ip.register_magics(PowerShellMagics)
| """IPython magics for Windows PowerShell.
"""
__version__ = '0.1'
import atexit
import os
from subprocess import Popen, PIPE
import sys
import tempfile
from IPython.core.magic import (cell_magic, Magics, magics_class)
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring)
@magics_class
class PowerShellMagics(Magics):
"""IPython magics class for Windows PowerShell.
"""
# This class is patterned after
# IPython.core.magics.script.ScriptMagics.
def __init__(self, shell=None):
super(PowerShellMagics, self).__init__(shell=shell)
self._cell_file_name = self._powershell_tempfile()
def _powershell_tempfile(self):
tf = tempfile.NamedTemporaryFile(suffix='.ps1', delete=False)
atexit.register(self._delete_powershell_tempfile)
return tf.name
def _delete_powershell_tempfile(self):
os.remove(self._cell_file_name)
@magic_arguments()
@argument(
'--out',
type=str,
help="Redirect stdout to a variable."
)
@argument(
'--err',
type=str,
help="Redirect stderr to a variable."
)
@cell_magic
def powershell(self, line, cell):
"""Execute a cell written in PowerShell by spawning a process
that invokes the command:
PowerShell -ExecutionPolicy RemoteSigned -File tempfile.ps1
where the argument to '-File' is a file that contains the contents
of the cell.
"""
# This function is patterned after
# IPython.core.magics.ScriptMagics.shebang.
args = parse_argstring(self.powershell, line)
with open(self._cell_file_name, mode='w') as f:
f.write(cell)
cmd = 'PowerShell -ExecutionPolicy RemoteSigned -File {}\r\n'
cmd = cmd.format(self._cell_file_name)
p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate()
out = out.decode()
err = err.decode()
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush()
def load_ipython_extension(ip):
"""Load PowerShellMagics extension"""
ip.register_magics(PowerShellMagics)
| Python | 0 |
af3ddbf32379ecf96224746eb59d0685be9586ae | Simplify compression | src/mock_vws/_mock_web_query_api.py | src/mock_vws/_mock_web_query_api.py | """
A fake implementation of the Vuforia Web Query API.
See
https://library.vuforia.com/articles/Solution/How-To-Perform-an-Image-Recognition-Query
"""
import email.utils
import gzip
import uuid
from typing import Callable, List, Set
from requests_mock import POST
from requests_mock.request import _RequestObjectProxy
from requests_mock.response import _Context
from mock_vws._constants import ResultCodes
from mock_vws._mock_common import Route, json_dump
ROUTES = set([])
def route(
path_pattern: str,
http_methods: List[str],
) -> Callable[..., Callable]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
path_pattern: The end part of a URL pattern. E.g. `/targets` or
`/targets/.+`.
http_methods: HTTP methods that map to the route function.
"""
def decorator(method: Callable[..., str]) -> Callable[..., str]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
method: Method to register.
Returns:
The given `method` with multiple changes, including added
validators.
"""
ROUTES.add(
Route(
route_name=method.__name__,
path_pattern=path_pattern,
http_methods=http_methods,
)
)
return method
return decorator
class MockVuforiaWebQueryAPI:
"""
A fake implementation of the Vuforia Web Query API.
This implementation is tied to the implementation of `requests_mock`.
"""
def __init__(self, ) -> None:
"""
Attributes:
routes: The `Route`s to be used in the mock.
"""
self.routes: Set[Route] = ROUTES
@route(path_pattern='/v1/query', http_methods=[POST])
def query( # pylint: disable=no-self-use
self,
request: _RequestObjectProxy, # pylint: disable=unused-argument
context: _Context, # pylint: disable=unused-argument
) -> bytes:
"""
Perform an image recognition query.
"""
results: List[str] = []
body = {
'result_code': ResultCodes.SUCCESS.value,
'results': results,
'query_id': uuid.uuid4().hex,
}
text = json_dump(body)
context.headers['Content-Encoding'] = 'gzip'
date = email.utils.formatdate(None, localtime=False, usegmt=True)
context.headers['Date'] = date
value = gzip.compress(text.encode())
context.headers['Content-Length'] = str(len(value))
return value
| """
A fake implementation of the Vuforia Web Query API.
See
https://library.vuforia.com/articles/Solution/How-To-Perform-an-Image-Recognition-Query
"""
import email.utils
import gzip
import uuid
from io import BytesIO
from typing import Callable, List, Set
from requests_mock import POST
from requests_mock.request import _RequestObjectProxy
from requests_mock.response import _Context
from mock_vws._constants import ResultCodes
from mock_vws._mock_common import Route, json_dump
ROUTES = set([])
def route(
path_pattern: str,
http_methods: List[str],
) -> Callable[..., Callable]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
path_pattern: The end part of a URL pattern. E.g. `/targets` or
`/targets/.+`.
http_methods: HTTP methods that map to the route function.
"""
def decorator(method: Callable[..., str]) -> Callable[..., str]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
method: Method to register.
Returns:
The given `method` with multiple changes, including added
validators.
"""
ROUTES.add(
Route(
route_name=method.__name__,
path_pattern=path_pattern,
http_methods=http_methods,
)
)
return method
return decorator
class MockVuforiaWebQueryAPI:
"""
A fake implementation of the Vuforia Web Query API.
This implementation is tied to the implementation of `requests_mock`.
"""
def __init__(self, ) -> None:
"""
Attributes:
routes: The `Route`s to be used in the mock.
"""
self.routes: Set[Route] = ROUTES
@route(path_pattern='/v1/query', http_methods=[POST])
def query( # pylint: disable=no-self-use
self,
request: _RequestObjectProxy, # pylint: disable=unused-argument
context: _Context, # pylint: disable=unused-argument
) -> str:
"""
Perform an image recognition query.
"""
results: List[str] = []
body = {
'result_code': ResultCodes.SUCCESS.value,
'results': results,
'query_id': uuid.uuid4().hex,
}
text = json_dump(body)
context.headers['Content-Encoding'] = 'gzip'
date = email.utils.formatdate(None, localtime=False, usegmt=True)
context.headers['Date'] = date
out = BytesIO()
with gzip.GzipFile(fileobj=out, mode='w') as f:
f.write(text.encode())
value = out.getvalue()
context.headers['Content-Length'] = str(len(value))
return out.getvalue()
| Python | 0.000068 |
c5ccf36fbeb6b744918e3090422763103b181de8 | Fix name (copy paste fail...) | tests/test_tripleStandardize.py | tests/test_tripleStandardize.py | import json
from ppp_nlp_classical import Triple, TriplesBucket, computeTree, simplify, buildBucket, DependenciesTree, tripleProduce1, tripleProduce2, tripleProduce3, buildTree
from ppp_datamodel import Triple, Resource, Missing
import data
from unittest import TestCase
class StandardTripleTests(TestCase):
def testBuildFromBucket(self):
tree = computeTree(data.give_president_of_USA()['sentences'][0])
qw = simplify(tree)
triple = buildTree(buildBucket(tree,qw))
self.assertIsInstance(triple,Triple)
self.assertEqual(triple.get("predicate"),Resource("identity"))
self.assertEqual(triple.get("object"),Missing())
subj=triple.get("subject")
self.assertEqual(subj.get("subject"),Missing())
self.assertEqual(subj.get("predicate"),Resource("president of"))
self.assertEqual(subj.get("object"),Resource("United States"))
| import json
from ppp_nlp_classical import Triple, TriplesBucket, computeTree, simplify, buildBucket, DependenciesTree, tripleProduce1, tripleProduce2, tripleProduce3, buildTree
from ppp_datamodel import Triple, Resource, Missing
import data
from unittest import TestCase
class StandardTripleTests(TestCase):
def testBuildBucket(self):
tree = computeTree(data.give_president_of_USA()['sentences'][0])
qw = simplify(tree)
triple = buildTree(buildBucket(tree,qw))
self.assertIsInstance(triple,Triple)
self.assertEqual(triple.get("predicate"),Resource("identity"))
self.assertEqual(triple.get("object"),Missing())
subj=triple.get("subject")
self.assertEqual(subj.get("subject"),Missing())
self.assertEqual(subj.get("predicate"),Resource("president of"))
self.assertEqual(subj.get("object"),Resource("United States"))
| Python | 0 |
fdd4a88c7a7981e8df1dd7da150b164c8121d4be | Add more youtube and vimeo links for testing | tests/testapp/test_embedding.py | tests/testapp/test_embedding.py | from django.test import TestCase
from feincms3 import embedding
class EmbeddingTest(TestCase):
def test_no_handlers(self):
"""Embed video without handlers"""
self.assertEqual(embedding.embed_video("stuff"), None)
def test_youtube(self):
"""Test a youtube link"""
self.assertEqual(
embedding.embed_video("https://www.youtube.com/watch?v=dQw4w9WgXcQ"),
"""\
<div class="responsive-embed widescreen youtube"><iframe \
src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
self.assertEqual(
embedding.embed_video("https://youtu.be/y7-s5ZvC_2A"),
"""\
<div class="responsive-embed widescreen youtube"><iframe \
src="https://www.youtube.com/embed/y7-s5ZvC_2A" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
self.assertTrue(
embedding.embed_video(
"https://www.youtube.com/watch?v=4zGnNmncJWg&feature=emb_title"
)
)
self.assertTrue(
embedding.embed_video(
"https://www.youtube.com/watch?v=DYu_bGbZiiQ&list=RDJMOOG7rWTPg&index=7"
)
)
def test_vimeo(self):
self.assertEqual(
embedding.embed_video("https://vimeo.com/455728498"),
"""\
<div class="responsive-embed widescreen vimeo"><iframe \
src="https://player.vimeo.com/video/455728498" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
self.assertTrue(
embedding.embed_video("https://player.vimeo.com/video/417955670")
)
self.assertEqual(
embedding.embed_video("https://vimeo.com/12345678/3213124324"),
"""\
<div class="responsive-embed widescreen vimeo"><iframe \
src="https://player.vimeo.com/video/12345678" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
| from django.test import TestCase
from feincms3 import embedding
class EmbeddingTest(TestCase):
def test_no_handlers(self):
"""Embed video without handlers"""
self.assertEqual(embedding.embed_video("stuff"), None)
def test_youtube(self):
"""Test a youtube link"""
self.assertEqual(
embedding.embed_video("https://www.youtube.com/watch?v=dQw4w9WgXcQ"),
"""\
<div class="responsive-embed widescreen youtube"><iframe \
src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
def test_vimeo(self):
self.assertEqual(
embedding.embed_video("https://vimeo.com/455728498"),
"""\
<div class="responsive-embed widescreen vimeo"><iframe \
src="https://player.vimeo.com/video/455728498" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
| Python | 0 |
c9d45a96236b822e2a5ca11490afdb02b9a5e699 | Drop Py2 and six on tests/unit/states/test_modjk.py | tests/unit/states/test_modjk.py | tests/unit/states/test_modjk.py | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import salt.states.modjk as modjk
from tests.support.unit import TestCase
LIST_NOT_STR = "workers should be a list not a <class 'str'>"
class ModjkTestCase(TestCase):
"""
Test cases for salt.states.modjk
"""
# 'worker_stopped' function tests: 1
def test_worker_stopped(self):
"""
Test to stop all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_stopped(name, "app1"), ret)
# 'worker_activated' function tests: 1
def test_worker_activated(self):
"""
Test to activate all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_activated(name, "app1"), ret)
# 'worker_disabled' function tests: 1
def test_worker_disabled(self):
"""
Test to disable all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_disabled(name, "app1"), ret)
# 'worker_recover' function tests: 1
def test_worker_recover(self):
"""
Test to recover all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_recover(name, "app1"), ret)
| # -*- coding: utf-8 -*-
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.states.modjk as modjk
from salt.ext import six
# Import Salt Testing Libs
from tests.support.unit import TestCase
if six.PY2:
LIST_NOT_STR = "workers should be a list not a <type 'unicode'>"
else:
LIST_NOT_STR = "workers should be a list not a <class 'str'>"
class ModjkTestCase(TestCase):
"""
Test cases for salt.states.modjk
"""
# 'worker_stopped' function tests: 1
def test_worker_stopped(self):
"""
Test to stop all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_stopped(name, "app1"), ret)
# 'worker_activated' function tests: 1
def test_worker_activated(self):
"""
Test to activate all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_activated(name, "app1"), ret)
# 'worker_disabled' function tests: 1
def test_worker_disabled(self):
"""
Test to disable all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_disabled(name, "app1"), ret)
# 'worker_recover' function tests: 1
def test_worker_recover(self):
"""
Test to recover all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_recover(name, "app1"), ret)
| Python | 0 |
bfc0b99be74731639bca3536129e96e379281716 | Simplify tests of venues POST requests | tests/unit/venues/test_views.py | tests/unit/venues/test_views.py | """Unit tests for venues.views."""
from django.urls import reverse
from mock import MagicMock
from app.venues import factories, models, views
import pytest
def test_venue_list_view(client, mocker): # noqa: D103
# GIVEN a number of venues
venues = factories.VenueFactory.build_batch(5)
mocker.patch.object(views.VenueListView, 'get_queryset', return_value=venues)
# WHEN opening the venues list
url = reverse('venues:list')
response = client.get(url)
# THEN it's there
assert response.status_code == 200
assert response.template_name[0] == 'venues/venue_list.html'
# AND shows all existing venues
for venue in venues:
assert venue.name in response.content.decode()
@pytest.mark.parametrize(
'url_name, venue_exists, template_name',
[('venues:create', False, 'model_form.html'),
('venues:update', True, 'model_form.html'),
('venues:delete', True, 'model_delete.html'),
('venues:detail', True, 'venues/venue_detail.html'),
])
def test_venue_CRUD_views_GET_request_yield_200(client, mocker, url_name, venue_exists, template_name): # noqa: D103
# GIVEN an existing venue
venue = factories.VenueFactory.build()
mocker.patch.object(views.VenueDeleteView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueDetailView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueUpdateView, 'get_object', return_value=venue)
# WHEN calling the view via GET request
if venue_exists:
url = reverse(url_name, args=[str(venue.id)])
else:
url = reverse(url_name)
response = client.get(url)
# THEN it's there
assert response.status_code == 200
assert response.template_name[0] == template_name
@pytest.mark.parametrize(
'url_name, venue_exists',
[('venues:create', False),
('venues:update', True),
('venues:delete', True),
])
def test_venue_CRUD_views_POST_request_redirects(client, mocker, url_name, venue_exists): # noqa: D103
# GIVEN an existing venue
venue = factories.VenueFactory.build()
mocker.patch.object(views.VenueDeleteView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueDetailView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueUpdateView, 'get_object', return_value=venue)
mocker.patch('app.venues.models.Venue.save', MagicMock(name="save"))
mocker.patch('app.venues.models.Venue.delete', MagicMock(name="delete"))
# # WHEN calling the view via GET request
if venue_exists:
url = reverse(url_name, args=[str(venue.id)])
else:
url = reverse(url_name)
data = {'name': 'Kaliman Bar'}
response = client.post(url, data=data)
# THEN we get redirected to the venues list
assert response.status_code == 302
assert response.url == reverse('venues:list')
| """Unit tests for venues.views."""
from django.urls import reverse
from mock import MagicMock
from app.venues import factories, models, views
import pytest
@pytest.mark.parametrize(
'url_name, venue_exists, template_name',
[('venues:create', False, 'model_form.html'),
('venues:update', True, 'model_form.html'),
('venues:delete', True, 'model_delete.html'),
('venues:detail', True, 'venues/venue_detail.html'),
])
def test_venues_views_GET_request_yield_200(client, mocker, url_name, venue_exists, template_name): # noqa: D103
# GIVEN an existing venue
venue = factories.VenueFactory.build()
mocker.patch.object(views.VenueDeleteView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueDetailView, 'get_object', return_value=venue)
mocker.patch.object(views.VenueUpdateView, 'get_object', return_value=venue)
# WHEN calling the view via GET request
if venue_exists:
url = reverse(url_name, args=[str(venue.id)])
else:
url = reverse(url_name)
response = client.get(url)
# THEN it's there
assert response.status_code == 200
assert response.template_name[0] == template_name
def test_venue_list_view(client, mocker): # noqa: D103
# GIVEN a number of venues
venues = factories.VenueFactory.build_batch(5)
mocker.patch.object(views.VenueListView, 'get_queryset', return_value=venues)
# WHEN opening the venues list
url = reverse('venues:list')
response = client.get(url)
# THEN it's there
assert response.status_code == 200
assert response.template_name[0] == 'venues/venue_list.html'
# AND shows all existing venues
for venue in venues:
assert venue.name in response.content.decode()
def test_venue_create_view_POST_redirects_to_venue_list(client, mocker): # noqa: D103
# GIVEN any state
mocker.patch('app.venues.models.Venue.save', MagicMock(name="save"))
# WHEN creating a new venue via POST request
url = reverse('venues:create')
response = client.post(url, data={'name': 'Kaliman Bar'})
# THEN we get redirected to the venues list
assert response.status_code == 302
assert response.url == reverse('venues:list')
def test_venue_create_view_POST_creates_new_venue(db, client): # noqa: D103
# GIVEN an empty database
assert models.Venue.objects.count() == 0
# WHEN creating a new venue via POST request
url = reverse('venues:create')
client.post(url, data={'name': 'Roxy Bar'})
# THEN it gets saved to the database
assert models.Venue.objects.count() == 1
def test_venue_update_view_POST_redirects_to_list_view(client, mocker): # noqa: D103
# GIVEN an existing venue
venue = factories.VenueFactory.build()
mocker.patch.object(views.VenueUpdateView, 'get_object', return_value=venue)
mocker.patch('app.venues.models.Venue.save', MagicMock(name="save"))
# WHEN updating the venue via POST request
url = reverse('venues:update', args=[str(venue.id)])
response = client.post(url, data={'name': 'Roxy Bar'})
# THEN it redirects to the venues list
assert response.status_code == 302
assert response.url == reverse('venues:list')
def test_venue_delete_view_POST_redirects_to_venues_list(client, mocker): # noqa: D103
# GIVEN an existing venue
venue = factories.VenueFactory.build()
mocker.patch.object(views.VenueDeleteView, 'get_object', return_value=venue)
mocker.patch('app.venues.models.Venue.delete', MagicMock(name="delete"))
# WHEN deleting the venue via POST request
url = reverse('venues:delete', args=[str(venue.id)])
response = client.post(url)
# THEN we get redirected to the venues list
assert response.status_code == 302
assert response.url == reverse('venues:list')
| Python | 0.000007 |
0a84c767395bd8cb88711afae8bc94b045d50e78 | Remove unused import | tests/utils/test_train_utils.py | tests/utils/test_train_utils.py | from typing import Any, Dict
import numpy as np
import pytest
import rasa.utils.train_utils as train_utils
from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
SPLIT_ENTITIES_BY_COMMA,
)
def test_align_token_features():
tokens = [
Token("This", 0, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("is", 5, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("a", 8, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("sentence", 10, data={NUMBER_OF_SUB_TOKENS: 2}),
Token("embedding", 19, data={NUMBER_OF_SUB_TOKENS: 4}),
]
seq_dim = sum(t.get(NUMBER_OF_SUB_TOKENS) for t in tokens)
token_features = np.random.rand(1, seq_dim, 64)
actual_features = train_utils.align_token_features([tokens], token_features)
assert np.all(actual_features[0][0] == token_features[0][0])
assert np.all(actual_features[0][1] == token_features[0][1])
assert np.all(actual_features[0][2] == token_features[0][2])
# sentence is split into 2 sub-tokens
assert np.all(actual_features[0][3] == np.mean(token_features[0][3:5], axis=0))
# embedding is split into 4 sub-tokens
assert np.all(actual_features[0][4] == np.mean(token_features[0][5:10], axis=0))
@pytest.mark.parametrize(
"split_entities_config, expected_initialized_config",
[
(
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
{SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE},
),
(
{"address": False, "ingredients": True},
{
"address": False,
"ingredients": True,
SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
},
),
],
)
def test_init_split_entities_config(
split_entities_config: Any, expected_initialized_config: Dict[(str, bool)],
):
assert (
train_utils.init_split_entities(
split_entities_config, SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE
)
== expected_initialized_config
)
| from typing import Any, Dict
import numpy as np
import pytest
import rasa.utils.train_utils as train_utils
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
SPLIT_ENTITIES_BY_COMMA,
)
def test_align_token_features():
tokens = [
Token("This", 0, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("is", 5, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("a", 8, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("sentence", 10, data={NUMBER_OF_SUB_TOKENS: 2}),
Token("embedding", 19, data={NUMBER_OF_SUB_TOKENS: 4}),
]
seq_dim = sum(t.get(NUMBER_OF_SUB_TOKENS) for t in tokens)
token_features = np.random.rand(1, seq_dim, 64)
actual_features = train_utils.align_token_features([tokens], token_features)
assert np.all(actual_features[0][0] == token_features[0][0])
assert np.all(actual_features[0][1] == token_features[0][1])
assert np.all(actual_features[0][2] == token_features[0][2])
# sentence is split into 2 sub-tokens
assert np.all(actual_features[0][3] == np.mean(token_features[0][3:5], axis=0))
# embedding is split into 4 sub-tokens
assert np.all(actual_features[0][4] == np.mean(token_features[0][5:10], axis=0))
@pytest.mark.parametrize(
"split_entities_config, expected_initialized_config",
[
(
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
{SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE},
),
(
{"address": False, "ingredients": True},
{
"address": False,
"ingredients": True,
SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
},
),
],
)
def test_init_split_entities_config(
split_entities_config: Any, expected_initialized_config: Dict[(str, bool)],
):
assert (
train_utils.init_split_entities(
split_entities_config, SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE
)
== expected_initialized_config
)
| Python | 0.000001 |
96dbf260a5c7bf9d5f89951f77792cf1c04d5e38 | add profiling to perf.py | perf.py | perf.py | import cProfile
import time
from parinfer import indent_mode, paren_mode
def timeProcess(string, options):
numlines = len(string.splitlines())
print "Testing file with", numlines, "lines"
t = time.clock()
indent_mode(string, options)
dt = time.clock() - t
print "Indent Mode:", dt, "s"
t = time.clock()
paren_mode(string, options)
dt = time.clock() - t
print "Paren Mode:", dt, "s"
cProfile.runctx("indent_mode(string, options)", globals(), locals())
cProfile.runctx("paren_mode(string, options)", globals(), locals())
with open('tests/really_long_file', 'r') as f:
text = f.read()
timeProcess(text, {})
| import time
from parinfer import indent_mode, paren_mode
def timeProcess(string, options):
numlines = len(string.splitlines())
print "Testing file with", numlines, "lines"
t = time.clock()
indent_mode(string, options)
dt = time.clock() - t
print "Indent Mode:", dt, "s"
t = time.clock()
paren_mode(string, options)
dt = time.clock() - t
print "Paren Mode:", dt, "s"
with open('tests/really_long_file', 'r') as f:
text = f.read()
timeProcess(text, {})
| Python | 0.000001 |
e8df708d7d926d82a7df13031aabbfb64e7347b4 | Change to work with entryparser changes | contrib/plugins/comments/xmlrpcplugins/pingback.py | contrib/plugins/comments/xmlrpcplugins/pingback.py | from config import py
from libs.pyblosxom import PyBlosxom
from libs.Request import Request
from libs import tools
import cgi, os, re, sgmllib, time, urllib
class parser(sgmllib.SGMLParser):
""" Shamelessly grabbed from Sam Ruby
from http://www.intertwingly.net/code/mombo/pingback.py
"""
""" extract title and hrefs from a web page"""
intitle=0
title = ""
hrefs = []
def do_a(self, attrs):
attrs=dict(attrs)
if attrs.has_key('href'): self.hrefs.append(attrs['href'])
def do_title(self, attrs):
if self.title=="": self.intitle=1
def unknown_starttag(self, tag, attrs):
self.intitle=0
def unknown_endtag(self,tag):
self.intitle=0
def handle_charref(self, ref):
if self.intitle: self.title = self.title + ("&#%s;" % ref)
def handle_data(self,text):
if self.intitle: self.title = self.title + text
def fileFor(req, uri):
config = req.getConfiguration()
data = req.getData()
# import plugins
import libs.plugins.__init__
libs.plugins.__init__.initialize_plugins(config)
# do start callback
tools.run_callback("start", {'request': req}, mappingfunc=lambda x,y:y)
req.addHttp({"form": cgi.FieldStorage()})
p = PyBlosxom(req)
p.startup()
data['extensions'] = tools.run_callback("entryparser",
{'txt': PyBlosxom.defaultEntryParser},
mappingfunc=lambda x,y:y,
defaultfunc=lambda x:x)
data['pi_yr'] = ''
data['pi_mo'] = ''
data['pi_da'] = ''
path_info = uri.split('/')[4:] # get rid of http and script
if path_info[0] == '':
path_info.pop(0)
p.processPathInfo(path_info)
args = { 'request': req }
es = p.defaultFileListHandler(args)
for i in es:
if i['fn'] == data['pi_frag'][1:]:
return i['file_path']
def pingback(request, source, target):
source_file = urllib.urlopen(source.split('#')[0])
source_page = parser()
source_page.feed(source_file.read())
source_file.close()
if source_page.title == "": source_page.title = source
if target in source_page.hrefs:
target_file = fileFor(request, target)
body = ''
try:
from rssfinder import getFeeds
from rssparser import parse
baseurl=source.split("#")[0]
for feed in getFeeds(baseurl):
for item in parse(feed)['items']:
if item['link']==source:
if 'title' in item: title = item['title']
if 'content_encoded' in item: body = item['content_encoded'].strip()
if 'description' in item: body = item['description'].strip() or body
body=re.compile('<.*?>',re.S).sub('',body)
body=re.sub('\s+',' ',body)
body=body[:body.rfind(' ',0,250)][:250] + " ...<br /><br />"
except:
pass
cmt = {'title':source_page.title, \
'author':'Pingback',
'pubDate' : str(time.time()), \
'link': source,
'source' : '',
'description' : body}
from libs.plugins.commentdecorator import writeComment
config = request.getConfiguration()
data = request.getData()
from libs.entries.fileentry import FileEntry
datadir = config['datadir']
entry = FileEntry(config, datadir+'/'+target_file+'.txt', datadir)
data['entry_list'] = [ entry ]
writeComment(config, data, cmt)
return "success pinging %s from %s\n" % (source, target)
else:
return "produce xmlrpc fault here"
def register_xmlrpc_methods():
return {'pingback.ping': pingback }
| from config import py
from libs.pyblosxom import PyBlosxom
from libs.Request import Request
from libs import tools
import cgi, os, re, sgmllib, time, urllib
class parser(sgmllib.SGMLParser):
""" Shamelessly grabbed from Sam Ruby
from http://www.intertwingly.net/code/mombo/pingback.py
"""
""" extract title and hrefs from a web page"""
intitle=0
title = ""
hrefs = []
def do_a(self, attrs):
attrs=dict(attrs)
if attrs.has_key('href'): self.hrefs.append(attrs['href'])
def do_title(self, attrs):
if self.title=="": self.intitle=1
def unknown_starttag(self, tag, attrs):
self.intitle=0
def unknown_endtag(self,tag):
self.intitle=0
def handle_charref(self, ref):
if self.intitle: self.title = self.title + ("&#%s;" % ref)
def handle_data(self,text):
if self.intitle: self.title = self.title + text
def fileFor(req, uri):
config = req.getConfiguration()
data = req.getData()
import libs.entryparsers.__init__
libs.entryparsers.__init__.initialize_extensions()
# import plugins
import libs.plugins.__init__
libs.plugins.__init__.initialize_plugins(config)
req.addHttp({"form": cgi.FieldStorage()})
p = PyBlosxom(req)
p.startup()
data['extensions'] = libs.entryparsers.__init__.ext
data['pi_yr'] = ''
data['pi_mo'] = ''
data['pi_da'] = ''
path_info = uri.split('/')[4:] # get rid of http and script
if path_info[0] == '':
path_info.pop(0)
p.processPathInfo(path_info)
args = { 'request': req }
es = p.defaultFileListHandler(args)
for i in es:
if i['fn'] == data['pi_frag'][1:]:
return i['file_path']
def pingback(request, source, target):
source_file = urllib.urlopen(source.split('#')[0])
source_page = parser()
source_page.feed(source_file.read())
source_file.close()
if source_page.title == "": source_page.title = source
if target in source_page.hrefs:
target_file = fileFor(request, target)
body = ''
try:
from rssfinder import getFeeds
from rssparser import parse
baseurl=source.split("#")[0]
for feed in getFeeds(baseurl):
for item in parse(feed)['items']:
if item['link']==source:
if 'title' in item: title = item['title']
if 'content_encoded' in item: body = item['content_encoded'].strip()
if 'description' in item: body = item['description'].strip() or body
body=re.compile('<.*?>',re.S).sub('',body)
body=re.sub('\s+',' ',body)
body=body[:body.rfind(' ',0,250)][:250] + " ...<br /><br />"
except:
pass
cmt = {'title':source_page.title, \
'author':'Pingback',
'pubDate' : str(time.time()), \
'link': source,
'source' : '',
'description' : body}
from libs.plugins.commentdecorator import writeComment
config = request.getConfiguration()
data = request.getData()
from libs.entries.fileentry import FileEntry
datadir = config['datadir']
entry = FileEntry(config, datadir+'/'+target_file+'.txt', datadir)
data['entry_list'] = [ entry ]
writeComment(config, data, cmt)
return "success pinging %s from %s\n" % (source, target)
else:
return "produce xmlrpc fault here"
def register_xmlrpc_methods():
return {'pingback.ping': pingback }
| Python | 0 |
dbb6fff417a3beac0db7dec603d4793eabc68a89 | bump version but not calling it stable in readme yet | psiturk/version.py | psiturk/version.py | version_number = '2.2.4'
| version_number = '2.2.3'
| Python | 0 |
e9fd001a21c594f3efd076aab73fdb3fafaa49f0 | fix typo | tests/build_utilities/native_config.xpybuild.py | tests/build_utilities/native_config.xpybuild.py | import os, glob, logging
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.utils.compilers import GCC, VisualStudio
log = logging.getLogger('xpybuild.tests.native_config')
# some basic defaults for recent default compilers for running our testcases with
if IS_WINDOWS:
VSROOT=r'c:\Program Files (x86)\Microsoft Visual Studio *'
if glob.glob(VSROOT):
VSROOT = sorted(glob.glob(VSROOT))[-1] # pick the latest one
else:
raise Exception('Cannot find Visual Studio installed in: %s'%VSROOT)
setGlobalOption('native.include', [
VSROOT+r"\VC\ATLMFC\INCLUDE",
VSROOT+r"\VC\INCLUDE",
r"C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt",
])
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10"):
log.warning('WARN - Cannot find expected Windows Kits, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\*")))
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt"):
log.warning('WARN - Cannot find expected Windows Kits UCRT, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\10\Lib\*\*")))
setGlobalOption('native.libpaths', [
VSROOT+r"\VC\ATLMFC\LIB\amd64",
VSROOT+r"\VC\LIB\amd64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.19041.0\um\x64",
])
setGlobalOption('native.cxx.path', [
VSROOT+r"\Common7\IDE",
VSROOT+r"\VC\BIN\amd64",
VSROOT+r"\Common7\Tools",
r"c:\Windows\Microsoft.NET\Framework\v3.5",
])
setGlobalOption('native.compilers', VisualStudio(VSROOT+r'\VC\bin\amd64'))
setGlobalOption('native.cxx.flags', ['/EHa', '/GR', '/O2', '/Ox', '/Ot', '/MD', '/nologo'])
else:
setGlobalOption('native.compilers', GCC())
setGlobalOption('native.cxx.flags', ['-fPIC', '-O3', '--std=c++0x'])
| import os, glob, logging
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.utils.compilers import GCC, VisualStudio
log = logging.getLogger('xpybuild.tests.native_config')
# some basic defaults for recent default compilers for running our testcases with
if IS_WINDOWS:
VSROOT=r'c:\Program Files (x86)\Microsoft Visual Studio *'
if glob.glob(VSROOT):
VSROOT = sorted(glob.glob(VSROOT))[-1] # pick the latest one
else:
raise Exception('Cannot find Visual Studio installed in: %s'%VSROOT)
setGlobalOption('native.include', [
VSROOT+r"\VC\ATLMFC\INCLUDE",
VSROOT+r"\VC\INCLUDE",
r"C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt",
])
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10"):
log.warning('WARN - Cannot find expected Windows Kits, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\*")))
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrtx"):
log.warning('WARN - Cannot find expected Windows Kits UCRT, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\10\Lib\*\*")))
setGlobalOption('native.libpaths', [
VSROOT+r"\VC\ATLMFC\LIB\amd64",
VSROOT+r"\VC\LIB\amd64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.19041.0\um\x64",
])
setGlobalOption('native.cxx.path', [
VSROOT+r"\Common7\IDE",
VSROOT+r"\VC\BIN\amd64",
VSROOT+r"\Common7\Tools",
r"c:\Windows\Microsoft.NET\Framework\v3.5",
])
setGlobalOption('native.compilers', VisualStudio(VSROOT+r'\VC\bin\amd64'))
setGlobalOption('native.cxx.flags', ['/EHa', '/GR', '/O2', '/Ox', '/Ot', '/MD', '/nologo'])
else:
setGlobalOption('native.compilers', GCC())
setGlobalOption('native.cxx.flags', ['-fPIC', '-O3', '--std=c++0x'])
| Python | 0.999991 |
bdaf7b8f30b6a3a493cc5246dd908bdcdff69ab8 | Increase test coverage | tests/commands/load/test_load_cnv_report_cmd.py | tests/commands/load/test_load_cnv_report_cmd.py | # -*- coding: utf-8 -*-
import os
from scout.demo import cnv_report_path
from scout.commands import cli
def test_load_cnv_report(mock_app, case_obj):
"""Testing the load delivery report cli command"""
# Make sure the path to delivery report is a valid path
assert os.path.isfile(cnv_report_path)
runner = mock_app.test_cli_runner()
assert runner
# Test CLI function
result = runner.invoke(cli, ["load", "cnv-report", case_obj["_id"], cnv_report_path, "-u"],)
assert "saved report to case!" in result.output
assert result.exit_code == 0
def test_invalid_path_load_cnv_report(mock_app, case_obj):
"""Testing the load delivery report cli command"""
runner = mock_app.test_cli_runner()
assert runner
# Test CLI function
result = runner.invoke(cli, ["load", "cnv-report", case_obj["_id"], "invalid-path", "-u"],)
assert 'Path "invalid-path" does not exist.' in result.output
assert result.exit_code == 2
| # -*- coding: utf-8 -*-
import os
from scout.demo import cnv_report_path
from scout.commands import cli
def test_load_cnv_report(mock_app, case_obj):
"""Testing the load delivery report cli command"""
# Make sure the path to delivery report is a valid path
assert os.path.isfile(cnv_report_path)
runner = mock_app.test_cli_runner()
assert runner
# Test CLI function
result = runner.invoke(
cli,
["load", "cnv-report", case_obj["_id"], cnv_report_path, "-u"],
)
assert "saved report to case!" in result.output
assert result.exit_code == 0
| Python | 0 |
a403dff24e33e6b0ef0b31b4342a9b978f9090f2 | Improve admin display | src/cerberus_ac/admin.py | src/cerberus_ac/admin.py | # -*- coding: utf-8 -*-
"""Admin module."""
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from .apps import AppSettings
from .models import (
AccessHistory, PrivilegeHistory, Role, RoleHierarchy, RolePrivilege)
# class SecurityAdmin(AdminSite):
# pass
#
#
# class DataAdmin(AdminSite):
# pass
#
#
# class AuditAdmin(AdminSite):
# pass
#
#
# security_admin_site = SecurityAdmin(name='SecurityAdmin')
# data_admin_site = DataAdmin(name='DataAdmin')
# audit_admin_site = AuditAdmin(name='AuditAdmin')
#
# Use decorator like @security_admin_site.register(AccessHistory)
# TODO: override save_model methods for history
# https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_model
class RoleAdmin(admin.ModelAdmin):
"""Role admin class."""
class RolePrivilegeAdmin(admin.ModelAdmin):
"""Role privilege admin class."""
class RoleHierarchyAdmin(admin.ModelAdmin):
"""Role hierarchy admin class."""
list_display = ('role_type_a', 'role_id_a', 'role_type_b', 'role_id_b')
class AccessHistoryAdmin(admin.ModelAdmin):
"""Acces history admin class."""
list_display = (
'role_type',
'role_id',
'response',
'response_type',
'access_type',
'resource_type',
'resource_id',
'datetime',
'conveyor_type',
'conveyor_id'
)
class PrivilegeHistoryAdmin(admin.ModelAdmin):
"""Privilege history admin class."""
list_display = (
'datetime',
'user',
'action',
'role_type',
'role_id',
'role_link',
'authorized',
'access_type',
'resource_type',
'resource_id',
'resource_link')
def role_link(self, obj):
instance = AppSettings.get_mapping().instance_from_name_and_id(
obj.resource_type, obj.resource_id)
info = (instance._meta.app_label, instance._meta.model_name)
admin_url = reverse('admin:%s_%s_change' % info,
args=(instance.pk,))
return mark_safe('<a href="%s">%s</a>' % (admin_url, instance))
role_link.short_description = _('Role link')
def resource_link(self, obj):
instance = AppSettings.get_mapping().instance_from_name_and_id(
obj.resource_type, obj.resource_id)
info = (instance._meta.app_label, instance._meta.model_name)
admin_url = reverse('admin:%s_%s_change' % info,
args=(instance.pk,))
return mark_safe('<a href="%s">%s</a>' % (admin_url, instance))
resource_link.short_description = _('Resource link')
# class HierarchyHistoryAdmin(admin.ModelAdmin):
# pass
admin.site.register(Role, RoleAdmin)
admin.site.register(RolePrivilege, RolePrivilegeAdmin)
admin.site.register(RoleHierarchy, RoleHierarchyAdmin)
admin.site.register(AccessHistory, AccessHistoryAdmin)
admin.site.register(PrivilegeHistory, PrivilegeHistoryAdmin)
# admin.site.register(HierarchyHistory, HierarchyHistoryAdmin)
| # -*- coding: utf-8 -*-
"""Admin module."""
from django.contrib import admin
from .models import (
AccessHistory, PrivilegeHistory, Role, RoleHierarchy, RolePrivilege)
# class SecurityAdmin(AdminSite):
# pass
#
#
# class DataAdmin(AdminSite):
# pass
#
#
# class AuditAdmin(AdminSite):
# pass
#
#
# security_admin_site = SecurityAdmin(name='SecurityAdmin')
# data_admin_site = DataAdmin(name='DataAdmin')
# audit_admin_site = AuditAdmin(name='AuditAdmin')
#
# Use decorator like @security_admin_site.register(AccessHistory)
# TODO: override save_model methods for history
# https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_model
class RoleAdmin(admin.ModelAdmin):
"""Role admin class."""
class RolePrivilegeAdmin(admin.ModelAdmin):
"""Role privilege admin class."""
class RoleHierarchyAdmin(admin.ModelAdmin):
"""Role hierarchy admin class."""
class AccessHistoryAdmin(admin.ModelAdmin):
"""Acces history admin class."""
class PrivilegeHistoryAdmin(admin.ModelAdmin):
"""Privilege history admin class."""
# class HierarchyHistoryAdmin(admin.ModelAdmin):
# pass
admin.site.register(Role, RoleAdmin)
admin.site.register(RolePrivilege, RolePrivilegeAdmin)
admin.site.register(RoleHierarchy, RoleHierarchyAdmin)
admin.site.register(AccessHistory, AccessHistoryAdmin)
admin.site.register(PrivilegeHistory, PrivilegeHistoryAdmin)
# admin.site.register(HierarchyHistory, HierarchyHistoryAdmin)
| Python | 0 |
b7f4696d1384f656df71332055cd4ea87f85e3c9 | Bump to v0.2.3. | luminoth/__init__.py | luminoth/__init__.py | __version__ = '0.2.3'
__title__ = 'Luminoth'
__description__ = 'Computer vision toolkit based on TensorFlow'
__uri__ = 'https://luminoth.ai'
__doc__ = __description__ + ' <' + __uri__ + '>'
__author__ = 'Tryolabs'
__email__ = 'luminoth@tryolabs.com'
__license__ = 'BSD 3-Clause License'
__copyright__ = 'Copyright (c) 2018 Tryolabs S.A.'
__min_tf_version__ = '1.5'
import sys
# Check for a current TensorFlow installation.
try:
import tensorflow # noqa: F401
except ImportError:
sys.exit("""Luminoth requires a TensorFlow >= {} installation.
Depending on your use case, you should install either `tensorflow` or
`tensorflow-gpu` packages manually or via PyPI.""".format(__min_tf_version__))
# Import functions that are part of Luminoth's public interface.
from luminoth.cli import cli # noqa
from luminoth.io import read_image # noqa
from luminoth.tasks import Detector # noqa
from luminoth.vis import vis_objects # noqa
| __version__ = '0.2.3dev0'
__title__ = 'Luminoth'
__description__ = 'Computer vision toolkit based on TensorFlow'
__uri__ = 'https://luminoth.ai'
__doc__ = __description__ + ' <' + __uri__ + '>'
__author__ = 'Tryolabs'
__email__ = 'luminoth@tryolabs.com'
__license__ = 'BSD 3-Clause License'
__copyright__ = 'Copyright (c) 2018 Tryolabs S.A.'
__min_tf_version__ = '1.5'
import sys
# Check for a current TensorFlow installation.
try:
import tensorflow # noqa: F401
except ImportError:
sys.exit("""Luminoth requires a TensorFlow >= {} installation.
Depending on your use case, you should install either `tensorflow` or
`tensorflow-gpu` packages manually or via PyPI.""".format(__min_tf_version__))
# Import functions that are part of Luminoth's public interface.
from luminoth.cli import cli # noqa
from luminoth.io import read_image # noqa
from luminoth.tasks import Detector # noqa
from luminoth.vis import vis_objects # noqa
| Python | 0 |
ea5e6ca2e6523f0b2a585112b5fd5f18e9fcf969 | add namespace | ds9/library/parser.tac | ds9/library/parser.tac | %{
%}
%token INT_
%token REAL_
%token STRING_
%token FOOCMD_
%token EXITCMD_
%token CLOSE_
%token FIT_
%token OPEN_
%token TO_
%%
commands : commands command
| command
;
int : INT_ {set _ $1}
;
command : FOOCMD_ foo
| EXITCMD_ exit
;
numeric : int {set _ $1}
| REAL_ {set _ $1}
;
foo : STRING_ {puts "STRING $1"}
| INT_ {puts "INT $1"}
| REAL_ {puts "REAL $1"}
| OPEN_ {puts "OPEN"}
| CLOSE_ {puts "CLOSE"}
| TO_ fooTo
;
fooTo: FIT_ {puts "TO FIT"}
| numeric {puts "TO NUMERIC $1"}
;
exit: {puts "EXIT"; QuitDS9}
;
%%
proc yy::yyerror {s} {
puts stderr "parse error:"
puts stderr "$yy::yy_buffer"
puts stderr [format "%*s" $yy::yy_index ^]
}
| %{
%}
%token INT_
%token REAL_
%token STRING_
%token FOOCMD_
%token EXITCMD_
%token CLOSE_
%token FIT_
%token OPEN_
%token TO_
%%
commands : commands command
| command
;
int : INT_ {set _ $1}
;
command : FOOCMD_ foo
| EXITCMD_ exit
;
numeric : int {set _ $1}
| REAL_ {set _ $1}
;
foo : STRING_ {puts "STRING $1"}
| INT_ {puts "INT $1"}
| REAL_ {puts "REAL $1"}
| OPEN_ {puts "OPEN"}
| CLOSE_ {puts "CLOSE"}
| TO_ fooTo
;
fooTo: FIT_ {puts "TO FIT"}
| numeric {puts "TO NUMERIC $1"}
;
exit: {puts "EXIT"; QuitDS9}
;
%%
proc yyerror {s} {
puts stderr "parse error:"
puts stderr "$::yy_buffer"
puts stderr [format "%*s" $::yy_index ^]
}
| Python | 0.000015 |
818d89c897603eeb33caf1ca2cdaeae5c3010880 | Use passed directory in mako engine. | engines/mako_engine.py | engines/mako_engine.py | #!/usr/bin/env python
"""Provide the mako templating engine."""
from __future__ import print_function
from mako.template import Template
from mako.lookup import TemplateLookup
from . import Engine
class MakoEngine(Engine):
"""Mako templating engine."""
handle = 'mako'
def __init__(self, template, dirname=None, tolerant=False, **kwargs):
"""Initialize mako template."""
super(MakoEngine, self).__init__(**kwargs)
directories = [dirname] if dirname is not None else ['.']
lookup = TemplateLookup(directories=directories)
default_filters = ['filter_undefined'] if tolerant else None
encoding_errors = 'replace' if tolerant else 'strict'
imports = ['def filter_undefined(value):\n'
' if value is UNDEFINED:\n'
' return \'<UNDEFINED>\'\n'
' return value\n']
self.template = Template(template,
default_filters=default_filters,
encoding_errors=encoding_errors,
imports=imports,
lookup=lookup,
strict_undefined=not tolerant,
)
def apply(self, mapping):
"""Apply a mapping of name-value-pairs to a template."""
return self.template.render(**mapping)
| #!/usr/bin/env python
"""Provide the mako templating engine."""
from __future__ import print_function
from mako.template import Template
from mako.lookup import TemplateLookup
from . import Engine
class MakoEngine(Engine):
"""Mako templating engine."""
handle = 'mako'
def __init__(self, template, tolerant=False, **kwargs):
"""Initialize mako template."""
super(MakoEngine, self).__init__(**kwargs)
default_filters = ['filter_undefined'] if tolerant else None
encoding_errors = 'replace' if tolerant else 'strict'
imports = ['def filter_undefined(value):\n'
' if value is UNDEFINED:\n'
' return \'<UNDEFINED>\'\n'
' return value\n']
lookup = TemplateLookup(directories=['.'])
self.template = Template(template,
default_filters=default_filters,
encoding_errors=encoding_errors,
imports=imports,
lookup=lookup,
strict_undefined=not tolerant,
)
def apply(self, mapping):
"""Apply a mapping of name-value-pairs to a template."""
return self.template.render(**mapping)
| Python | 0 |
cb59aaff2d120ffa5f15b822ec21d3137f90184f | Bump logistic_requisition version - fixup | logistic_requisition/__openerp__.py | logistic_requisition/__openerp__.py | # -*- coding: utf-8 -*-
#
#
# Author: Joël Grand-Guillaume, Jacques-Etienne Baudoux, Guewen Baconnier
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Logistics Requisition",
"version": "1.4.1",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Purchase Management",
'complexity': "normal",
"images": [],
"website": "http://www.camptocamp.com",
"depends": ["sale_sourced_by_line",
"sale_owner_stock_sourcing",
"stock_dropshipping",
"purchase",
"purchase_requisition_bid_selection",
"mail",
"logistic_order",
"logistic_consignee",
"ngo_purchase",
"transport_information",
"purchase_requisition_transport_document",
"purchase_requisition_transport_multi_address",
"sale_transport_multi_address",
],
"demo": ['data/logistic_requisition_demo.xml'],
"data": ["wizard/assign_line_view.xml",
"wizard/cost_estimate_view.xml",
"wizard/logistic_requisition_cancel_view.xml",
"security/logistic_requisition.xml",
"security/ir.model.access.csv",
"data/logistic_requisition_data.xml",
"data/logistic_requisition_sequence.xml",
"view/logistic_requisition.xml",
"view/sale_order.xml",
"view/stock.xml",
"view/cancel_reason.xml",
"view/purchase_order.xml",
"view/report_logistic_requisition.xml",
"logistic_requisition_report.xml",
"data/logistic.requisition.cancel.reason.csv",
],
"test": ['test/line_assigned.yml',
'test/requisition_create_cost_estimate.yml',
'test/requisition_create_cost_estimate_only.yml',
'test/requisition_sourcing_with_tender.yml',
'test/requisition_cancel_reason.yml',
'test/logistic_requisition_report_test.yml',
],
'css': ['static/src/css/logistic_requisition.css'],
'installable': True,
'auto_install': False,
}
| # -*- coding: utf-8 -*-
#
#
# Author: Joël Grand-Guillaume, Jacques-Etienne Baudoux, Guewen Baconnier
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Logistics Requisition",
"version": "1.4",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Purchase Management",
'complexity': "normal",
"images": [],
"website": "http://www.camptocamp.com",
"depends": ["sale_sourced_by_line",
"sale_owner_stock_sourcing",
"stock_dropshipping",
"purchase",
"purchase_requisition_bid_selection",
"mail",
"logistic_order",
"logistic_consignee",
"ngo_purchase",
"transport_information",
"purchase_requisition_transport_document",
"purchase_requisition_transport_multi_address",
"sale_transport_multi_address",
],
"demo": ['data/logistic_requisition_demo.xml'],
"data": ["wizard/assign_line_view.xml",
"wizard/cost_estimate_view.xml",
"wizard/logistic_requisition_cancel_view.xml",
"security/logistic_requisition.xml",
"security/ir.model.access.csv",
"data/logistic_requisition_data.xml",
"data/logistic_requisition_sequence.xml",
"view/logistic_requisition.xml",
"view/sale_order.xml",
"view/stock.xml",
"view/cancel_reason.xml",
"view/purchase_order.xml",
"view/report_logistic_requisition.xml",
"logistic_requisition_report.xml",
"data/logistic.requisition.cancel.reason.csv",
],
"test": ['test/line_assigned.yml',
'test/requisition_create_cost_estimate.yml',
'test/requisition_create_cost_estimate_only.yml',
'test/requisition_sourcing_with_tender.yml',
'test/requisition_cancel_reason.yml',
'test/logistic_requisition_report_test.yml',
],
'css': ['static/src/css/logistic_requisition.css'],
'installable': True,
'auto_install': False,
}
| Python | 0 |
13260904c6d34d7554eea8152ceaa2ee8601e3e9 | Add missing import | pulldb/volumes.py | pulldb/volumes.py | # Copyright 2013 Russell Heilling
from datetime import datetime, date
import logging
from math import ceil
import re
from google.appengine.api import search
from google.appengine.ext import ndb
import pycomicvine
from pulldb import base
from pulldb import publishers
from pulldb import subscriptions
from pulldb import util
from pulldb.models.admin import Setting
from pulldb.models import comicvine
from pulldb.models.volumes import Volume, volume_key
class MainPage(base.BaseHandler):
def get(self):
template_values = self.base_template_values()
template = self.templates.get_template('volumes.html')
self.response.write(template.render(template_values))
class Search(base.BaseHandler):
def get(self):
def volume_detail(comicvine_volume):
try:
volume = volume_key(comicvine_volume).get()
subscription = False
subscription_key = subscriptions.subscription_key(volume.key)
if subscription_key:
subscription = subscription_key.urlsafe()
publisher_key = volume.publisher
publisher = None
if publisher_key:
publisher = publisher_key.get()
return {
'volume_key': volume.key.urlsafe(),
'volume': volume,
'publisher': publisher,
'subscribed': bool(subscription),
}
except AttributeError:
logging.warn('Could not look up volume %r', comicvine_volume)
cv = comicvine.load()
query = self.request.get('q')
volume_ids = self.request.get('volume_ids')
page = int(self.request.get('page', 0))
limit = int(self.request.get('limit', 20))
offset = page * limit
if volume_ids:
volumes = re.findall(r'(\d+)', volume_ids)
logging.debug('Found volume ids: %r', volumes)
results = []
for index in range(0, len(volumes), 100):
volume_page = volumes[index:min([index+100, len(volumes)])]
results.append(cv.fetch_volume_batch(volume_page))
results_count = len(results)
logging.debug('Found volumes: %r' % results)
elif query:
results_count, results = cv.search_volume(query, page=page, limit=limit)
logging.debug('Found volumes: %r' % results)
if offset + limit > results_count:
page_end = results_count
else:
page_end = offset + limit
logging.info('Retrieving results %d-%d / %d', offset, page_end,
results_count)
results_page = results[offset:page_end]
template_values = self.base_template_values()
template_values.update({
'query': query,
'volume_ids': volume_ids,
'page': page,
'limit': limit,
'results': (volume_detail(volume) for volume in results_page),
'results_count': results_count,
'page_url': util.StripParam(self.request.url, 'page'),
'page_count': int(ceil(1.0*results_count/limit)),
})
template = self.templates.get_template('volumes_search.html')
self.response.write(template.render(template_values))
class RefreshVolumes(base.BaseHandler):
def get(self):
# When run from cron cycle over all issues weekly
shard_count=7
shard=date.today().weekday()
cv = comicvine.load()
refresh_callback = partial(
refresh_volume_shard, int(shard), int(shard_count), comicvine=cv)
query = Subscription.query(projection=('volume',), distinct=True)
volume_keys = query.map(refresh_callback)
update_count = sum([1 for volume in volume_keys if volume])
status = 'Updated %d/%d volumes' % (
update_count, len(volume_keys))
logging.info(status)
app = base.create_app([
('/volumes', MainPage),
('/volumes/search', Search),
])
| # Copyright 2013 Russell Heilling
from datetime import datetime
import logging
from math import ceil
import re
from google.appengine.api import search
from google.appengine.ext import ndb
import pycomicvine
from pulldb import base
from pulldb import publishers
from pulldb import subscriptions
from pulldb import util
from pulldb.models.admin import Setting
from pulldb.models import comicvine
from pulldb.models.volumes import Volume, volume_key
class MainPage(base.BaseHandler):
def get(self):
template_values = self.base_template_values()
template = self.templates.get_template('volumes.html')
self.response.write(template.render(template_values))
class Search(base.BaseHandler):
def get(self):
def volume_detail(comicvine_volume):
try:
volume = volume_key(comicvine_volume).get()
subscription = False
subscription_key = subscriptions.subscription_key(volume.key)
if subscription_key:
subscription = subscription_key.urlsafe()
publisher_key = volume.publisher
publisher = None
if publisher_key:
publisher = publisher_key.get()
return {
'volume_key': volume.key.urlsafe(),
'volume': volume,
'publisher': publisher,
'subscribed': bool(subscription),
}
except AttributeError:
logging.warn('Could not look up volume %r', comicvine_volume)
cv = comicvine.load()
query = self.request.get('q')
volume_ids = self.request.get('volume_ids')
page = int(self.request.get('page', 0))
limit = int(self.request.get('limit', 20))
offset = page * limit
if volume_ids:
volumes = re.findall(r'(\d+)', volume_ids)
logging.debug('Found volume ids: %r', volumes)
results = []
for index in range(0, len(volumes), 100):
volume_page = volumes[index:min([index+100, len(volumes)])]
results.append(cv.fetch_volume_batch(volume_page))
results_count = len(results)
logging.debug('Found volumes: %r' % results)
elif query:
results_count, results = cv.search_volume(query, page=page, limit=limit)
logging.debug('Found volumes: %r' % results)
if offset + limit > results_count:
page_end = results_count
else:
page_end = offset + limit
logging.info('Retrieving results %d-%d / %d', offset, page_end,
results_count)
results_page = results[offset:page_end]
template_values = self.base_template_values()
template_values.update({
'query': query,
'volume_ids': volume_ids,
'page': page,
'limit': limit,
'results': (volume_detail(volume) for volume in results_page),
'results_count': results_count,
'page_url': util.StripParam(self.request.url, 'page'),
'page_count': int(ceil(1.0*results_count/limit)),
})
template = self.templates.get_template('volumes_search.html')
self.response.write(template.render(template_values))
class RefreshVolumes(base.BaseHandler):
def get(self):
# When run from cron cycle over all issues weekly
shard_count=7
shard=date.today().weekday()
cv = comicvine.load()
refresh_callback = partial(
refresh_volume_shard, int(shard), int(shard_count), comicvine=cv)
query = Subscription.query(projection=('volume',), distinct=True)
volume_keys = query.map(refresh_callback)
update_count = sum([1 for volume in volume_keys if volume])
status = 'Updated %d/%d volumes' % (
update_count, len(volume_keys))
logging.info(status)
app = base.create_app([
('/volumes', MainPage),
('/volumes/search', Search),
])
| Python | 0.000466 |
d3057d336332f8315580cc7fa7c6e3d3fb1cbcc8 | Use a format-string to build the command | py3status/modules/taskwarrior.py | py3status/modules/taskwarrior.py | # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
filter: arguments passed to the command
(default 'start.before:today status:pending')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
SAMPLE OUTPUT
{'full_text': '1 Prepare first draft, 2 Buy milk'}
"""
import json
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
filter = 'start.before:today status:pending'
def post_config_hook(self):
if not self.py3.check_commands('task'):
raise Exception(STRING_NOT_INSTALLED)
def taskWarrior(self):
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
task_command = 'task %s export' % self.filter
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
filter: arguments passed to the command
(default 'start.before:today status:pending')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
SAMPLE OUTPUT
{'full_text': '1 Prepare first draft, 2 Buy milk'}
"""
import json
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
filter = 'start.before:today status:pending'
def post_config_hook(self):
if not self.py3.check_commands('task'):
raise Exception(STRING_NOT_INSTALLED)
def taskWarrior(self):
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
task_command = 'task ' + self.filter + ' export'
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| Python | 0.00028 |
ae9b7bf45832b69f6a3ca94e24ed6d2e7d3a384d | Update __init__.py | gemstone/__init__.py | gemstone/__init__.py | """
Build microservices with Python
"""
from gemstone.core.microservice import MicroService
from gemstone.core.decorators import private_api_method, public_method, event_handler
from gemstone.core.handlers import TornadoJsonRpcHandler
from gemstone.client.remote_service import RemoteService
from gemstone.util import as_completed, first_completed, make_callbacks
__author__ = "Vlad Calin"
__email__ = "vlad.s.calin@gmail.com"
__version__ = "0.5.0"
__all__ = [
# core classes
'MicroService',
'RemoteService',
# decorators
'public_method',
'private_api_method',
'event_handler',
# tornado handler
'TornadoJsonRpcHandler',
# async utilities
'as_completed',
'first_completed',
'make_callbacks'
]
| """
Build microservices with Python
"""
from gemstone.core.microservice import MicroService
from gemstone.core.decorators import private_api_method, public_method, event_handler
from gemstone.core.handlers import TornadoJsonRpcHandler
from gemstone.client.remote_service import RemoteService
from gemstone.util import as_completed, first_completed, make_callbacks
# Package metadata.
__author__ = "Vlad Calin"
__email__ = "vlad.s.calin@gmail.com"
__version__ = "0.4.0"
# Explicit public API: the names exported by `from gemstone import *`.
__all__ = [
    # core classes
    'MicroService',
    'RemoteService',
    # decorators
    'public_method',
    'private_api_method',
    'event_handler',
    # tornado handler
    'TornadoJsonRpcHandler',
    # async utilities
    'as_completed',
    'first_completed',
    'make_callbacks'
]
| Python | 0.000072 |
322d10c8932f160a305fab126401e3f172453e7e | Refactor prototxt generation. | generate_prototxt.py | generate_prototxt.py | #!/usr/bin/python
import os
import sys
caffe_root = os.getenv('CAFFE_ROOT', './')
sys.path.insert(0, caffe_root + '/python')
import caffe
from caffe import layers as L, params as P
def lenet(batch_size, phase):
    """Build a placeholder LeNet NetSpec and return it as a protobuf message.

    Args:
        batch_size: nominal batch size.
            NOTE(review): currently unused -- the data layers are empty
            L.Input() placeholders; confirm whether it should shape them.
        phase: 'TRAIN' appends a SoftmaxWithLoss layer; 'TEST' appends a
            Softmax probability layer.

    Returns:
        The caffe NetParameter protobuf produced by NetSpec.to_proto().
    """
    n = caffe.NetSpec()
    # empty layers as placeholders
    # the resulting prototxt must be edited manually
    n.data = L.Input()
    n.label = L.Input()
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1, num_output=2, weight_filler=dict(type='xavier'))
    if phase == 'TRAIN':
        n.loss = L.SoftmaxWithLoss(n.score, n.label)
    elif phase == 'TEST':  # bug fix: `else if` is a SyntaxError in Python
        n.prob = L.Softmax(n.score)
    return n.to_proto()
# Write the generated train/test network definitions into the current
# working directory as placeholder prototxt files.
with open('lenet_auto_train.prototxt', 'w') as f:
    f.write(str(lenet(50, 'TRAIN')))
with open('lenet_auto_test.prototxt', 'w') as f:
    f.write(str(lenet(50, 'TEST')))
| #!/usr/bin/python
import os
import sys
caffe_root = os.getenv('CAFFE_ROOT', './')
sys.path.insert(0, caffe_root + '/python')
import caffe
from caffe import layers as L, params as P
def lenet():
    """Build the placeholder LeNet NetSpec and return it as a protobuf message.

    The data/label layers are empty L.Input() placeholders, so the emitted
    prototxt must be edited manually before use.
    """
    net = caffe.NetSpec()
    # empty layers as placeholders
    # the resulting prototxt must be edited manually
    net.data = L.Input()
    net.label = L.Input()
    net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.fc1 = L.InnerProduct(net.pool2, num_output=500, weight_filler=dict(type='xavier'))
    net.relu1 = L.ReLU(net.fc1, in_place=True)
    net.score = L.InnerProduct(net.relu1, num_output=2, weight_filler=dict(type='xavier'))
    net.loss = L.SoftmaxWithLoss(net.score, net.label)
    return net.to_proto()
# Write the generated network definition into the current working directory
# (the same spec is emitted for both the train and the test file).
with open('lenet_auto_train.prototxt', 'w') as f:
    f.write(str(lenet()))
with open('lenet_auto_test.prototxt', 'w') as f:
    f.write(str(lenet()))
| Python | 0 |
739df1a5be70f5044d7c6be357776e36ae330ce3 | Fix a bug in the OpenMP backend. | pyfr/backends/openmp/compiler.py | pyfr/backends/openmp/compiler.py | # -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import tempfile
import itertools as it
from abc import ABCMeta, abstractmethod
from ctypes import CDLL
import numpy as np
from pyfr.ctypesutil import platform_libname
from pyfr.nputil import npdtype_to_ctypestype
from pyfr.util import memoize, chdir
class SourceModule(object):
    """Compile C source into a shared library and load it via ctypes.

    Subclasses implement _build(), which compiles/links in the current
    working directory and returns the produced library's filename.
    """
    __metaclass__ = ABCMeta  # Python-2 style abstract base class

    # Monotonic counter giving each scratch directory a unique prefix.
    _dir_seq = it.count()

    def __init__(self, src, cfg):
        # src: C source code as a string; cfg: configuration object.
        self._src = src
        self._cfg = cfg
        # Create a scratch directory
        tmpdir = tempfile.mkdtemp(prefix='pyfr-%d-' % next(self._dir_seq))
        try:
            with chdir(tmpdir):
                # Compile and link the source
                lname = self._build()
                # Load
                self._mod = CDLL(os.path.abspath(lname))
        finally:
            # Scratch directory is removed even when the build fails.
            # NOTE(review): deleting the file of a loaded library is fine on
            # POSIX but may fail on Windows -- confirm target platforms.
            shutil.rmtree(tmpdir)

    def function(self, name, restype, argtypes):
        """Look up *name* in the loaded library and attach ctypes
        signatures translated from numpy dtypes.
        """
        # Get the function
        fn = getattr(self._mod, name)
        fn.restype = npdtype_to_ctypestype(restype)
        fn.argtypes = [npdtype_to_ctypestype(a) for a in argtypes]
        return fn

    @abstractmethod
    def _build(self):
        # Must return the filename of the built shared library.
        pass
class GccSourceModule(SourceModule):
    """SourceModule backend that builds with GCC (or a compatible compiler)."""

    def __init__(self, src, cfg):
        # Find GCC (or a compatible alternative)
        self._cc = cfg.getpath('backend-openmp', 'cc', 'cc', abs=False)
        # Delegate
        super(GccSourceModule, self).__init__(src, cfg)

    def _build(self):
        """Compile and link self._src in the CWD; return the library name."""
        # File names
        cn, on, ln = 'tmp.c', 'tmp.o', platform_libname('tmp')
        # Write the source code out
        with open(cn, 'w') as f:
            f.write(self._src)
        # Compile.  check_call raises CalledProcessError on failure and its
        # return value carries no information, so the previously unused
        # `out =` binding has been dropped (same for the link step below).
        cmd = [self._cc,
               '-std=c99',       # Enable C99 support
               '-Ofast',         # Optimise, incl. -ffast-math
               '-march=native',  # Use CPU-specific instructions
               '-fopenmp',       # Enable OpenMP support
               '-fPIC',          # Position-independent code for shared lib
               '-c', '-o', on, cn]
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
        # Link
        cmd = [self._cc,
               '-shared',   # Create a shared library
               '-fopenmp',  # Required for OpenMP
               '-o', ln, on]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return ln
| # -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import tempfile
import itertools as it
from abc import ABCMeta, abstractmethod
from ctypes import CDLL
import numpy as np
from pyfr.ctypesutil import platform_libname
from pyfr.nputil import npdtype_to_ctypestype
from pyfr.util import memoize, chdir
class SourceModule(object):
    """Compile C source into a shared library and load it via ctypes.

    Subclasses implement _build(), which compiles/links in the current
    working directory and returns the produced library's filename.
    """
    __metaclass__ = ABCMeta  # Python-2 style abstract base class

    # Monotonic counter giving each scratch directory a unique prefix.
    _dir_seq = it.count()

    def __init__(self, src, cfg):
        # src: C source code as a string; cfg: configuration object.
        self._src = src
        self._cfg = cfg
        # Create a scratch directory
        tmpdir = tempfile.mkdtemp(prefix='pyfr-%d-' % next(self._dir_seq))
        try:
            with chdir(tmpdir):
                # Compile and link the source
                lname = self._build()
                # Load
                self._mod = CDLL(os.path.abspath(lname))
        finally:
            # Scratch directory is removed even when the build fails.
            shutil.rmtree(tmpdir)

    def function(self, name, restype, argtypes):
        """Look up *name* in the loaded library and attach ctypes
        signatures translated from numpy dtypes.
        """
        # Get the function
        fn = getattr(self._mod, name)
        fn.restype = npdtype_to_ctypestype(restype)
        fn.argtypes = [npdtype_to_ctypestype(a) for a in argtypes]
        return fn

    @abstractmethod
    def _build(self):
        # Must return the filename of the built shared library.
        pass
class GccSourceModule(SourceModule):
    """SourceModule backend that builds with GCC (or a compatible compiler)."""

    def __init__(self, src, cfg):
        # Find GCC (or a compatible alternative).  Bug fix: the compiler was
        # read from the 'backend-c' config section, but this class belongs to
        # the OpenMP backend (it compiles and links with -fopenmp below), so
        # the correct section is 'backend-openmp'.
        self._cc = cfg.getpath('backend-openmp', 'cc', 'cc', abs=False)
        # Delegate
        super(GccSourceModule, self).__init__(src, cfg)

    def _build(self):
        """Compile and link self._src in the CWD; return the library name."""
        # File names
        cn, on, ln = 'tmp.c', 'tmp.o', platform_libname('tmp')
        # Write the source code out
        with open(cn, 'w') as f:
            f.write(self._src)
        # Compile (check_call raises CalledProcessError on failure; the
        # unused `out =` bindings were dropped)
        cmd = [self._cc,
               '-std=c99',       # Enable C99 support
               '-Ofast',         # Optimise, incl. -ffast-math
               '-march=native',  # Use CPU-specific instructions
               '-fopenmp',       # Enable OpenMP support
               '-fPIC',          # Position-independent code for shared lib
               '-c', '-o', on, cn]
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
        # Link
        cmd = [self._cc,
               '-shared',   # Create a shared library
               '-fopenmp',  # Required for OpenMP
               '-o', ln, on]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return ln
| Python | 0 |
81d41ceaf89848851d8353021f01f41c724aaae8 | Add docstring to PySplunk | pygraphc/misc/splunk/pysplunk.py | pygraphc/misc/splunk/pysplunk.py | from os import system, remove
class PySplunk(object):
    """Cluster logs through the Splunk command line SDK [SplunkDev2016]_.

    References
    ----------
    .. [SplunkDev2016] Command line examples in the Splunk SDK for Python.
       http://dev.splunk.com/view/python-sdk/SP-CAAAEFK
    """
    def __init__(self, username, source, host, output_mode, tmp_file='/tmp/pysplunk_cluster.csv'):
        """Store the parameters of the Splunk search.

        Parameters
        ----------
        username : str
            Username to access the Splunk daemon.  No password is required
            since the free version of Splunk is used.
        host : str
            Hostname for the source log.
        source : str
            Identifier for the log source, usually the log's filename.
        output_mode : str
            Output format of the clustering result; csv is recommended.
        tmp_file : str
            Path for the temporary clustering result.
        """
        self.username = username
        # Escape spaces so the source survives shell word splitting.
        self.source = source.replace(' ', '\ ')
        self.host = host
        self.output_mode = output_mode
        self.tmp_file = tmp_file
    def get_splunk_cluster(self):
        """Run the cluster search and return {cluster_id: [line indices]}."""
        # Run the Python Splunk API command, redirecting its output to the
        # temporary result file.
        command = ('python search.py --username=' + self.username +
                   ' "search source=' + self.source +
                   ' host=' + self.host +
                   ' sourcetype=linux_secure | cluster labelfield=cluster_id labelonly=t |'
                   ' table cluster_id _raw | sort _time | reverse" '
                   '--output_mode=' + self.output_mode + " > " + self.tmp_file)
        system(command)
        # Group the result's line indices by their cluster identifier.
        with open(self.tmp_file, 'r') as result:
            lines = result.readlines()
        clusters = {}
        for index, line in enumerate(lines):
            cluster_id = line.split(',')[0]
            clusters.setdefault(cluster_id, []).append(index)
        # Clean up the temporary result file.
        remove(self.tmp_file)
        return clusters
| from os import system, remove
class PySplunk(object):
    """Get log clustering using the Python Splunk API.

    The search is executed through Splunk's command line SDK (search.py)
    and its output is parsed from a temporary file.
    """
    def __init__(self, username, source, host, output_mode, tmp_file='/tmp/pysplunk_cluster.csv'):
        # username    : Splunk daemon user (free version, no password needed).
        # source      : log source identifier; spaces are escaped for the shell.
        # host        : hostname for the source log.
        # output_mode : output format of the search result (csv recommended).
        # tmp_file    : where the intermediate clustering result is written.
        self.username = username
        self.source = source.replace(' ', '\ ')
        self.host = host
        self.output_mode = output_mode
        self.tmp_file = tmp_file
    def get_splunk_cluster(self):
        """Run the cluster search and return {cluster_id: [log line indices]}."""
        # run Python Splunk API command
        command = 'python search.py --username=' + self.username + ' "search source=' + self.source + \
                  ' host=' + self.host + ' sourcetype=linux_secure | cluster labelfield=cluster_id labelonly=t |' \
                  ' table cluster_id _raw | sort _time | reverse" ' + '--output_mode=' + \
                  self.output_mode + " > " + self.tmp_file
        system(command)
        # get clusters
        with open(self.tmp_file, 'r') as f:
            logs = f.readlines()
        clusters = {}
        for index, log in enumerate(logs):
            cluster_id = log.split(',')[0]
            # NOTE(review): with csv output the first line may be a header
            # row and will be counted as its own cluster -- confirm.
            clusters[cluster_id] = clusters.get(cluster_id, []) + [index]
        # remove tmp_file
        remove(self.tmp_file)
        return clusters
| Python | 0.000001 |
c3d856561887c61839a3781251f36929af0e8718 | relax bool | pycaffe2/utils.py | pycaffe2/utils.py | from caffe2.proto import caffe2_pb2
from caffe.proto import caffe_pb2
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import numpy as np
def CaffeBlobToNumpyArray(blob):
    """Convert a caffe BlobProto into a float32 ndarray shaped
    (num, channels, height, width).
    """
    shape = (blob.num, blob.channels, blob.height, blob.width)
    return np.asarray(blob.data, dtype=np.float32).reshape(shape)
def Caffe2TensorToNumpyArray(tensor):
    """Convert a caffe2 TensorProto holding float data into a float32
    ndarray of shape tensor.dims.
    """
    flat = np.asarray(tensor.float_data, dtype=np.float32)
    return flat.reshape(tensor.dims)
def NumpyArrayToCaffe2Tensor(arr, name):
    """Pack a numpy array into a named float caffe2 TensorProto."""
    tensor = caffe2_pb2.TensorProto()
    tensor.name = name
    tensor.data_type = caffe2_pb2.TensorProto.FLOAT
    tensor.dims.extend(arr.shape)
    # Row-major flatten into the repeated float_data field.
    flattened = arr.flatten().astype(float)
    tensor.float_data.extend(list(flattened))
    return tensor
def MakeArgument(key, value):
    """Makes an argument based on the value type.

    Scalars map to the singular fields (f/i/s), protobuf Messages are
    stored serialized, and homogeneous iterables map to the repeated
    fields (floats/ints/strings).

    Raises:
        ValueError: when the value's type cannot be mapped to a field.
    """
    argument = caffe2_pb2.Argument()
    argument.name = key
    if type(value) is float:
        argument.f = value
    elif type(value) is int or type(value) is bool:
        # We make a relaxation that a boolean variable will also be stored as int.
        argument.i = value
    elif type(value) is str:
        argument.s = value
    elif isinstance(value, Message):
        argument.s = value.SerializeToString()
    elif all(type(v) is float for v in value):
        argument.floats.extend(value)
    elif all(type(v) is int for v in value):
        argument.ints.extend(value)
    elif all(type(v) is str for v in value):
        argument.strings.extend(value)
    elif all(isinstance(v, Message) for v in value):
        # Bug fix: this branch referenced the undefined name `values`,
        # raising a NameError whenever a sequence of Messages was passed.
        argument.strings.extend([v.SerializeToString() for v in value])
    else:
        raise ValueError("Unknown argument type: key=%s value=%s, value type=%s" %
                         (key, str(value), str(type(value))))
    return argument
def TryReadProtoWithClass(cls, s):
    """Parse *s* into a protobuffer of type *cls*.

    The text format is tried first; on a text-format parse error the input
    is re-interpreted as a binary (serialized) protobuffer.

    Inputs:
      cls: a protobuffer class.
      s: a string of either binary or text protobuffer content.
    Outputs:
      proto: the parsed protobuffer instance.
    Throws:
      google.protobuf.message.DecodeError: if we cannot decode the message.
    """
    proto = cls()
    try:
        text_format.Parse(s, proto)
    except text_format.ParseError:
        proto.ParseFromString(s)
    return proto
def GetContentFromProto(obj, function_map):
    """Dispatch *obj* to the handler registered for its exact type.

    Args:
        obj: any object (typically a parsed protobuffer).
        function_map: mapping from concrete class to a one-argument callable.
    Returns:
        The matching handler's return value, or None when no exact type
        match exists.
    """
    # Portability fix: dict.iteritems() is Python-2-only.  Iterating the
    # keys directly behaves identically on both Python 2 and 3.
    for cls in function_map:
        if type(obj) is cls:
            return function_map[cls](obj)
def GetContentFromProtoString(s, function_map):
for cls, func in function_map.iteritems():
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
raise DecodeError("Cannot find a fit protobuffer class.") | from caffe2.proto import caffe2_pb2
from caffe.proto import caffe_pb2
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import numpy as np
def CaffeBlobToNumpyArray(blob):
    """Convert a caffe BlobProto into a float32 ndarray shaped
    (num, channels, height, width).
    """
    return np.asarray(blob.data, dtype=np.float32).reshape(
        blob.num, blob.channels, blob.height, blob.width)
def Caffe2TensorToNumpyArray(tensor):
    """Convert a caffe2 TensorProto holding float data into a float32
    ndarray of shape tensor.dims.
    """
    return np.asarray(tensor.float_data, dtype=np.float32).reshape(tensor.dims)
def NumpyArrayToCaffe2Tensor(arr, name):
    """Pack a numpy array into a named float caffe2 TensorProto."""
    tensor = caffe2_pb2.TensorProto()
    tensor.data_type = caffe2_pb2.TensorProto.FLOAT
    tensor.name = name
    tensor.dims.extend(arr.shape)
    # Row-major flatten into the repeated float_data field.
    tensor.float_data.extend(list(arr.flatten().astype(float)))
    return tensor
def MakeArgument(key, value):
    """Makes an argument based on the value type.

    Scalars map to the singular fields (f/i/s), protobuf Messages are
    stored serialized, and homogeneous iterables map to the repeated
    fields (floats/ints/strings).

    Raises:
        ValueError: when the value's type cannot be mapped to a field.
    """
    argument = caffe2_pb2.Argument()
    argument.name = key
    if type(value) is float:
        argument.f = value
    elif type(value) is int:
        argument.i = value
    elif type(value) is str:
        argument.s = value
    elif isinstance(value, Message):
        argument.s = value.SerializeToString()
    elif all(type(v) is float for v in value):
        argument.floats.extend(value)
    elif all(type(v) is int for v in value):
        argument.ints.extend(value)
    elif all(type(v) is str for v in value):
        argument.strings.extend(value)
    elif all(isinstance(v, Message) for v in value):
        # Bug fix: this branch referenced the undefined name `values`,
        # raising a NameError whenever a sequence of Messages was passed.
        argument.strings.extend([v.SerializeToString() for v in value])
    else:
        raise ValueError("Unknown argument type: key=%s value=%s, value type=%s" %
                         (key, str(value), str(type(value))))
    return argument
def TryReadProtoWithClass(cls, s):
    """Reads a protobuffer with the given proto class.

    The text format is tried first; on a text-format parse error the input
    is re-read as a binary (serialized) protobuffer.

    Inputs:
      cls: a protobuffer class.
      s: a string of either binary or text protobuffer content.
    Outputs:
      proto: the protobuffer of cls
    Throws:
      google.protobuf.message.DecodeError: if we cannot decode the message.
    """
    obj = cls()
    try:
        text_format.Parse(s, obj)
        return obj
    except text_format.ParseError as e:
        obj.ParseFromString(s)
        return obj
def GetContentFromProto(obj, function_map):
    """Dispatch *obj* to the handler registered for its exact type.

    Args:
        obj: any object (typically a parsed protobuffer).
        function_map: mapping from concrete class to a one-argument callable.
    Returns:
        The matching handler's return value, or None when no exact type
        match exists.
    """
    # Portability fix: dict.iteritems() is Python-2-only.  Iterating the
    # keys directly behaves identically on both Python 2 and 3.
    for cls in function_map:
        if type(obj) is cls:
            return function_map[cls](obj)
def GetContentFromProtoString(s, function_map):
for cls, func in function_map.iteritems():
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
raise DecodeError("Cannot find a fit protobuffer class.") | Python | 0.999777 |
931c016e43402f847c6e58b4679f7f5cf132776f | add DESI_LOGLEVEL environment variable, add doc, define WARNING... in module | py/desispec/log.py | py/desispec/log.py | """
Utility functions to dump log messages
We can have something specific for DESI in the future but for now we use the standard python
"""
import sys
import logging
import os
# Module-level cache: one shared "DESI" logger for the whole process.
desi_logger = None

# just for convenience to avoid importing logging
# we duplicate the logging levels
DEBUG = logging.DEBUG        # Detailed information, typically of interest only when diagnosing problems.
INFO = logging.INFO          # Confirmation that things are working as expected.
WARNING = logging.WARNING    # An indication that something unexpected happened, or indicative of some problem
                             # in the near future (e.g. "disk space low"). The software is still working as expected.
ERROR = logging.ERROR        # Due to a more serious problem, the software has not been able to perform some function.
CRITICAL = logging.CRITICAL  # A serious error, indicating that the program itself may be unable to continue running.

# see example of usage in test/test_log.py

def get_logger(level=None):
    """Return the shared "DESI" logger, configured to log to stdout.

    Args:
        level: debugging level.
            If level=None, the DESI_LOGLEVEL environment variable is
            consulted (accepting only DEBUG, INFO, WARNING, ERROR).
            If DESI_LOGLEVEL is not set either, the default level is INFO.
    """
    global desi_logger
    if level is None:
        desi_level = os.getenv("DESI_LOGLEVEL")
        if desi_level is None:
            level = INFO
        else:
            dico = {"DEBUG": DEBUG, "INFO": INFO, "WARNING": WARNING, "ERROR": ERROR}
            # Portability fix: dict.has_key() is Python-2-only; the `in`
            # operator is equivalent and works on Python 2 and 3.
            if desi_level in dico:
                level = dico[desi_level]
            else:
                # amusingly I need the logger to dump a warning here
                logger = get_logger(level=WARNING)
                message = "ignore DESI_LOGLEVEL=%s (only recognize" % desi_level
                for k in dico:
                    message += " %s" % k
                message += ")"
                logger.warning(message)
                level = INFO
    if desi_logger is not None:
        if level is not None:
            desi_logger.setLevel(level)
        return desi_logger
    desi_logger = logging.getLogger("DESI")
    desi_logger.setLevel(level)
    # Drop any pre-existing handlers so output is not duplicated.
    while len(desi_logger.handlers) > 0:
        h = desi_logger.handlers[0]
        desi_logger.removeHandler(h)
    ch = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s: %(message)s')
    ch.setFormatter(formatter)
    desi_logger.addHandler(ch)
    return desi_logger
| """
Utility functions to dump log messages
We can have something specific for DESI in the future but for now we use the standard python
"""
import sys
import logging
# Module-level cache: one shared "DESI" logger for the whole process.
desi_logger = None

def get_logger(level=logging.DEBUG):
    """Return the shared "DESI" logger, creating and configuring it to log
    to stdout on first use.

    The *level* argument is honoured only on the first call; later calls
    return the cached logger unchanged.
    """
    global desi_logger
    if desi_logger is None:
        logger = logging.getLogger("DESI")
        logger.setLevel(level)
        # Drop any pre-existing handlers so output is not duplicated.
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
        stream = logging.StreamHandler(sys.stdout)
        stream.setFormatter(logging.Formatter(
            '%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s: %(message)s'))
        logger.addHandler(stream)
        desi_logger = logger
    return desi_logger
| Python | 0 |
c8c2785b156523204e530cd78268686886ce2a37 | Fix incorrect module publics | py/oldfart/make.py | py/oldfart/make.py | import os
import re
import subprocess
# Names exported via `import *`.
__all__ = ['NOTHING_DONE', 'SUCCESS', 'NO_RULE', 'FAILURE', 'Maker']
# Status codes returned by Maker.make().
NOTHING_DONE = 1
SUCCESS = 2
NO_RULE = 3
FAILURE = 4
class Maker(object):
    """Thin wrapper around `make(1)` for a single project/Makefile pair."""

    def __init__(self, project_dir='.', makefile='Makefile'):
        # Resolve both paths eagerly so make() is independent of the
        # caller's current working directory.
        self.project_dir = os.path.abspath(project_dir)
        self.makefile = os.path.abspath(os.path.join(project_dir, makefile))

    def make(self, target):
        """Runs `make(1)` on `target` and returning a tuple `(status, output)`
        where `status` is one of:

        - `make.SUCCESS`: the target was successfully generated
        - `make.NOTHING_DONE`: the target was already up-to-date
        - `make.NO_RULE`: there is no rule to build the requested target
        - `make.FAILURE`: `make(1)` exited otherwise with a non-zero error code

        Returned `output` contains always the mixed output from `stdout` and
        `stderr`.
        """
        try:
            capture = subprocess.check_output(
                ['make', '--makefile=' + self.makefile, target],
                cwd=self.project_dir, stderr=subprocess.STDOUT,
                universal_newlines=True)
            # NOTE(review): this message format is GNU-make specific --
            # confirm against the make versions that must be supported.
            if re.match(r"make: `[^']*' is up to date.", capture):
                return (NOTHING_DONE, capture)
            else:
                return (SUCCESS, capture)
        except subprocess.CalledProcessError as e:
            # Bug fix: `target` is interpolated into a regular expression,
            # so it must be escaped -- a target containing regex
            # metacharacters (e.g. '(' or '+') previously raised re.error
            # or mismatched.
            no_rule = (r"make: \*\*\* No rule to make target `{:s}'. Stop."
                       .format(re.escape(target)))
            if re.match(no_rule, e.output):
                return (NO_RULE, e.output)
            else:
                return (FAILURE, e.output)
| import os
import re
import subprocess
# Names exported via `import *`.  Bug fix: the list previously named
# 'NOOP' and 'FAIL', which are not defined anywhere in this module and
# made `from ... import *` fail; it now lists the status constants that
# actually exist below.
__all__ = ['NOTHING_DONE', 'SUCCESS', 'NO_RULE', 'FAILURE', 'Maker']
# Status codes returned by Maker.make().
NOTHING_DONE = 1
SUCCESS = 2
NO_RULE = 3
FAILURE = 4
class Maker(object):
    """Thin wrapper around `make(1)` for a single project/Makefile pair."""
    def __init__(self, project_dir='.', makefile='Makefile'):
        # Resolve both paths eagerly so make() is independent of the
        # caller's current working directory.
        self.project_dir = os.path.abspath(project_dir)
        self.makefile = os.path.abspath(os.path.join(project_dir, makefile))
    def make(self, target):
        """Runs `make(1)` on `target` and returning a tuple `(status, output)`
        where `status` is one of:
        - `make.SUCCESS`: the target was successfully generated
        - `make.NOTHING_DONE`: the target was already up-to-date
        - `make.NO_RULE`: there is no rule to build the requested target
        - `make.FAILURE`: `make(1)` exited otherwise with a non-zero error code
        Returned `output` contains always the mixed output from `stdout` and
        `stderr`.
        """
        try:
            capture = subprocess.check_output(
                ['make', '--makefile=' + self.makefile, target],
                cwd=self.project_dir, stderr=subprocess.STDOUT,
                universal_newlines=True)
            # NOTE(review): these message formats are GNU-make specific --
            # confirm against the make versions that must be supported.
            if re.match(r"make: `[^']*' is up to date.", capture):
                return (NOTHING_DONE, capture)
            else:
                return (SUCCESS, capture)
        except subprocess.CalledProcessError as e:
            # NOTE(review): `target` is interpolated into a regex without
            # re.escape(); metacharacters in a target name would break or
            # skew this match -- confirm.
            if re.match(r"make: \*\*\* No rule to make target `{:s}'. Stop."
                        .format(target), e.output):
                return (NO_RULE, e.output)
            else:
                return (FAILURE, e.output)
| Python | 0.000025 |
89c2e367e4d691e83ccf92055c1dc8be59e05497 | use list comprehensions for legibility | pynder/session.py | pynder/session.py | from . import api
from . import models
class Session(object):
    """Authenticated Tinder session built on top of the TinderAPI client."""

    def __init__(self, facebook_id, facebook_token):
        self._api = api.TinderAPI()
        # perform authentication
        self._api.auth(facebook_id, facebook_token)
        self.profile = models.Profile(self._api.profile(), self)

    def nearby_users(self):
        """Return nearby candidates wrapped as Hopeful objects."""
        recommendations = self._api.recs()['results']
        return [models.Hopeful(rec, self) for rec in recommendations]

    def update_location(self, latitude, longitude):
        """Report the user's current coordinates to the API."""
        return self._api.ping(latitude, longitude)

    def matches(self):
        """Return the current matches wrapped as Match objects."""
        return [models.Match(item, self) for item in self._api.matches()]
| from . import api
from . import models
class Session(object):
    """Authenticated Tinder session built on top of the TinderAPI client."""

    def __init__(self, facebook_id, facebook_token):
        self._api = api.TinderAPI()
        # perform authentication
        self._api.auth(facebook_id, facebook_token)
        self.profile = models.Profile(self._api.profile(), self)

    def nearby_users(self):
        """Return nearby candidates wrapped as Hopeful objects.

        Rewritten from map()+lambda to a list comprehension for
        consistency: it is clearer and always yields a list (on Python 3,
        map() returns a lazy iterator callers could consume only once).
        """
        return [models.Hopeful(user, self)
                for user in self._api.recs()['results']]

    def update_location(self, latitude, longitude):
        """Report the user's current coordinates to the API."""
        return self._api.ping(latitude, longitude)

    def matches(self):
        """Return the current matches wrapped as Match objects."""
        return [models.Match(match, self) for match in self._api.matches()]
| Python | 0.000001 |
042c32cbeca30da82239e7f6b9d83e88a2391dce | Fix name mangling to also replace : with _ | pybindgen/utils.py | pybindgen/utils.py | import sys
from typehandlers.codesink import CodeSink
from typehandlers.base import TypeConfigurationError, CodeGenerationError
import version
import settings
def write_preamble(code_sink, min_python_version=(2, 3)):
    """
    Write a preamble, containing includes, #define's and typedef's
    necessary to correctly compile the code with the given minimum python
    version.
    """
    assert isinstance(code_sink, CodeSink)
    assert isinstance(min_python_version, tuple)
    # Banner carrying the generator version, plus the headers and
    # PY_SSIZE_T setup that every generated module needs.
    code_sink.writeln('''/* This file was generated by PyBindGen %s */
#define PY_SSIZE_T_CLEAN
#include <Python.h>
''' % '.'.join([str(x) for x in version.__version__]))
    # Python < 2.4 does not provide Py_CLEAR; emit a compatible fallback.
    if min_python_version < (2, 4):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02040000
#define Py_CLEAR(op) \
do { \
if (op) { \
PyObject *tmp = (PyObject *)(op); \
(op) = NULL; \
Py_DECREF(tmp); \
} \
} while (0)
#endif
''')
    # Python < 2.5 predates Py_ssize_t, the ssize-based slot typedefs and
    # Py_VISIT; emit fallbacks for all of them.
    if min_python_version < (2, 5):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
typedef inquiry lenfunc;
typedef intargfunc ssizeargfunc;
typedef intobjargproc ssizeobjargproc;
#define Py_VISIT(op) \
do { \
if (op) { \
int vret = visit((PyObject *)(op), arg); \
if (vret) \
return vret; \
} \
} while (0)
#endif
''')
    # GCC-specific macro used to silence unused-parameter warnings in the
    # generated code; expands to nothing on other compilers.
    code_sink.writeln(r'''
#if __GNUC__ > 2
# define PYBINDGEN_UNUSED(param) param __attribute__((__unused__))
#elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
# define PYBINDGEN_UNUSED(param) __attribute__((__unused__)) param
#else
# define PYBINDGEN_UNUSED(param)
#endif /* !__GNUC__ */
''')
def get_mangled_name(base_name, template_args):
    """Return a C-identifier-safe mangled name for a template instantiation.

    Spaces and ':' characters inside the template arguments are both
    mapped to '_'.  (for internal pybindgen use)
    """
    assert isinstance(base_name, str)
    assert isinstance(template_args, (tuple, list))
    if not template_args:
        return base_name
    mangled = [arg.replace(' ', '_').replace(':', '_') for arg in template_args]
    return '%s__lt__%s__gt__' % (base_name, '_'.join(mangled))
class SkipWrapper(Exception):
    """Exception that is raised to signal a wrapper failed to generate but
    must simply be skipped (code generation continues without it).

    for internal pybindgen use"""
def call_with_error_handling(callable, args, kwargs, wrapper,
                             exceptions_to_handle=(TypeConfigurationError, CodeGenerationError)):
    """Invoke callable(*args, **kwargs), routing selected failures to the
    configured settings.error_handler.

    When the handler returns true for an exception listed in
    *exceptions_to_handle*, SkipWrapper is raised so the caller skips this
    wrapper; otherwise the original exception is re-raised.

    for internal pybindgen use"""
    if settings.error_handler is None:
        return callable(*args, **kwargs)
    else:
        try:
            return callable(*args, **kwargs)
        except Exception as ex:
            # Bug fix: `except Exception, ex` is Python-2-only syntax; the
            # `as` form is equivalent and also valid on Python 3.
            if isinstance(ex, exceptions_to_handle):
                dummy1, dummy2, traceback = sys.exc_info()
                if settings.error_handler.handle_error(wrapper, ex, traceback):
                    raise SkipWrapper
                else:
                    raise
            else:
                raise
| import sys
from typehandlers.codesink import CodeSink
from typehandlers.base import TypeConfigurationError, CodeGenerationError
import version
import settings
def write_preamble(code_sink, min_python_version=(2, 3)):
    """
    Write a preamble, containing includes, #define's and typedef's
    necessary to correctly compile the code with the given minimum python
    version.
    """
    assert isinstance(code_sink, CodeSink)
    assert isinstance(min_python_version, tuple)
    # Banner carrying the generator version, plus the headers and
    # PY_SSIZE_T setup that every generated module needs.
    code_sink.writeln('''/* This file was generated by PyBindGen %s */
#define PY_SSIZE_T_CLEAN
#include <Python.h>
''' % '.'.join([str(x) for x in version.__version__]))
    # Python < 2.4 does not provide Py_CLEAR; emit a compatible fallback.
    if min_python_version < (2, 4):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02040000
#define Py_CLEAR(op) \
do { \
if (op) { \
PyObject *tmp = (PyObject *)(op); \
(op) = NULL; \
Py_DECREF(tmp); \
} \
} while (0)
#endif
''')
    # Python < 2.5 predates Py_ssize_t, the ssize-based slot typedefs and
    # Py_VISIT; emit fallbacks for all of them.
    if min_python_version < (2, 5):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
typedef inquiry lenfunc;
typedef intargfunc ssizeargfunc;
typedef intobjargproc ssizeobjargproc;
#define Py_VISIT(op) \
do { \
if (op) { \
int vret = visit((PyObject *)(op), arg); \
if (vret) \
return vret; \
} \
} while (0)
#endif
''')
    # GCC-specific macro used to silence unused-parameter warnings in the
    # generated code; expands to nothing on other compilers.
    code_sink.writeln(r'''
#if __GNUC__ > 2
# define PYBINDGEN_UNUSED(param) param __attribute__((__unused__))
#elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
# define PYBINDGEN_UNUSED(param) __attribute__((__unused__)) param
#else
# define PYBINDGEN_UNUSED(param)
#endif /* !__GNUC__ */
''')
def get_mangled_name(base_name, template_args):
    """Return a C-identifier-safe mangled name for a template instantiation.

    Consistency fix: spaces AND ':' characters in the template arguments
    are mapped to '_' (the sibling implementation in this file already
    replaces ':'), so qualified names such as ``ns::T`` stay valid
    identifiers.  (for internal pybindgen use)
    """
    assert isinstance(base_name, str)
    assert isinstance(template_args, (tuple, list))
    if template_args:
        return '%s__lt__%s__gt__' % (base_name, '_'.join(
            [arg.replace(' ', '_').replace(':', '_') for arg in template_args]))
    else:
        return base_name
class SkipWrapper(Exception):
    """Exception that is raised to signal a wrapper failed to generate but
    must simply be skipped (code generation continues without it).

    for internal pybindgen use"""
def call_with_error_handling(callable, args, kwargs, wrapper,
                             exceptions_to_handle=(TypeConfigurationError, CodeGenerationError)):
    """Invoke callable(*args, **kwargs), routing selected failures to the
    configured settings.error_handler.

    When the handler returns true for an exception listed in
    *exceptions_to_handle*, SkipWrapper is raised so the caller skips this
    wrapper; otherwise the original exception is re-raised.

    for internal pybindgen use"""
    if settings.error_handler is None:
        return callable(*args, **kwargs)
    else:
        try:
            return callable(*args, **kwargs)
        except Exception as ex:
            # Bug fix: `except Exception, ex` is Python-2-only syntax; the
            # `as` form is equivalent and also valid on Python 3.
            if isinstance(ex, exceptions_to_handle):
                dummy1, dummy2, traceback = sys.exc_info()
                if settings.error_handler.handle_error(wrapper, ex, traceback):
                    raise SkipWrapper
                else:
                    raise
            else:
                raise
| Python | 0.000022 |
f2a83197d9e0eaf04a3c062aa9a16c197ffd3c4d | add error logging | pyroute2/proxy.py | pyroute2/proxy.py | '''
Netlink proxy engine
'''
import errno
import struct
import logging
import traceback
import threading
class NetlinkProxy(object):
    '''
    Proxy schemes::
        User -> NetlinkProxy -> Kernel
                       |
         <---------+
        User <- NetlinkProxy <- Kernel
    '''
    def __init__(self, policy='forward', nl=None, lock=None):
        # policy: verdict returned when a plugin handles a message without
        #         producing an explicit result.
        # nl:     netlink object handed through to plugins.
        # lock:   serialises plugin invocations; created here if not given.
        self.nl = nl
        self.lock = lock or threading.Lock()
        # Maps message type (int) -> plugin callable.
        self.pmap = {}
        self.policy = policy
    def handle(self, data):
        """Dispatch one raw netlink message to the plugin registered for
        its type.

        Returns a {'verdict': ..., 'data': ...} dict, or None when no
        plugin matches the message type.
        """
        #
        # match the packet
        #
        # Message type is the 16-bit field at byte offset 4 of the header.
        ptype = struct.unpack('H', data[4:6])[0]
        plugin = self.pmap.get(ptype, None)
        if plugin is not None:
            with self.lock:
                try:
                    ret = plugin(data, self.nl)
                    if ret is None:
                        # Plugin consumed the message: build a type-2
                        # (error) message with code 0, echoing bytes 8:16
                        # of the original header -- presumably an ACK;
                        # confirm against the netlink wire format.
                        msg = struct.pack('IHH', 40, 2, 0)
                        msg += data[8:16]
                        msg += struct.pack('I', 0)
                        # nlmsgerr struct alignment
                        msg += b'\0' * 20
                        return {'verdict': self.policy,
                                'data': msg}
                    else:
                        return ret
                except Exception as e:
                    # Keep the full traceback in the log; the caller only
                    # sees the encoded errno below.
                    logging.error(traceback.format_exc())
                    # errmsg
                    if isinstance(e, (OSError, IOError)):
                        code = e.errno
                    else:
                        code = errno.ECOMM
                    msg = struct.pack('HH', 2, 0)
                    msg += data[8:16]
                    msg += struct.pack('I', code)
                    msg += data
                    # Prepend the total length field.
                    msg = struct.pack('I', len(msg) + 4) + msg
                    return {'verdict': 'error',
                            'data': msg}
        return None
| '''
Netlink proxy engine
'''
import errno
import struct
import threading
class NetlinkProxy(object):
    '''
    Proxy schemes::
        User -> NetlinkProxy -> Kernel
                       |
         <---------+
        User <- NetlinkProxy <- Kernel
    '''
    def __init__(self, policy='forward', nl=None, lock=None):
        # policy: verdict returned when a plugin handles a message without
        #         producing an explicit result.
        # nl:     netlink object handed through to plugins.
        # lock:   serialises plugin invocations; created here if not given.
        self.nl = nl
        self.lock = lock or threading.Lock()
        # Maps message type (int) -> plugin callable.
        self.pmap = {}
        self.policy = policy
    def handle(self, data):
        """Dispatch one raw netlink message to the plugin registered for
        its type.

        Returns a {'verdict': ..., 'data': ...} dict, or None when no
        plugin matches the message type.
        """
        #
        # match the packet
        #
        # Message type is the 16-bit field at byte offset 4 of the header.
        ptype = struct.unpack('H', data[4:6])[0]
        plugin = self.pmap.get(ptype, None)
        if plugin is not None:
            with self.lock:
                try:
                    ret = plugin(data, self.nl)
                    if ret is None:
                        # Plugin consumed the message: build a type-2
                        # (error) message with code 0, echoing bytes 8:16
                        # of the original header -- presumably an ACK;
                        # confirm against the netlink wire format.
                        msg = struct.pack('IHH', 40, 2, 0)
                        msg += data[8:16]
                        msg += struct.pack('I', 0)
                        # nlmsgerr struct alignment
                        msg += b'\0' * 20
                        return {'verdict': self.policy,
                                'data': msg}
                    else:
                        return ret
                except Exception as e:
                    # errmsg
                    if isinstance(e, (OSError, IOError)):
                        code = e.errno
                    else:
                        code = errno.ECOMM
                    msg = struct.pack('HH', 2, 0)
                    msg += data[8:16]
                    msg += struct.pack('I', code)
                    msg += data
                    # Prepend the total length field.
                    msg = struct.pack('I', len(msg) + 4) + msg
                    return {'verdict': 'error',
                            'data': msg}
        return None
| Python | 0.000004 |
82379ddc145673cca008127099e4bdc2e0aa503b | make sure we won't traverse 'None' object | pyswagger/scan.py | pyswagger/scan.py | from __future__ import absolute_import
from .base import BaseObj
from .utils import scope_compose
import six
def default_tree_traversal(root):
    """Depth-first generator over *root* and its descendants.

    Yields (scope, name, obj) triples; children are discovered through
    each object's ``_children_`` attribute and scoped via scope_compose().
    """
    stack = [(None, None, root)]
    while stack:
        scope, name, obj = stack.pop()
        child_scope = scope_compose(scope, name)
        stack.extend((child_scope,) + child for child in obj._children_)
        yield scope, name, obj
class DispatcherMeta(type):
    """Metaclass that gives every Dispatcher subclass its own routing table.

    Without it, subclasses would share (and mutate) the ``obj_route`` dict
    and ``result_fn`` slot defined on a parent class.
    """

    def __new__(metacls, name, bases, spc):
        if 'obj_route' not in spc:
            # Give this class a private routing table and result slot
            # instead of inheriting the parent's mutable ones.
            spc.update(obj_route={}, result_fn=[None])
        return type.__new__(metacls, name, bases, spc)
class Dispatcher(six.with_metaclass(DispatcherMeta, object)):
    """Registry that maps BaseObj subclasses to handler callbacks."""

    obj_route = {}
    result_fn = [None]

    @classmethod
    def __add_route(cls, t, f):
        """Record *f* as one of the handlers for target class *t*."""
        if not issubclass(t, BaseObj):
            raise ValueError('target_cls should be a subclass of BaseObj, but got:' + str(t))
        # Several handlers may be registered against the same class.
        cls.obj_route.setdefault(t, []).append(f)

    @classmethod
    def register(cls, target):
        """Decorator factory: register the decorated callback for every
        class listed in *target*, returning the callback unchanged.
        """
        def outer_fn(f):
            for t in target:
                cls.__add_route(t, f)
            return f
        return outer_fn

    @classmethod
    def result(cls, f):
        """Decorator: install *f* as the post-processing hook for handler
        results.
        """
        # Wrapped in a list so attribute lookup never turns it into a
        # bound method.
        cls.result_fn = [f]
        return f
class Scanner(object):
    """ Scanner

    Walks an object tree and invokes the Dispatcher routes registered on
    the supplied route objects.
    """
    def __init__(self, app):
        # app: the application object; passed through to every handler.
        super(Scanner, self).__init__()
        self.__app = app
    @property
    def app(self):
        return self.__app
    def __build_route(self, route):
        """Collect (instance, obj_route, result_fn) triples from every
        Dispatcher attribute found on the given route objects.
        """
        ret = []
        for r in route:
            for attr in r.__class__.__dict__:
                o = getattr(r, attr)
                if type(o) == DispatcherMeta:
                    ret.append((r, o.obj_route, o.result_fn[0]))
        return ret
    def scan(self, route, root, nexter=default_tree_traversal):
        """Traverse *root* with *nexter* and call every handler whose
        registered class appears in a visited object's MRO (stopping at
        BaseObj).

        Raises:
            ValueError: when root is None.
        """
        # NOTE(review): `root is None` would be the idiomatic comparison.
        if root == None:
            raise ValueError('Can\'t scan because root==None')
        merged_r = self.__build_route(route)
        for scope, name, obj in nexter(root):
            for the_self, r, res in merged_r:
                def handle_cls(cls):
                    f = r.get(cls, None)
                    if f:
                        for ff in f:
                            ret = ff(the_self, scope, name, obj, self.app)
                            if res:
                                res(the_self, ret)
                # Walk the MRO (minus `object`) so handlers registered on
                # base classes also fire.
                for cls in obj.__class__.__mro__[:-1]:
                    if cls is BaseObj:
                        break
                    handle_cls(cls)
| from __future__ import absolute_import
from .base import BaseObj
from .utils import scope_compose
import six
def default_tree_traversal(root):
    """ default tree traversal

    Depth-first generator yielding (scope, name, obj) triples; children
    are discovered through each object's `_children_` attribute and
    scoped via scope_compose().
    """
    objs = [(None, None, root)]
    while len(objs) > 0:
        scope, name, obj = objs.pop()
        # get children
        new_scope = scope_compose(scope, name)
        objs.extend(map(lambda c: (new_scope,) + c, obj._children_))
        yield scope, name, obj
class DispatcherMeta(type):
""" metaclass for Dispatcher
"""
def __new__(metacls, name, bases, spc):
if 'obj_route' not in spc.keys():
# forcely create a new obj_route
# but not share the same one with parents.
spc['obj_route'] = {}
spc['result_fn'] = [None]
return type.__new__(metacls, name, bases, spc)
class Dispatcher(six.with_metaclass(DispatcherMeta, object)):
""" Dispatcher
"""
obj_route = {}
result_fn = [None]
@classmethod
def __add_route(cls, t, f):
"""
"""
if not issubclass(t, BaseObj):
raise ValueError('target_cls should be a subclass of BaseObj, but got:' + str(t))
# allow register multiple handler function
# against one object
if t in cls.obj_route.keys():
cls.obj_route[t].append(f)
else:
cls.obj_route[t] = [f]
@classmethod
def register(cls, target):
"""
"""
def outer_fn(f):
# what we did is simple,
# register target_cls as key, and f as callback
# then keep this record in cls.
for t in target:
cls.__add_route(t, f)
# nothing is decorated. Just return original one.
return f
return outer_fn
@classmethod
def result(cls, f):
"""
"""
# avoid bound error
cls.result_fn = [f]
return f
class Scanner(object):
""" Scanner
"""
def __init__(self, app):
super(Scanner, self).__init__()
self.__app = app
@property
def app(self):
return self.__app
def __build_route(self, route):
"""
"""
ret = []
for r in route:
for attr in r.__class__.__dict__:
o = getattr(r, attr)
if type(o) == DispatcherMeta:
ret.append((r, o.obj_route, o.result_fn[0]))
return ret
def scan(self, route, root, nexter=default_tree_traversal):
"""
"""
merged_r = self.__build_route(route)
for scope, name, obj in nexter(root):
for the_self, r, res in merged_r:
def handle_cls(cls):
f = r.get(cls, None)
if f:
for ff in f:
ret = ff(the_self, scope, name, obj, self.app)
if res:
res(the_self, ret)
for cls in obj.__class__.__mro__[:-1]:
if cls is BaseObj:
break
handle_cls(cls)
| Python | 0.99859 |
060f7400f27f0452eb4ed11ffed07aab16230126 | Update cts link | pynayzr/streams.py | pynayzr/streams.py | # -*- coding: utf-8 -*-
import os
import asyncio
import subprocess
import tempfile
from PIL import Image
support_news = {
'ttv': 'https://www.youtube.com/watch?v=yk2CUjbyyQY',
'ctv': 'https://www.youtube.com/watch?v=hVNbIZYi1nI',
'cts': 'https://www.youtube.com/watch?v=TL8mmew3jb8',
'pts': 'https://www.youtube.com/watch?v=_isseGKrquc',
'ebc': 'https://www.youtube.com/watch?v=dxpWqjvEKaM',
'cti': 'https://www.youtube.com/watch?v=wUPPkSANpyo',
'ftv': 'https://www.youtube.com/watch?v=XxJKnDLYZz4',
'set': 'https://www.youtube.com/watch?v=4ZVUmEUFwaY',
'tvbs': 'https://www.youtube.com/watch?v=Hu1FkdAOws0'
}
def get(news):
"""Get Livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = [
'streamlink',
'-O',
support_news[news],
'720p'
]
ffmpeg = [
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
]
p1 = subprocess.Popen(streamlink, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
p2 = subprocess.Popen(ffmpeg, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget(news):
"""Async get livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = ' '.join([
'streamlink',
'-O',
support_news[news],
'720p'
])
ffmpeg = ' '.join([
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
])
read, write = os.pipe()
p1 = await asyncio.create_subprocess_shell(
streamlink,
stdout=write,
stderr=asyncio.subprocess.DEVNULL)
os.close(write)
p2 = await asyncio.create_subprocess_shell(
ffmpeg,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
stdin=read)
os.close(read)
await p1.communicate()
await p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget_all():
async def mark(key, coro):
return key, await coro
d = {news: aget(news) for news in support_news}
return {
key: result
for key, result in await asyncio.gather(
*(mark(key, coro) for key, coro in d.items()))
}
| # -*- coding: utf-8 -*-
import os
import asyncio
import subprocess
import tempfile
from PIL import Image
support_news = {
'ttv': 'https://www.youtube.com/watch?v=yk2CUjbyyQY',
'ctv': 'https://www.youtube.com/watch?v=XBne4oJGEhE',
'cts': 'https://www.youtube.com/watch?v=TL8mmew3jb8',
'pts': 'https://www.youtube.com/watch?v=_isseGKrquc',
'ebc': 'https://www.youtube.com/watch?v=dxpWqjvEKaM',
'cti': 'https://www.youtube.com/watch?v=wUPPkSANpyo',
'ftv': 'https://www.youtube.com/watch?v=XxJKnDLYZz4',
'set': 'https://www.youtube.com/watch?v=4ZVUmEUFwaY',
'tvbs': 'https://www.youtube.com/watch?v=Hu1FkdAOws0'
}
def get(news):
"""Get Livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = [
'streamlink',
'-O',
support_news[news],
'720p'
]
ffmpeg = [
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
]
p1 = subprocess.Popen(streamlink, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
p2 = subprocess.Popen(ffmpeg, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget(news):
"""Async get livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = ' '.join([
'streamlink',
'-O',
support_news[news],
'720p'
])
ffmpeg = ' '.join([
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
])
read, write = os.pipe()
p1 = await asyncio.create_subprocess_shell(
streamlink,
stdout=write,
stderr=asyncio.subprocess.DEVNULL)
os.close(write)
p2 = await asyncio.create_subprocess_shell(
ffmpeg,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
stdin=read)
os.close(read)
await p1.communicate()
await p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget_all():
async def mark(key, coro):
return key, await coro
d = {news: aget(news) for news in support_news}
return {
key: result
for key, result in await asyncio.gather(
*(mark(key, coro) for key, coro in d.items()))
}
| Python | 0 |
1aa687b70aea9074ae28d1154ae0db4364add26e | Rewrite example.py. | pyoommf/example.py | pyoommf/example.py | from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh(lx, ly, lz, dx, dy, dz)
# Create a simulation object.
sim = Sim(mesh, Ms)
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
sim.create_mif()
"""
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
# Get the results.
results = sim.result()
"""
| import sim, mesh
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = mesh.Mesh(lx, ly, lz, dx, dy, dz)
# Create a simulation object.
sim = sim.Sim(mesh, Ms)
# Add energies.
sim.add_exchange(A)
sim.add_demag()
sim.add_zeeman(H)
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
# Get the results.
results = sim.result()
| Python | 0.000003 |
50ae1d3fb7e14fec94831a4c6667c7b1ba2e073b | add python script | python/python2.py | python/python2.py | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# install
# sudo pip install pymongo
# sudo pip install MySQL-python
# sudo install_name_tool -change libmysqlclient.18.dylib /usr/local/mysql/lib/libmysqlclient.18.dylib /Library/Python/2.7/site-packages/_mysql.so
# sudo pip install requests
# sudo pip install threadpool
# sudo pip install apscheduler
# command
python -V
import sys, pprint
pprint.pprint(sys.path)
dir(copy)
help(copy.copy)
print copy.__doc__
print copy.__file__
import webbrowser
webbrowser.open("http://www.baidu.com")
import urllib
html = urllib.urlopen("http://www.baidu.com")
temp_file = urllib.urlretrieve("http://www.baidu.com")
urllib.urlcleanup()
# script
for letter in 'Python':
print 'current letter:', letter
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
print 'current fruit:', fruits[index]
with open("/tmp/file.txt") as file:
do(file)
f = open(filename)
for line in f.readlines():
process(line)
f.close()
import fileinput
for line in fileinput.input(line):
process(line)
f = open(filename)
for line in f:
process(line)
f.close()
| #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# install
# sudo pip install pymongo
# sudo pip install MySQL-python
# sudo install_name_tool -change libmysqlclient.18.dylib /usr/local/mysql/lib/libmysqlclient.18.dylib /Library/Python/2.7/site-packages/_mysql.so
# sudo pip install requests
# sudo pip install threadpool
# sudo pip install apscheduler
# command
python -V
import sys, pprint
pprint.pprint(sys.path)
dir(copy)
help(copy.copy)
print copy.__doc__
print copy.__file__
import webbrowser
webbrowser.open("http://www.baidu.com")
# script
for letter in 'Python':
print 'current letter:', letter
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
print 'current fruit:', fruits[index]
with open("/tmp/file.txt") as file:
do(file)
f = open(filename)
for line in f.readlines():
process(line)
f.close()
import fileinput
for line in fileinput.input(line):
process(line)
f = open(filename)
for line in f:
process(line)
f.close()
| Python | 0.000019 |
c0cbc2458c42bfb116c0d631c837f042f66d33a8 | Add explanatory comments to Python varargs script | python/varargs.py | python/varargs.py | def f(x, y=1000, *z):
print('x={} y={} z={}'.format(x,y,z))
f(0) # x=0, y=1000, z=()
f(0,1) # x=0, y=1, z=()
f(0,1,2) # x=0, y=1, z=(2,)
f(0,1,2,3) # x=0, y=1, z=(2,3)
f(0,1,2,3,4) # x=0, y=1, z=(2,3,4)
f(*[i for i in range(6)]) # x=0, y=1, z=(2,3,4,5)
f(*range(7)) # x=0, y=1, z=(2,3,4,5,6)
| def f(x, y=1000, *z):
print('x={} y={} z={}'.format(x,y,z))
f(0)
f(0,1)
f(0,1,2)
f(0,1,2,3)
f(0,1,2,3,4)
f(*[i for i in range(6)])
f(*range(7))
| Python | 0 |
04c3cac3054626773bc0434453378cb295f7e38c | Add handling of invalid values | pytus2000/read.py | pytus2000/read.py | import pandas as pd
from .datadicts import diary
def read_diary_file(path_to_file):
return pd.read_csv(
path_to_file,
delimiter='\t',
converters=_column_name_to_type_mapping(diary),
low_memory=False # some columns seem to have mixed types
)
def _column_name_to_type_mapping(module):
mapping = {}
for member in module.Variable:
try:
module.__dict__[member.name]
mapping[member.name] = _enum_converter(module.__dict__[member.name])
except KeyError:
pass # nothing to do; there is no enum
return mapping
def _enum_converter(enumcls):
def enum_converter(value):
if value == ' ':
return None
else:
try:
value = enumcls(value)
except ValueError as ve:
print(ve)
return None
else:
return value
return enum_converter
| import pandas as pd
from .datadicts import diary
def read_diary_file(path_to_file):
return pd.read_csv(
path_to_file,
delimiter='\t',
nrows=50,
converters=_column_name_to_type_mapping(diary),
low_memory=False # some columns seem to have mixed types
)
def _column_name_to_type_mapping(module):
mapping = {}
for member in module.Variable:
try:
module.__dict__[member.name]
mapping[member.name] = _enum_converter(module.__dict__[member.name])
except KeyError:
pass # nothing to do; there is no enum
return mapping
def _enum_converter(enumcls):
def enum_converter(value):
if value == ' ':
return None
else:
return enumcls(value)
return enum_converter
| Python | 0.000007 |
fd421a4c5f7cdacdc98aa049b4650c9d1d62267a | Fix some issues with open. | grit/command/Open.py | grit/command/Open.py | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import platform
import random
from grit import Call
from grit import Git
from grit import GitRoot
from grit import Settings
from grit.String import startswith
HELP = """
grit open [filename]
Open the filename as a Github URL in the browser.
Selects the first file that starts with filename. If filename is missing,
opens the current directory in the browser.
"""
"""
What should we be able to open?
* The current directory.
* A file.
* A found file.
in
* our repo
* the upstream repo
* some other repo.
And:
* A pull request.
* the pull request for this branch, if any.
"""
SAFE = True
_OPEN_COMMANDS = {
'Darwin': 'open',
'Linux': 'xdg-open',
}
_URL = 'https://github.com/{user}/{project}/tree/{branch}/{path}'
def open_url(branch, path,
project=Settings.PROJECT,
user=Settings.USER):
path = os.path.relpath(path, GitRoot.ROOT)
u = _URL.format(branch=branch, path=path, project=project, user=user)
Call.call('%s %s' % (_OPEN_COMMANDS[platform.system()], u))
def open(filename=''):
if not platform.system() in _OPEN_COMMANDS:
raise ValueError("Can't open a URL for platform.system() = " + plat)
branch = Git.branch()
full_path = os.getcwd()
if filename:
path, f = os.path.split(filename)
full_path = os.path.join(full_path, path)
if not os.path.exists(full_path):
raise ValueError("Path %s doesn't exist." % full_path)
if f:
for p in os.listdir(full_path):
if startswith(p, f):
full_path = os.path.join(full_path, p)
break
else:
raise ValueError("Can't find file matching " + filename)
open_url(branch=Git.branch(), path=full_path)
| from __future__ import absolute_import, division, print_function, unicode_literals
import os
import platform
import random
from grit import Call
from grit import Git
from grit import GitRoot
from grit import Settings
from grit.String import startswith
HELP = """
grit open [filename]
Open the filename as a Github URL in the browser.
Selects the first file that starts with filename. If filename is missing,
opens the current directory in the browser.
"""
"""
What should we be able to open?
* The current directory.
* A file.
* A found file.
in
* our repo
* the upstream repo
* some other repo.
And:
* A pull request.
* the pull request for this branch, if any.
"""
SAFE = True
_OPEN_COMMANDS = {
'Darwin': 'open',
'Linux': 'xdg-open',
}
_URL = 'https://github.com/{user}/{project}/tree/{branch}/{path}'
def open_url(url):
Call.call('%s %s' % (_OPEN_COMMANDS[platform.system()], url))
def open(filename=''):
if not platform.system() in _OPEN_COMMANDS:
raise ValueError("Can't open a URL for platform.system() = " + plat)
branch = Git.branch()
full_path = os.getcwd()
if filename:
path, f = os.path.split(filename)
full_path = os.path.join(full_path, path)
if not os.path.exists(full_path):
raise ValueError("Path %s doesn't exist." % full_path)
if f:
for p in os.listdir(full_path):
if startswith(p, f):
full_path = os.path.join(full_path, p)
break
else:
raise ValueError("Can't find file matching " + filename)
url = _URL.format(
branch=Git.branch(),
path=os.path.relpath(full_path, GitRoot.ROOT),
project=Settings.PROJECT,
user=Settings.USER)
open_url(url)
| Python | 0 |
ace38408875e31e1dfc8e6b1f2e2bf956fffc761 | - message type 'groupchat' is valid | pyxmpp/message.py | pyxmpp/message.py | #
# (C) Copyright 2003 Jacek Konieczny <jajcus@bnet.pl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import libxml2
from stanza import Stanza,StanzaError
from utils import to_utf8,from_utf8
message_types=("normal","chat","headline","error","groupchat")
class Message(Stanza):
stanza_type="message"
def __init__(self,node=None,**kw):
self.node=None
if isinstance(node,Message):
pass
elif isinstance(node,Stanza):
raise TypeError,"Couldn't make Message from other Stanza"
elif isinstance(node,libxml2.xmlNode):
pass
elif node is not None:
raise TypeError,"Couldn't make Message from %r" % (type(node),)
if kw.has_key("type") and kw["type"] and kw["type"] not in message_types:
raise StanzaError,"Invalid message type: %r" % (kw["type"],)
if kw.has_key("body"):
body=kw["body"]
del kw["body"]
else:
body=None
if kw.has_key("subject"):
subject=kw["subject"]
del kw["subject"]
else:
subject=None
if kw.has_key("thread"):
thread=kw["thread"]
del kw["thread"]
else:
thread=None
if node is None:
node="message"
apply(Stanza.__init__,[self,node],kw)
if subject is not None:
self.node.newTextChild(None,"subject",to_utf8(subject))
if body is not None:
self.node.newTextChild(None,"body",to_utf8(body))
if thread is not None:
self.node.newTextChild(None,"thread",to_utf8(thread))
def get_subject(self):
n=self.xpath_eval("subject")
if n:
return from_utf8(n[0].getContent())
else:
return None
def get_thread(self):
n=self.xpath_eval("thread")
if n:
return from_utf8(n[0].getContent())
else:
return None
def copy(self):
return Message(self)
def get_body(self):
n=self.xpath_eval("body")
if n:
return from_utf8(n[0].getContent())
else:
return None
def make_error_response(self,cond):
if self.get_type() == "error":
raise StanzaError,"Errors may not be generated in response to errors"
m=Message(type="error",fr=self.get_to(),to=self.get_from(),
id=self.get_id(),error_cond=cond)
if self.node.children:
for n in list(self.node.children):
n=n.copyNode(1)
m.node.children.addPrevSibling(n)
return m
| #
# (C) Copyright 2003 Jacek Konieczny <jajcus@bnet.pl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import libxml2
from stanza import Stanza,StanzaError
from utils import to_utf8,from_utf8
message_types=("normal","chat","headline","error")
class Message(Stanza):
stanza_type="message"
def __init__(self,node=None,**kw):
self.node=None
if isinstance(node,Message):
pass
elif isinstance(node,Stanza):
raise TypeError,"Couldn't make Message from other Stanza"
elif isinstance(node,libxml2.xmlNode):
pass
elif node is not None:
raise TypeError,"Couldn't make Message from %r" % (type(node),)
if kw.has_key("type") and kw["type"] and kw["type"] not in message_types:
raise StanzaError,"Invalid message type: %r" % (kw["type"],)
if kw.has_key("body"):
body=kw["body"]
del kw["body"]
else:
body=None
if kw.has_key("subject"):
subject=kw["subject"]
del kw["subject"]
else:
subject=None
if kw.has_key("thread"):
thread=kw["thread"]
del kw["thread"]
else:
thread=None
if node is None:
node="message"
apply(Stanza.__init__,[self,node],kw)
if subject is not None:
self.node.newTextChild(None,"subject",to_utf8(subject))
if body is not None:
self.node.newTextChild(None,"body",to_utf8(body))
if thread is not None:
self.node.newTextChild(None,"thread",to_utf8(thread))
def get_subject(self):
n=self.xpath_eval("subject")
if n:
return from_utf8(n[0].getContent())
else:
return None
def get_thread(self):
n=self.xpath_eval("thread")
if n:
return from_utf8(n[0].getContent())
else:
return None
def copy(self):
return Message(self)
def get_body(self):
n=self.xpath_eval("body")
if n:
return from_utf8(n[0].getContent())
else:
return None
def make_error_response(self,cond):
if self.get_type() == "error":
raise StanzaError,"Errors may not be generated in response to errors"
m=Message(type="error",fr=self.get_to(),to=self.get_from(),
id=self.get_id(),error_cond=cond)
if self.node.children:
for n in list(self.node.children):
n=n.copyNode(1)
m.node.children.addPrevSibling(n)
return m
| Python | 0.998913 |
dbde102d14632bbaef7d6319d0742ac2819d6e38 | Implement the given spec. | mlab-ns-simulator/mlabsim/update.py | mlab-ns-simulator/mlabsim/update.py | """
This approximates the mlab-ns slice information gathering. The actual
system uses nagios and we're not certain about the details. This much
simplified version is just a web URL anyone may PUT data into.
Warning: This doesn't have any security properties! We need a way to
prevent the addition of malicious entries.
"""
import logging
import json
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
DBEntryNames = [
'city',
'country',
'fqdn',
'ip',
'port',
'site',
'tool_extra',
]
class UpdateResource (resource.Resource):
def __init__(self, db):
"""db is a dict which will be modified to map { fqdn -> other_details }"""
resource.Resource.__init__(self)
self._db = db
self._log = logging.getLogger(type(self).__name__)
def render_PUT(self, request):
body = request.content.read()
self._log.debug('Request body: %r', body)
try:
dbentry = json.loads(body)
except ValueError:
self._send_response(request, 400, 'invalid', 'Malformed JSON body.')
return NOT_DONE_YET
try:
fqdn = dbentry['fqdn']
except KeyError:
self._send_response(request, 400, 'invalid', "Missing 'fqdn' field.")
return NOT_DONE_YET
self._db[fqdn] = dbentry
self._send_response(request, 200, 'ok', 'Ok.')
return NOT_DONE_YET
def _send_response(self, request, code, status, message):
request.setResponseCode(code, status)
request.setHeader('content-type', 'text/plain')
request.write(message)
request.finish()
| """
This approximates the mlab-ns slice information gathering. The actual
system uses nagios and we're not certain about the details. This much
simplified version is just a web URL anyone may PUT data into.
Warning: This doesn't have any security properties! We need a way to
prevent the addition of malicious entries.
"""
import logging
import json
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
DBEntryNames = [
'city',
'country',
'fqdn',
'ip',
'port',
'site',
'tool_extra',
]
class UpdateResource (resource.Resource):
def __init__(self, db):
"""db is a dict which will be modified to map { fqdn -> other_details }"""
resource.Resource.__init__(self)
self._db = db
self._log = logging.getLogger(type(self).__name__)
def render_PUT(self, request):
body = request.content.read()
self._log.debug('Request body: %r', body)
try:
dbentry = json.loads(body)
except ValueError:
request.setResponseCode(400, 'invalid')
request.finish()
return NOT_DONE_YET
fqdn = dbentry['fqdn']
self._db[fqdn] = dbentry
request.setResponseCode(200, 'ok')
request.finish()
return NOT_DONE_YET
| Python | 0.000495 |
b8fc002fbc8a83486567c232d62678c3b4bb39b8 | Update new path | hassio/addons/git.py | hassio/addons/git.py | """Init file for HassIO addons git."""
import asyncio
import logging
from pathlib import Path
import shutil
import git
from .util import get_hash_from_repository
from ..const import URL_HASSIO_ADDONS
_LOGGER = logging.getLogger(__name__)
class AddonsRepo(object):
"""Manage addons git repo."""
def __init__(self, config, loop, path, url):
"""Initialize git base wrapper."""
self.config = config
self.loop = loop
self.repo = None
self.path = path
self.url = url
self._lock = asyncio.Lock(loop=loop)
async def load(self):
"""Init git addon repo."""
if not self.path.is_dir():
return await self.clone()
async with self._lock:
try:
_LOGGER.info("Load addon %s repository", self.path)
self.repo = await self.loop.run_in_executor(
None, git.Repo, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't load %s repo: %s.", self.path, err)
return False
return True
async def clone(self):
"""Clone git addon repo."""
async with self._lock:
try:
_LOGGER.info("Clone addon %s repository", self.url)
self.repo = await self.loop.run_in_executor(
None, git.Repo.clone_from, self.url, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't clone %s repo: %s.", self.url, err)
return False
return True
async def pull(self):
"""Pull git addon repo."""
if self._lock.locked():
_LOGGER.warning("It is already a task in progress.")
return False
async with self._lock:
try:
_LOGGER.info("Pull addon %s repository", self.url)
await self.loop.run_in_executor(
None, self.repo.remotes.origin.pull)
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't pull %s repo: %s.", self.url, err)
return False
return True
class AddonsRepoHassIO(AddonsRepo):
"""HassIO addons repository."""
def __init__(self, config, loop):
"""Initialize git hassio addon repository."""
super().__init__(
config, loop, config.path_addons_core, URL_HASSIO_ADDONS)
class AddonsRepoCustom(AddonsRepo):
"""Custom addons repository."""
def __init__(self, config, loop, url):
"""Initialize git hassio addon repository."""
path = Path(config.path_addons_git, get_hash_from_repository(url))
super().__init__(config, loop, path, url)
def remove(self):
"""Remove a custom addon."""
if self.path.is_dir():
_LOGGER.info("Remove custom addon repository %s", self.url)
def log_err(funct, path, _):
"""Log error."""
_LOGGER.warning("Can't remove %s", path)
shutil.rmtree(str(self.path), onerror=log_err)
| """Init file for HassIO addons git."""
import asyncio
import logging
from pathlib import Path
import shutil
import git
from .util import get_hash_from_repository
from ..const import URL_HASSIO_ADDONS
_LOGGER = logging.getLogger(__name__)
class AddonsRepo(object):
"""Manage addons git repo."""
def __init__(self, config, loop, path, url):
"""Initialize git base wrapper."""
self.config = config
self.loop = loop
self.repo = None
self.path = path
self.url = url
self._lock = asyncio.Lock(loop=loop)
async def load(self):
"""Init git addon repo."""
if not self.path.is_dir():
return await self.clone()
async with self._lock:
try:
_LOGGER.info("Load addon %s repository", self.path)
self.repo = await self.loop.run_in_executor(
None, git.Repo, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't load %s repo: %s.", self.path, err)
return False
return True
async def clone(self):
"""Clone git addon repo."""
async with self._lock:
try:
_LOGGER.info("Clone addon %s repository", self.url)
self.repo = await self.loop.run_in_executor(
None, git.Repo.clone_from, self.url, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't clone %s repo: %s.", self.url, err)
return False
return True
async def pull(self):
"""Pull git addon repo."""
if self._lock.locked():
_LOGGER.warning("It is already a task in progress.")
return False
async with self._lock:
try:
_LOGGER.info("Pull addon %s repository", self.url)
await self.loop.run_in_executor(
None, self.repo.remotes.origin.pull)
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't pull %s repo: %s.", self.url, err)
return False
return True
class AddonsRepoHassIO(AddonsRepo):
"""HassIO addons repository."""
def __init__(self, config, loop):
"""Initialize git hassio addon repository."""
super().__init__(
config, loop, config.path_addons_repo, URL_HASSIO_ADDONS)
class AddonsRepoCustom(AddonsRepo):
"""Custom addons repository."""
def __init__(self, config, loop, url):
"""Initialize git hassio addon repository."""
path = Path(config.path_addons_git, get_hash_from_repository(url))
super().__init__(config, loop, path, url)
def remove(self):
"""Remove a custom addon."""
if self.path.is_dir():
_LOGGER.info("Remove custom addon repository %s", self.url)
def log_err(funct, path, _):
"""Log error."""
_LOGGER.warning("Can't remove %s", path)
shutil.rmtree(str(self.path), onerror=log_err)
| Python | 0.000001 |
3ab50fb563a89449af94af0d870c6a6153afdb98 | reformat as pep8 | helper/listConcat.py | helper/listConcat.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class listConcat:
lists = None
def add(self, listToAdd):
if self.lists == None:
self.lists = [listToAdd]
return
for i, l in enumerate(self.lists):
if l[0] == listToAdd[-1]:
self.lists[i] = listToAdd[:-1] + l
return
elif l[-1] == listToAdd[0]:
self.lists[i] = l + listToAdd[1:]
return
elif l[0] == listToAdd[0]:
listToAdd.reverse()
self.lists[i] = listToAdd + l[1:]
return
elif l[-1] == listToAdd[-1]:
listToAdd.reverse()
self.lists[i] = l[:-1] + listToAdd
return
self.lists.append(listToAdd)
def get(self):
return self.lists
def testIt():
"""concats lists
>>> a = listConcat()
>>> a.get()
>>> a.add([1,2,3])
>>> a.get()
[[1, 2, 3]]
>>> a.add([-1,4,1])
>>> a.get()
[[-1, 4, 1, 2, 3]]
>>> a.add([3,5])
>>> a.get()
[[-1, 4, 1, 2, 3, 5]]
>>> a.add([2,5])
>>> a.get()
[[-1, 4, 1, 2, 3, 5, 2]]
>>> a.add([-1,7])
>>> a.get()
[[7, -1, 4, 1, 2, 3, 5, 2]]
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
class listConcat:
lists = None
def add(self, listToAdd):
if self.lists == None:
self.lists = [listToAdd]
return
for i, l in enumerate(self.lists):
if l[0] == listToAdd[-1]:
self.lists[i] = listToAdd[:-1] + l
return
elif l[-1] == listToAdd[0]:
self.lists[i] = l + listToAdd[1:]
return
elif l[0] == listToAdd[0]:
listToAdd.reverse()
self.lists[i] = listToAdd + l[1:]
return
elif l[-1] == listToAdd[-1]:
listToAdd.reverse()
self.lists[i] = l[:-1] + listToAdd
return
self.lists.append(listToAdd)
def get(self):
return self.lists
def testIt():
"""concats lists
>>> a = listConcat()
>>> a.get()
>>> a.add([1,2,3])
>>> a.get()
[[1, 2, 3]]
>>> a.add([-1,4,1])
>>> a.get()
[[-1, 4, 1, 2, 3]]
>>> a.add([3,5])
>>> a.get()
[[-1, 4, 1, 2, 3, 5]]
>>> a.add([2,5])
>>> a.get()
[[-1, 4, 1, 2, 3, 5, 2]]
>>> a.add([-1,7])
>>> a.get()
[[7, -1, 4, 1, 2, 3, 5, 2]]
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| Python | 0.999999 |
c52bb5699cc77d41095516c26ff909d856af8bd1 | Add TODOs | src/cloud/firebase_io.py | src/cloud/firebase_io.py | import json
from pyrebase import pyrebase
from utils import TimeUtils
class FirebaseIO():
def __init__(self):
# pyrebase_config.json is of format
# {
# "apiKey": "xxx",
# "authDomain": "xxx",
# "databaseURL": "xxx",
# "storageBucket": "xxx",
# "serviceAccount": "xxx.json"
# }
# TODO make configurable
with open('pyrebase_config.json') as fp:
config = json.load(fp)
firebase = pyrebase.initialize_app(config)
self.db = firebase.database()
def store_parking_event(self, request_json):
register_number = request_json['registerNumber']
parking_context_type = request_json['parkingContextType']
parking_event_json = {
'timestamp': TimeUtils.get_local_timestamp(),
'parkingType': parking_context_type
}
if parking_context_type == 'PAID':
parking_area_id = request_json['parkingAreaId']
parking_event_json['parkingDurationInMinutes'] = request_json['parkingDurationInMinutes']
elif parking_context_type == 'PARKING_DISC':
parking_area_id = 'PARKING_DISC_AREA'
results = self.db\
.child('parkingAreaParkingEvent')\
.child(parking_area_id)\
.child(register_number)\
.push(parking_event_json)
# Store notification about the event for event consumption
# > Notifications are stored in a flattened format
# > Better use of indexing for server side event consumers
notification_json = {
'parkingAreaId': parking_area_id,
'registerNumber': register_number,
'parkingEventId': results['name'],
'isConsumedByOccupancyAnalysis': False,
'isConsumedByLongTermDataStore': False,
'liveUntilTime': TimeUtils.get_epoch_timestamp_plus_seconds(60*60*24*7) # TODO make configurable
}
notification_result = self.db\
.child('parkingEventNotification')\
.push(notification_json)
return json.dumps(results)
def remove_dead_events(self):
notifications_ref = self.db.child('parkingEventNotification')
# TODO make time configurable
dead_notifications = notifications_ref\
.order_by_child('liveUntilTime')\
.start_at(TimeUtils.get_epoch_timestamp_plus_seconds(-365*24*60*60))\
.end_at(TimeUtils.get_epoch_timestamp_plus_seconds(0)).get()
dead_notifications = [(dn.key(), dn.val()) for dn in dead_notifications.each()
if all([dn.val()['isConsumedByOccupancyAnalysis'], dn.val()['isConsumedByLongTermDataStore']])]
for dn_id, dn in dead_notifications:
# Remove dead events
self.db.child('parkingAreaParkingEvent')\
.child(dn['parkingAreaId'])\
.child(dn['registerNumber'])\
.child(dn['parkingEventId'])\
.remove()
# Remove dead notifications
self.db.child('parkingEventNotification')\
.child(dn_id)\
.remove()
# consumer is either LongTermDataStore or OccupancyAnalysis
def consume_new_parking_events_by(self, consumer):
consumed_notifications = self.db\
.child('parkingEventNotification')\
.order_by_child('isConsumedBy' + consumer)\
.start_at(False).end_at(False)\
.get()
result = []
for cn in consumed_notifications.each():
# Get parking event for the result set
parking_event = self.db\
.child('parkingAreaParkingEvent')\
.child(cn.val()['parkingAreaId'])\
.child(cn.val()['registerNumber'])\
.child(cn.val()['parkingEventId'])\
.get()
result.append(parking_event.val())
# TODO: notifications may be checked even if the following processes fail
# TODO: form transaction
# Set parking event as consumed
self.db\
.child('parkingEventNotification')\
.child(cn.key())\
.update({'isConsumedBy'+consumer:True})
return result
| import json
from pyrebase import pyrebase
from utils import TimeUtils
class FirebaseIO():
def __init__(self):
# pyrebase_config.json is of format
# {
# "apiKey": "xxx",
# "authDomain": "xxx",
# "databaseURL": "xxx",
# "storageBucket": "xxx",
# "serviceAccount": "xxx.json"
# }
with open('pyrebase_config.json') as fp:
config = json.load(fp)
firebase = pyrebase.initialize_app(config)
self.db = firebase.database()
def store_parking_event(self, request_json):
register_number = request_json['registerNumber']
parking_context_type = request_json['parkingContextType']
parking_event_json = {
'timestamp': TimeUtils.get_local_timestamp(),
'parkingType': parking_context_type
}
if parking_context_type == 'PAID':
parking_area_id = request_json['parkingAreaId']
parking_event_json['parkingDurationInMinutes'] = request_json['parkingDurationInMinutes']
elif parking_context_type == 'PARKING_DISC':
parking_area_id = 'PARKING_DISC_AREA'
results = self.db\
.child('parkingAreaParkingEvent')\
.child(parking_area_id)\
.child(register_number)\
.push(parking_event_json)
# Store notification about the event for event consumption
# > Notifications are stored in a flattened format
# > Better use of indexing for server side event consumers
notification_json = {
'parkingAreaId': parking_area_id,
'registerNumber': register_number,
'parkingEventId': results['name'],
'isConsumedByOccupancyAnalysis': False,
'isConsumedByLongTermDataStore': False,
'liveUntilTime': TimeUtils.get_epoch_timestamp_plus_seconds(60*60*24*7)
}
notification_result = self.db\
.child('parkingEventNotification')\
.push(notification_json)
return json.dumps(results)
def remove_dead_events(self):
notifications_ref = self.db.child('parkingEventNotification')
dead_notifications = notifications_ref\
.order_by_child('liveUntilTime')\
.start_at(TimeUtils.get_epoch_timestamp_plus_seconds(-365*24*60*60))\
.end_at(TimeUtils.get_epoch_timestamp_plus_seconds(0)).get()
dead_notifications = [(dn.key(), dn.val()) for dn in dead_notifications.each()
if all([dn.val()['isConsumedByOccupancyAnalysis'], dn.val()['isConsumedByLongTermDataStore']])]
for dn_id, dn in dead_notifications:
# Remove dead events
self.db.child('parkingAreaParkingEvent')\
.child(dn['parkingAreaId'])\
.child(dn['registerNumber'])\
.child(dn['parkingEventId'])\
.remove()
# Remove dead notifications
self.db.child('parkingEventNotification')\
.child(dn_id)\
.remove()
# consumer is either LongTermDataStore or OccupancyAnalysis
def consume_new_parking_events_by(self, consumer):
consumed_notifications = self.db\
.child('parkingEventNotification')\
.order_by_child('isConsumedBy' + consumer)\
.start_at(False).end_at(False)\
.get()
result = []
for cn in consumed_notifications.each():
# Get parking event for the result set
parking_event = self.db\
.child('parkingAreaParkingEvent')\
.child(cn.val()['parkingAreaId'])\
.child(cn.val()['registerNumber'])\
.child(cn.val()['parkingEventId'])\
.get()
result.append(parking_event.val())
# TODO: notifications may be checked even if the following processes fail
# TODO: form transaction
# Set parking event as consumed
self.db\
.child('parkingEventNotification')\
.child(cn.key())\
.update({'isConsumedBy'+consumer:True})
return result
| Python | 0.000001 |
e4be9429e050dae6b1c9e988fa3da3c3e9d1d417 | Add bots root directory to parent.py | test/common/parent.py | test/common/parent.py | import os
import sys
TEST_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BOTS_DIR = os.path.join(os.path.dirname(TEST_DIR), "bots")
sys.path.append(os.path.join(BOTS_DIR)) # for lib
sys.path.append(os.path.join(BOTS_DIR, "machine"))
sys.path.append(os.path.join(TEST_DIR, "common"))
| import os
import sys
TEST_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BOTS_DIR = os.path.join(os.path.dirname(TEST_DIR), "bots")
sys.path.append(os.path.join(BOTS_DIR, "machine"))
sys.path.append(os.path.join(TEST_DIR, "common"))
| Python | 0 |
826f23f0fc7eea4c72dcc26f637f3752bee51b47 | Allow tests to be called from parent directory of "test" | test/ctypesgentest.py | test/ctypesgentest.py | import optparse, sys, StringIO
sys.path.append(".") # Allow tests to be called from parent directory with Python 2.6
sys.path.append("..")
import ctypesgencore
"""ctypesgentest is a simple module for testing ctypesgen on various C constructs. It consists of a
single function, test(). test() takes a string that represents a C header file, along with some
keyword arguments representing options. It processes the header using ctypesgen and returns a tuple
containing the resulting module object and the output that ctypesgen produced."""
def test(header, **more_options):
assert isinstance(header, str)
file("temp.h","w").write(header)
options = ctypesgencore.options.get_default_options()
options.headers = ["temp.h"]
for opt in more_options:
setattr(options, opt, more_options[opt])
# Redirect output
sys.stdout = StringIO.StringIO()
# Step 1: Parse
descriptions=ctypesgencore.parser.parse(options.headers,options)
# Step 2: Process
ctypesgencore.processor.process(descriptions,options)
# Step 3: Print
ctypesgencore.printer.WrapperPrinter("temp.py",options,descriptions)
# Un-redirect output
output = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = sys.__stdout__
# Load the module we have just produced
module = __import__("temp")
return module, output | import optparse, sys, StringIO
sys.path.append("..")
import ctypesgencore
"""ctypesgentest is a simple module for testing ctypesgen on various C constructs. It consists of a
single function, test(). test() takes a string that represents a C header file, along with some
keyword arguments representing options. It processes the header using ctypesgen and returns a tuple
containing the resulting module object and the output that ctypesgen produced."""
def test(header, **more_options):
assert isinstance(header, str)
file("temp.h","w").write(header)
options = ctypesgencore.options.get_default_options()
options.headers = ["temp.h"]
for opt in more_options:
setattr(options, opt, more_options[opt])
# Redirect output
sys.stdout = StringIO.StringIO()
# Step 1: Parse
descriptions=ctypesgencore.parser.parse(options.headers,options)
# Step 2: Process
ctypesgencore.processor.process(descriptions,options)
# Step 3: Print
ctypesgencore.printer.WrapperPrinter("temp.py",options,descriptions)
# Un-redirect output
output = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = sys.__stdout__
# Load the module we have just produced
module = __import__("temp")
return module, output | Python | 0.000001 |
bd7a1f8fe5237efc0de9fd60ddc035cc4be620ca | Update path_helper.py | unintended_ml_bias/new_madlibber/path_helper.py | unintended_ml_bias/new_madlibber/path_helper.py | import os
class PathHelper(object):
def __init__(self, word_file, sentence_template_file, output_file):
if not os.path.exists(word_file):
raise IOError("Input word file '{}' does not exist!".format(word_file))
if not os.path.isfile(word_file):
raise IOError("Input word file '{}' is not a file!".format(word_file))
self.word_file = word_file
if not os.path.exists(sentence_template_file):
raise IOError("Input sentence template file '{}' does not exist!".format(
sentence_template_file))
if not os.path.isfile(sentence_template_file):
raise IOError("Input sentence template file '{}' is not a file!".format(
sentence_template_file))
self.sentence_template_file = sentence_template_file
if not os.path.basename(output_file):
raise IOError(
"Output file '{}' cannot be a directory.".format(output_file))
output_dirname = os.path.dirname(output_file)
if not os.path.exists(output_dirname):
print("Output directory '{}' does not exist...creating".format(
output_dirname))
os.makedirs(output_dirname)
self.output_file = output_file
| import os
class PathHelper(object):
def __init__(self, word_file, sentence_template_file, output_file):
if not os.path.exists(word_file):
raise IOError("Input word file '{}' does not exist!".format(word_file))
if not os.path.isfile(word_file):
raise IOError("Input word file '{}' is not a file!".format(word_file))
self.word_file = word_file
if not os.path.exists(sentence_template_file):
raise IOError("Input sentence template file '{}' does not exist!".format(sentence_template_file))
if not os.path.isfile(sentence_template_file):
raise IOError("Input sentence template file '{}' is not a file!".format(sentence_template_file))
self.sentence_template_file = sentence_template_file
if os.path.basename(output_file) == '':
raise IOError("Output file '{}' cannot be a directory.".format(output_file))
output_dirname = os.path.dirname(output_file)
if not os.path.exists(output_dirname):
print("Output directory '{}' does not exist...creating".format(output_dirname))
os.makedirs(output_dirname)
self.output_file = output_file
| Python | 0.000004 |
3d4afd579bdd690c9fba94ee96e52257bf4d79d2 | copy production procfile | reactive/huginn.py | reactive/huginn.py | from charms.reactive import (
hook,
when,
only_once,
is_state
)
import os.path as path
from charmhelpers.core import hookenv, host
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from shell import shell
# ./lib/nginxlib
import nginxlib
# ./lib/rubylib
from rubylib import ruby_dist_dir, bundle
# ./lib/huginnlib.py
import huginnlib
config = hookenv.config()
# HOOKS -----------------------------------------------------------------------
@hook('config-changed')
def config_changed():
if not is_state('nginx.available'):
return
host.service_restart('nginx')
hookenv.status_set('active', 'Ready')
# REACTORS --------------------------------------------------------------------
@when('nginx.available')
@only_once
def install_app():
""" Performs application installation
"""
hookenv.log('Installing Huginn', 'info')
# Configure NGINX vhost
nginxlib.configure_site('default', 'vhost.conf',
app_path=ruby_dist_dir())
# Update application
huginnlib.download_archive()
shell("mkdir -p %s/{log,tmp/pids,tmp/sockets}" % (ruby_dist_dir()))
shell("cp %(dir)s/config/unicorn.rb.example "
"%(dir)s/config/unicorn.rb" % {'dir': ruby_dist_dir()})
bundle("install --deployment --without development test")
procfile = path.join(hookenv.charm_dir(), 'templates/Procfile')
shell("cp %(procfile)s %(dir)s/Procfile" % {
'procfile': procfile,
'dir': ruby_dist_dir()
})
bundle("exec rake assets:precompile RAILS_ENV=production")
host.service_restart('nginx')
hookenv.status_set('active', 'Huginn is installed!')
@when('nginx.available', 'database.available')
def setup_mysql(mysql):
""" Mysql is available, update Huginn
"""
hookenv.status_set('maintenance', 'Huginn is connecting to MySQL!')
target = path.join(ruby_dist_dir(), '.env')
render(source='application.env',
target=target,
context=dict(db=mysql))
bundle("exec rake db:create RAILS_ENV=production")
bundle("exec rake db:migrate RAILS_ENV=production")
bundle("exec rake db:seed RAILS_ENV=production")
host.service_restart('nginx')
hookenv.status_set('active', 'Ready')
| from charms.reactive import (
hook,
when,
only_once,
is_state
)
import os.path as path
from charmhelpers.core import hookenv, host
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from shell import shell
# ./lib/nginxlib
import nginxlib
# ./lib/rubylib
from rubylib import ruby_dist_dir, bundle
# ./lib/huginnlib.py
import huginnlib
config = hookenv.config()
# HOOKS -----------------------------------------------------------------------
@hook('config-changed')
def config_changed():
if not is_state('nginx.available'):
return
host.service_restart('nginx')
hookenv.status_set('active', 'Ready')
# REACTORS --------------------------------------------------------------------
@when('nginx.available')
@only_once
def install_app():
""" Performs application installation
"""
hookenv.log('Installing Huginn', 'info')
# Configure NGINX vhost
nginxlib.configure_site('default', 'vhost.conf',
app_path=ruby_dist_dir())
# Update application
huginnlib.download_archive()
shell("mkdir -p %s/{log,tmp/pids,tmp/sockets}" % (ruby_dist_dir()))
shell("cp %(dir)s/config/unicorn.rb.example "
"%(dir)s/config/unicorn.rb" % {'dir': ruby_dist_dir()})
bundle("install --deployment --without development test")
bundle("exec rake assets:precompile RAILS_ENV=production")
host.service_restart('nginx')
hookenv.status_set('active', 'Huginn is installed!')
@when('nginx.available', 'database.available')
def setup_mysql(mysql):
""" Mysql is available, update Huginn
"""
hookenv.status_set('maintenance', 'Huginn is connecting to MySQL!')
target = path.join(ruby_dist_dir(), '.env')
render(source='application.env',
target=target,
context=dict(db=mysql))
bundle("exec rake db:create RAILS_ENV=production")
bundle("exec rake db:migrate RAILS_ENV=production")
bundle("exec rake db:seed RAILS_ENV=production")
host.service_restart('nginx')
hookenv.status_set('active', 'Ready')
| Python | 0 |
86216b39365a7877103dfe075bf8e08a8ce696d0 | bump version | radar/__init__.py | radar/__init__.py | __version__ = '2.47.23'
| __version__ = '2.47.22'
| Python | 0 |
3bfcc096acd5f3ed0cda2427bdc5177bd3e55dd7 | bump version | radar/__init__.py | radar/__init__.py | __version__ = '2.46.26'
| __version__ = '2.46.25'
| Python | 0 |
d94e862d5775ecedf49fb0e15820b4744573c24c | bump to 1.1.1 | radon/__init__.py | radon/__init__.py | '''This module contains the main() function, which is the entry point for the
command line interface.'''
__version__ = '1.1.1'
def main():
'''The entry point for Setuptools.'''
import sys
from radon.cli import program, log_error
if not sys.argv[1:]:
sys.argv.append('-h')
try:
program()
except Exception as e:
log_error(e)
if __name__ == '__main__':
main()
| '''This module contains the main() function, which is the entry point for the
command line interface.'''
__version__ = '1.1'
def main():
'''The entry point for Setuptools.'''
import sys
from radon.cli import program, log_error
if not sys.argv[1:]:
sys.argv.append('-h')
try:
program()
except Exception as e:
log_error(e)
if __name__ == '__main__':
main()
| Python | 0.000001 |
4d167d2b8ed024d1ea11f6f9ab0743601bddc8f5 | word_count | examples/word_count/insert_data.py | examples/word_count/insert_data.py | import sys, os, logging
import pymongo
import bson
import datetime
config = {
"db_name": "test",
"collection_name": "wc",
"input_uri": "mongodb://localhost/test.wc",
"create_input_splits": True,
"split_key": {'_id' : 1},
}
if __name__ == '__main__':
conn = pymongo.Connection()
db = conn[config.get('db_name')]
coll = db[config.get('collection_name')]
logfile = open("beyond_lies_the_wub.txt","r").readlines()
print 'opened file'
for line in logfile:
for word in line.split():
post = {"file_text" : word, "date" : datetime.datetime.utcnow()}
coll.insert(post)
| import sys, os, logging
import pymongo
import bson
import datetime
config = {
"db_name": "test",
"collection_name": "wc",
"input_uri": "mongodb://localhost/test.wc",
"create_input_splits": True,
"split_key": {'_id' : 1},
}
if __name__ == '__main__':
conn = pymongo.Connection()
db = conn[config.get('db_name')]
coll = db[config.get('collection_name')]
logfile = open("beyond_lies_the_wub.txt","r").readlines()
print 'opened file'
for line in logfile:
#print ', and line is %s'%line
for word in line.split():
post = {"file_text" : word, "date" : datetime.datetime.utcnow()}
print 'post: %s '%post
coll.insert(post)
| Python | 0.998596 |
370d58420c48ed5291fb3291a3f89449b2fb5230 | Add description to update-production script | docker/update-production.py | docker/update-production.py | #!/usr/bin/env python3
import argparse
import subprocess
import json
import sys
parser = argparse.ArgumentParser(description='Update production server to latest Docker image.')
args = parser.parse_args()
def _info(msg):
sys.stdout.write('* {}\n'.format(msg))
sys.stdout.flush()
def _run_tutum(args):
try:
subprocess.check_call(['tutum',] + args, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as err:
sys.stderr.write('{}\n'.format(err))
sys.exit(1)
_info('Determining current production details...')
output = subprocess.check_output(['tutum', 'service', 'inspect', 'lb.muzhack-staging']).decode(
'utf-8')
data = json.loads(output)
linked_service = data['linked_to_service'][0]['name']
_info('Currently linked service is \'{}\''.format(linked_service))
if linked_service == 'muzhack-green':
link_to = 'muzhack-blue'
else:
assert linked_service == 'muzhack-blue'
link_to = 'muzhack-green'
_info('Redeploying service \'{}\'...'.format(link_to))
_run_tutum(['service', 'redeploy', '--sync', link_to,])
_info('Linking to service \'{}\'...'.format(link_to))
_run_tutum(['service', 'set', '--link-service', '{0}:{0}'.format(link_to),
'--sync', 'lb.muzhack-staging',])
_info('Successfully switched production service to {}'.format(link_to))
| #!/usr/bin/env python3
import argparse
import subprocess
import json
import sys
parser = argparse.ArgumentParser()
args = parser.parse_args()
def _info(msg):
sys.stdout.write('* {}\n'.format(msg))
sys.stdout.flush()
def _run_tutum(args):
try:
subprocess.check_call(['tutum',] + args, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as err:
sys.stderr.write('{}\n'.format(err))
sys.exit(1)
_info('Determining current production details...')
output = subprocess.check_output(['tutum', 'service', 'inspect', 'lb.muzhack-staging']).decode(
'utf-8')
data = json.loads(output)
linked_service = data['linked_to_service'][0]['name']
_info('Currently linked service is \'{}\''.format(linked_service))
if linked_service == 'muzhack-green':
link_to = 'muzhack-blue'
else:
assert linked_service == 'muzhack-blue'
link_to = 'muzhack-green'
_info('Redeploying service \'{}\'...'.format(link_to))
_run_tutum(['service', 'redeploy', '--sync', link_to,])
_info('Linking to service \'{}\'...'.format(link_to))
_run_tutum(['service', 'set', '--link-service', '{0}:{0}'.format(link_to),
'--sync', 'lb.muzhack-staging',])
_info('Successfully switched production service to {}'.format(link_to))
| Python | 0 |
f4a8121bf38cdd8dea4a828316dc1c117c5ea0f3 | update West Devon import script for parl.2017-06-08 (closes #902) | polling_stations/apps/data_collection/management/commands/import_west_devon.py | polling_stations/apps/data_collection/management/commands/import_west_devon.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000047'
addresses_name = 'parl.2017-06-08/Version 2/merged.tsv'
stations_name = 'parl.2017-06-08/Version 2/merged.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000047'
addresses_name = 'Democracy_Club__04May2017 - west devon.TSV'
stations_name = 'Democracy_Club__04May2017 - west devon.TSV'
elections = [
'local.devon.2017-05-04',
'parl.2017-06-08'
]
csv_delimiter = '\t'
| Python | 0 |
4686448c2de3a49f4c1d4593327e1072de9644f7 | Return result of the exec runner. | dockermap/map/runner/cmd.py | dockermap/map/runner/cmd.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from ..action import ContainerUtilAction
from ..input import ItemType
log = logging.getLogger(__name__)
class ExecMixin(object):
"""
Utility mixin for executing configured commands inside containers.
"""
action_method_names = [
(ItemType.CONTAINER, ContainerUtilAction.EXEC_COMMANDS, 'exec_commands'),
(ItemType.CONTAINER, ContainerUtilAction.EXEC_ALL, 'exec_container_commands'),
]
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
client = action.client
exec_results = []
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
exec_results.append(create_result)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
if exec_results:
return exec_results
return None
def exec_container_commands(self, action, c_name, **kwargs):
"""
Runs all configured commands of a container configuration inside the container instance.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
config_cmds = action.config.exec_commands
if not config_cmds:
return None
return self.exec_commands(action, c_name, run_cmds=config_cmds)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from ..action import ContainerUtilAction
from ..input import ItemType
log = logging.getLogger(__name__)
class ExecMixin(object):
"""
Utility mixin for executing configured commands inside containers.
"""
action_method_names = [
(ItemType.CONTAINER, ContainerUtilAction.EXEC_COMMANDS, 'exec_commands'),
(ItemType.CONTAINER, ContainerUtilAction.EXEC_ALL, 'exec_container_commands'),
]
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
"""
client = action.client
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
def exec_container_commands(self, action, c_name, **kwargs):
"""
Runs all configured commands of a container configuration inside the container instance.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
"""
config_cmds = action.config.exec_commands
if not config_cmds:
return
self.exec_commands(action, c_name, run_cmds=config_cmds)
| Python | 0 |
7b1871b311aae41d699a41da7c6553b45a588313 | purge wip about cassandra metrics (not-the-right-place) | feedly/storage/cassandra/models.py | feedly/storage/cassandra/models.py | from cqlengine import columns
from cqlengine.models import Model
from cqlengine.exceptions import ValidationError
class VarInt(columns.Column):
db_type = 'varint'
def validate(self, value):
val = super(VarInt, self).validate(value)
if val is None:
return
try:
return long(val)
except (TypeError, ValueError):
raise ValidationError(
"{} can't be converted to integer value".format(value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class BaseActivity(Model):
feed_id = columns.Ascii(primary_key=True, partition_key=True)
activity_id = VarInt(primary_key=True, clustering_order='desc')
class Activity(BaseActivity):
actor = columns.Integer(required=False)
extra_context = columns.Bytes(required=False)
object = columns.Integer(required=False)
target = columns.Integer(required=False)
time = columns.DateTime(required=False)
verb = columns.Integer(required=False)
class AggregatedActivity(BaseActivity):
activities = columns.Bytes(required=False)
created_at = columns.DateTime(required=False)
group = columns.Ascii(required=False)
updated_at = columns.DateTime(required=False)
| from cqlengine import columns
from cqlengine.models import Model
from cqlengine.exceptions import ValidationError
class VarInt(columns.Column):
db_type = 'varint'
def validate(self, value):
val = super(VarInt, self).validate(value)
if val is None:
return
try:
return long(val)
except (TypeError, ValueError):
raise ValidationError(
"{} can't be converted to integer value".format(value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class BaseActivity(Model):
feed_id = columns.Ascii(primary_key=True, partition_key=True)
activity_id = VarInt(primary_key=True, clustering_order='desc')
class Activity(BaseActivity):
actor = columns.Integer(required=False)
extra_context = columns.Bytes(required=False)
object = columns.Integer(required=False)
target = columns.Integer(required=False)
time = columns.DateTime(required=False)
verb = columns.Integer(required=False)
class AggregatedActivity(BaseActivity):
activities = columns.Bytes(required=False)
created_at = columns.DateTime(required=False)
group = columns.Ascii(required=False)
updated_at = columns.DateTime(required=False)
class FanoutStats(Model):
consumer_feed_id = columns.Ascii(primary_key=True, partition_key=True)
fanout_at = columns.DateTime(primary_key=True, partition_key=True)
date = columns.DateTime(primary_key=True, clustering_order='desc')
producer_feed_id = columns.Ascii()
activity_count = columns.Integer(default=1)
operation = columns.Ascii()
class ActivityStats(Model):
producer_feed_id = columns.Ascii(primary_key=True)
date = columns.DateTime(primary_key=True, partition_key=True)
activity_count = columns.Integer(default=1)
| Python | 0 |
f7fc6556e3ef552ed570ad56db7dc3a19b3e75fd | Load config from site | fix-broken-double-redirect/edit.py | fix-broken-double-redirect/edit.py | # -*- coding: utf-8 -*-
import argparse
import json
import os
import re
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from config import config_page_name # pylint: disable=E0611,W0614
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--check', action='store_true', dest='check')
parser.set_defaults(check=False)
args = parser.parse_args()
print(args)
os.environ['TZ'] = 'UTC'
site = pywikibot.Site()
site.login()
config_page = pywikibot.Page(site, config_page_name)
cfg = config_page.text
cfg = json.loads(cfg)
print(json.dumps(cfg, indent=4, ensure_ascii=False))
if not cfg['enable']:
exit('disabled\n')
cat = pywikibot.Page(site, cfg['csd_category'])
for sourcePage in site.categorymembers(cat):
print(sourcePage.title())
text = sourcePage.text
if '{{d|bot=Jimmy-bot|g15|' not in text:
print('\tnot g15')
continue
m = re.search(r'#(?:重定向|REDIRECT) ?\[\[(.+?)]]', text, flags=re.I)
if m:
middlePage = pywikibot.Page(site, m.group(1))
logs = list(site.logevents(page=middlePage, total=1))
if len(logs) == 0:
print('\tno logs')
continue
log = logs[0]
if log.type() != 'move':
print('\trecent log not move')
continue
targetPage = log.target_page
print('\ttarget', targetPage.title())
text = re.sub(r'^{{d\|bot=Jimmy-bot\|g15\|.+\n', '', text)
text = re.sub(r'(#(?:重定向|REDIRECT) ?\[\[).+?(]])', r'\g<1>{}\g<2>'.format(targetPage.title()), text)
pywikibot.showDiff(sourcePage.text, text)
summary = cfg['summary'].format(log.logid())
print(summary)
if args.check and input('Save?').lower() not in ['', 'y', 'yes']:
continue
sourcePage.text = text
sourcePage.save(summary=summary, minor=False, asynchronous=True)
else:
print('\tcannot get redirect target')
| # -*- coding: utf-8 -*-
import argparse
import os
import re
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--check', action='store_true', dest='check')
parser.set_defaults(check=False)
args = parser.parse_args()
print(args)
os.environ['TZ'] = 'UTC'
site = pywikibot.Site()
site.login()
cat = pywikibot.Page(site, 'Category:快速删除候选')
for sourcePage in site.categorymembers(cat):
print(sourcePage.title())
text = sourcePage.text
if '{{d|bot=Jimmy-bot|g15|' not in text:
print('\tnot g15')
continue
m = re.search(r'#(?:重定向|REDIRECT) ?\[\[(.+?)]]', text, flags=re.I)
if m:
middlePage = pywikibot.Page(site, m.group(1))
logs = list(site.logevents(page=middlePage, total=1))
if len(logs) == 0:
print('\tno logs')
continue
log = logs[0]
if log.type() != 'move':
print('\trecent log not move')
continue
targetPage = log.target_page
print('\ttarget', targetPage.title())
text = re.sub(r'^{{d\|bot=Jimmy-bot\|g15\|.+\n', '', text)
text = re.sub(r'(#(?:重定向|REDIRECT) ?\[\[).+?(]])', r'\g<1>{}\g<2>'.format(targetPage.title()), text)
pywikibot.showDiff(sourcePage.text, text)
summary = '-delete並修復損壞的雙重重定向,[[Special:Redirect/logid/{}|目標頁已被不留重定向移動]],若認為重定向不合適請提交存廢討論'.format(log.logid())
print(summary)
if args.check and input('Save?').lower() not in ['', 'y', 'yes']:
continue
sourcePage.text = text
sourcePage.save(summary=summary, minor=False, asynchronous=True)
else:
print('\tcannot get redirect target')
| Python | 0.000001 |
5ec45f9d8a7b4c54ecca0ad48f244c2ab0b8d532 | remove no longer necessary package declaration | src/zeit/content/cp/blocks/tests.py | src/zeit/content/cp/blocks/tests.py | # -*- coding: utf-8 -*-
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import zeit.content.cp.blocks
import zeit.content.cp.testing
def test_suite():
return zeit.content.cp.testing.FunctionalDocFileSuite(
'teaser.txt',
'xml.txt')
| # -*- coding: utf-8 -*-
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import zeit.content.cp.blocks
import zeit.content.cp.testing
def test_suite():
return zeit.content.cp.testing.FunctionalDocFileSuite(
'teaser.txt',
'xml.txt',
package=zeit.content.cp.blocks)
| Python | 0 |
69eafa95df4bdeb143d40c321f0a312d06efff1f | Add __all__ to segmentation package | skimage/segmentation/__init__.py | skimage/segmentation/__init__.py | from .random_walker_segmentation import random_walker
from ._felzenszwalb import felzenszwalb
from ._slic import slic
from ._quickshift import quickshift
from .boundaries import find_boundaries, visualize_boundaries, mark_boundaries
from ._clear_border import clear_border
from ._join import join_segmentations, relabel_from_one
__all__ = ['random_walker',
'felzenszwalb',
'slic',
'quickshift',
'find_boundaries',
'visualize_boundaries',
'mark_boundaries',
'clear_border',
'join_segmentations',
'relabel_from_one']
| from .random_walker_segmentation import random_walker
from ._felzenszwalb import felzenszwalb
from ._slic import slic
from ._quickshift import quickshift
from .boundaries import find_boundaries, visualize_boundaries, mark_boundaries
from ._clear_border import clear_border
from ._join import join_segmentations, relabel_from_one
| Python | 0.000919 |
d59f3259875ffac49668ffb3ce34ca511385ebb7 | Fix USE_X_FORWARDED_FOR for proxied environments | rated/settings.py | rated/settings.py |
from django.conf import settings
DEFAULT_REALM = getattr(settings, 'RATED_DEFAULT_REALM', 'default')
DEFAULT_LIMIT = getattr(settings, 'RATED_DEFAULT_LIMIT', 100)
DEFAULT_DURATION = getattr(settings, 'RATED_DEFAULT_DURATION', 60 * 60)
RESPONSE_CODE = getattr(settings, 'RATED_RESPONSE_CODE', 429)
RESPONSE_MESSAGE = getattr(settings, 'RATED_RESPONSE_MESSAGE', '')
DEFAULT_WHITELIST = getattr(settings, 'RATED_DEFAULT_WHITELIST', [])
REALMS = getattr(settings, 'RATED_REALMS', {})
REALM_MAP = getattr(settings, 'RATED_REALM_MAP', {})
# Redis config parameters
REDIS = getattr(settings, 'RATED_REDIS', {})
USE_X_FORWARDED_FOR = getattr(settings, 'USE_X_FORWARDED_FOR', False)
|
from django.conf import settings
DEFAULT_REALM = getattr(settings, 'RATED_DEFAULT_REALM', 'default')
DEFAULT_LIMIT = getattr(settings, 'RATED_DEFAULT_LIMIT', 100)
DEFAULT_DURATION = getattr(settings, 'RATED_DEFAULT_DURATION', 60 * 60)
RESPONSE_CODE = getattr(settings, 'RATED_RESPONSE_CODE', 429)
RESPONSE_MESSAGE = getattr(settings, 'RATED_RESPONSE_MESSAGE', '')
DEFAULT_WHITELIST = getattr(settings, 'RATED_DEFAULT_WHITELIST', [])
REALMS = getattr(settings, 'RATED_REALMS', {})
REALM_MAP = getattr(settings, 'RATED_REALM_MAP', {})
# Redis config parameters
REDIS = getattr(settings, 'RATED_REDIS', {})
| Python | 0 |
b613ecdb3e543a4c39c5bd80359c81e504c1da33 | add -mlong-calls to gcc compile parameter | examples/module/rtconfig_lm3s.py | examples/module/rtconfig_lm3s.py | # bsp name
BSP = 'lm3s8962'
# toolchains
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'so'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3'
CFLAGS = DEVICE + ' -mthumb -mlong-calls -Dsourcerygxx -O0 -fPIC'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -mthumb -Wl,-z,max-page-size=0x4 -shared -fPIC -e main -nostdlib'
CPATH = ''
LPATH = ''
| # bsp name
BSP = 'lm3s8962'
# toolchains
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'so'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3'
CFLAGS = DEVICE + ' -mthumb -Dsourcerygxx -O0 -fPIC'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -mthumb -Wl,-z,max-page-size=0x4 -shared -fPIC -e main -nostdlib'
CPATH = ''
LPATH = ''
| Python | 0.000002 |
ce869c128d728af4c296eb96ecae0db6f30996a7 | Make brnn_ptb_test write checkpoints to temp directory | sonnet/examples/brnn_ptb_test.py | sonnet/examples/brnn_ptb_test.py | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for brnn_ptb."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import string
from sonnet.examples import brnn_ptb
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def _make_random_word():
return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase)
for _ in range(random.randint(1, 15)))
def _make_random_vocab():
# Make a limited vocab that all the sentences should be made out of, as the
# BRNN model builds a finite vocab internally.
return [_make_random_word() for _ in range(1000)]
def _make_sentence_with_vocab(vocab):
return ' '.join(vocab[random.randint(0, len(vocab) - 1)]
for _ in range(random.randint(1, 30)))
def _make_fake_corpus_with_vocab(vocab, corpus_size):
return '\n'.join(_make_sentence_with_vocab(vocab)
for _ in range(corpus_size))
class BrnnPtbTest(tf.test.TestCase):
def testScriptRunsWithFakeData(self):
# Make some small fake data in same format as real PTB.
tmp_dir = tf.test.get_temp_dir()
vocab = _make_random_vocab()
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.train.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 1000))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.valid.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.test.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
# Make model small, only run for 1 epoch.
FLAGS.num_training_epochs = 1
FLAGS.hidden_size = 50
FLAGS.embedding_size = 50
FLAGS.data_path = tmp_dir
# Checkpoint to tmp directory so that test runs hermetically, and there is
# no possibility of reusing checkpoints from previous runs.
FLAGS.logbasedir = tmp_dir
# Do training, test, evaluation.
brnn_ptb.main(None)
if __name__ == '__main__':
tf.test.main()
| # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for brnn_ptb."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import string
from sonnet.examples import brnn_ptb
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def _make_random_word():
return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase)
for _ in range(random.randint(1, 15)))
def _make_random_vocab():
# Make a limited vocab that all the sentences should be made out of, as the
# BRNN model builds a finite vocab internally.
return [_make_random_word() for _ in range(1000)]
def _make_sentence_with_vocab(vocab):
return ' '.join(vocab[random.randint(0, len(vocab) - 1)]
for _ in range(random.randint(1, 30)))
def _make_fake_corpus_with_vocab(vocab, corpus_size):
return '\n'.join(_make_sentence_with_vocab(vocab)
for _ in range(corpus_size))
class BrnnPtbTest(tf.test.TestCase):
def testScriptRunsWithFakeData(self):
# Make some small fake data in same format as real PTB.
tmp_dir = tf.test.get_temp_dir()
vocab = _make_random_vocab()
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.train.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 1000))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.valid.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.test.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
# Make model small, only run for 1 epoch.
FLAGS.num_training_epochs = 1
FLAGS.hidden_size = 50
FLAGS.embedding_size = 50
FLAGS.data_path = tmp_dir
# Do training, test, evaluation.
brnn_ptb.main(None)
if __name__ == '__main__':
tf.test.main()
| Python | 0 |
88a1cb9001b19f769f8be5dcde5d87be67c61a2f | comment out 1.6 | streams/inference/back_integrate.py | streams/inference/back_integrate.py | # coding: utf-8
""" Contains likelihood function specific to back-integration and
the Rewinder
"""
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import numpy as np
import astropy.units as u
# Project
from ..coordinates import _hel_to_gc, _gc_to_hel
from ..dynamics import Particle
from ..integrate import LeapfrogIntegrator
__all__ = ["back_integration_likelihood"]
def xyz_sph_jac(hel):
l,b,d,mul,mub,vr = hel.T
cosl, sinl = np.cos(l), np.sin(l)
cosb, sinb = np.cos(b), np.sin(b)
Rsun = 8.
dtmnt = d**2*(Rsun**2*cosb + Rsun*d*sinb**2*cosl - 2*Rsun*d*cosl + d**2*sinb**4*cosb - d**2*cosb**5 + 2*d**2*cosb**3)*cosb
deet = np.log(np.abs(dtmnt))
return deet
def back_integration_likelihood(t1, t2, dt, potential, p_hel, s_hel, tub):
p_gc = _hel_to_gc(p_hel)
s_gc = _hel_to_gc(s_hel)
gc = np.vstack((s_gc,p_gc)).copy()
acc = np.zeros_like(gc[:,:3])
integrator = LeapfrogIntegrator(potential._acceleration_at,
np.array(gc[:,:3]), np.array(gc[:,3:]),
args=(gc.shape[0], acc))
times, rs, vs = integrator.run(t1=t1, t2=t2, dt=dt)
s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
# These are the unbinding time indices for each particle
t_idx = np.array([np.argmin(np.fabs(times - t)) for t in tub])
# get back 6D positions for stars and satellite at tub
p_x = np.array([p_orbits[jj,ii] for ii,jj in enumerate(t_idx)])
s_x = np.array([s_orbit[jj,0] for jj in t_idx])
rel_x = p_x-s_x
p_x_hel = _gc_to_hel(p_x)
jac1 = xyz_sph_jac(p_x_hel)
r_tide = potential._tidal_radius(2.5e8, s_x)#*1.6
#v_esc = potential._escape_velocity(2.5e8, r_tide=r_tide)
v_disp = 0.017198632325
R = np.sqrt(np.sum(rel_x[...,:3]**2, axis=-1))
V = np.sqrt(np.sum(rel_x[...,3:]**2, axis=-1))
lnR = np.log(R)
lnV = np.log(V)
sigma_r = 0.55
mu_r = np.log(r_tide)
r_term = -0.5*(2*np.log(sigma_r) + ((lnR-mu_r)/sigma_r)**2) - np.log(R**3)
sigma_v = 0.8
mu_v = np.log(v_disp)
v_term = -0.5*(2*np.log(sigma_v) + ((lnV-mu_v)/sigma_v)**2) - np.log(V**3)
return r_term + v_term + jac1
| # coding: utf-8
""" Contains likelihood function specific to back-integration and
the Rewinder
"""
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import numpy as np
import astropy.units as u
# Project
from ..coordinates import _hel_to_gc, _gc_to_hel
from ..dynamics import Particle
from ..integrate import LeapfrogIntegrator
__all__ = ["back_integration_likelihood"]
def xyz_sph_jac(hel):
l,b,d,mul,mub,vr = hel.T
cosl, sinl = np.cos(l), np.sin(l)
cosb, sinb = np.cos(b), np.sin(b)
Rsun = 8.
dtmnt = d**2*(Rsun**2*cosb + Rsun*d*sinb**2*cosl - 2*Rsun*d*cosl + d**2*sinb**4*cosb - d**2*cosb**5 + 2*d**2*cosb**3)*cosb
deet = np.log(np.abs(dtmnt))
return deet
def back_integration_likelihood(t1, t2, dt, potential, p_hel, s_hel, tub):
p_gc = _hel_to_gc(p_hel)
s_gc = _hel_to_gc(s_hel)
gc = np.vstack((s_gc,p_gc)).copy()
acc = np.zeros_like(gc[:,:3])
integrator = LeapfrogIntegrator(potential._acceleration_at,
np.array(gc[:,:3]), np.array(gc[:,3:]),
args=(gc.shape[0], acc))
times, rs, vs = integrator.run(t1=t1, t2=t2, dt=dt)
s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
# These are the unbinding time indices for each particle
t_idx = np.array([np.argmin(np.fabs(times - t)) for t in tub])
# get back 6D positions for stars and satellite at tub
p_x = np.array([p_orbits[jj,ii] for ii,jj in enumerate(t_idx)])
s_x = np.array([s_orbit[jj,0] for jj in t_idx])
rel_x = p_x-s_x
p_x_hel = _gc_to_hel(p_x)
jac1 = xyz_sph_jac(p_x_hel)
r_tide = potential._tidal_radius(2.5e8, s_x)*1.6
#v_esc = potential._escape_velocity(2.5e8, r_tide=r_tide)
v_disp = 0.017198632325
R = np.sqrt(np.sum(rel_x[...,:3]**2, axis=-1))
V = np.sqrt(np.sum(rel_x[...,3:]**2, axis=-1))
lnR = np.log(R)
lnV = np.log(V)
sigma_r = 0.55
mu_r = np.log(r_tide)
r_term = -0.5*(2*np.log(sigma_r) + ((lnR-mu_r)/sigma_r)**2) - np.log(R**3)
sigma_v = 0.8
mu_v = np.log(v_disp)
v_term = -0.5*(2*np.log(sigma_v) + ((lnV-mu_v)/sigma_v)**2) - np.log(V**3)
return r_term + v_term + jac1
| Python | 0 |
c2a3443bd129b51df82826806829d50d6c01ee69 | remove password | demo.py | demo.py | import chineseseg
string = "蘇迪勒颱風造成土石崩塌,供應台北市用水的南勢溪挾帶大量泥沙,原水濁度一度飆高。"
ckip = chineseseg.Ckip("myaccount", "mypassword")
stanford = chineseseg.stanford("/home/wlzhuang/stanford-segmenter-2015-04-20/stanford-segmenter-3.5.2.jar", debug=True)
print( "stanford:", stanford.segment(string) )
print( "ckip:", ckip.segment(string) )
| import chineseseg
string = "蘇迪勒颱風造成土石崩塌,供應台北市用水的南勢溪挾帶大量泥沙,原水濁度一度飆高。"
ckip = chineseseg.Ckip("wlzhuang", "xxxxaaaackip")
stanford = chineseseg.stanford("/home/wlzhuang/stanford-segmenter-2015-04-20/stanford-segmenter-3.5.2.jar", debug=True)
print( "stanford:", stanford.segment(string) )
print( "ckip:", ckip.segment(string) )
| Python | 0.000449 |
c1ae43fd33cd0f8eb3e270907a8ed7e728d1e268 | Add captured_at timestamp to POST payload | server.py | server.py | import evdev
import requests
import json
import datetime
import yaml
def main():
config = load_config()
dev = evdev.InputDevice(config['device_path'])
output_line('Initialized - Capturing device: ' + str(dev))
for event in dev.read_loop():
if event.type == evdev.ecodes.EV_KEY:
output_line(event)
payload = build_payload(event)
output_line('Sending ' + str(payload) + ' to ' + config['post_url'])
response = requests.post(config['post_url'], json.dumps(payload))
output_line(response)
def build_payload(event):
event = evdev.categorize(event)
return {
'code': event.scancode,
'key': event.keycode[0] if type(event.keycode) == list else event.keycode,
'state': {0: 'UP', 1: 'DOWN', 2: 'HOLD'}[event.keystate],
'captured_at': datetime.datetime.fromtimestamp(event.event.timestamp()).isoformat()
}
def load_config():
with open('config.yml', 'r') as f:
return yaml.safe_load(f.read())
def timestamp_s():
return '[' + str(datetime.datetime.now()) + ']'
def output_line(string):
print(timestamp_s() + ' ' + str(string))
if __name__ == '__main__':
main()
| import evdev
import requests
import json
import datetime
import yaml
def main():
config = load_config()
dev = evdev.InputDevice(config['device_path'])
output_line('Initialized - Capturing device: ' + str(dev))
for event in dev.read_loop():
if event.type == evdev.ecodes.EV_KEY:
event = evdev.categorize(event)
output_line(event)
payload = build_payload(event)
output_line('Sending ' + str(payload) + ' to ' + config['post_url'])
response = requests.post(config['post_url'], json.dumps(payload))
output_line(response)
def build_payload(event):
return {
'code': event.scancode,
'key': event.keycode[0] if type(event.keycode) == list else event.keycode,
'state': {0: 'UP', 1: 'DOWN', 2: 'HOLD'}[event.keystate]
}
def load_config():
with open('config.yml', 'r') as f:
return yaml.safe_load(f.read())
def timestamp_s():
return '[' + str(datetime.datetime.now()) + ']'
def output_line(string):
print(timestamp_s() + ' ' + str(string))
if __name__ == '__main__':
main()
| Python | 0.000076 |
13b6e289f3ced59068d91dff2b2ef12a7805fabe | Create test definitions. | test/test_cronquot.py | test/test_cronquot.py | import unittest
import os
from cronquot.cronquot import has_directory
class CronquotTest(unittest.TestCase):
def test_has_directory(self):
sample_dir = os.path.join(
os.path.dirname(__file__), 'crontab')
self.assertTrue(has_directory(sample_dir))
def test_parse_command(self):
pass
def test_is_cron_script(self):
pass
def test_normalize_cron_script(self):
pass
def test_has_cosistency_in_result(self):
pass
def test_simple_cron_pattern(self):
pass
if __name__ == '__main__':
unittest.test()
| import unittest
import os
from cronquot.cronquot import has_directory
class CronquotTest(unittest.TestCase):
def test_has_directory(self):
sample_dir = os.path.join(
os.path.dirname(__file__), 'crontab')
self.assertTrue(has_directory(sample_dir))
if __name__ == '__main__':
unittest.test()
| Python | 0 |
e20cdd293a045dfca1829eed522cb941da2fb558 | remove cruft | server.py | server.py | import json
import os
from bottle import get, run, post, request, HTTPError, response
from requests import Session
from requests import post as request_post
# ----------------------------------------------------------------------------
# Settings
# ----------------------------------------------------------------------------
env_var_names = (
'GITHUB_API_KEY',
'GITHUB_ORGANIZATION_NAME',
'SLACK_API_TOKEN',
'SLACK_API_SECRET',
'SLACK_TEAM_NAME',
)
env = {}
for name in env_var_names:
env[name] = os.environ.get(name, None)
assert env[name], "Missing environment variable: %s" % name
# ----------------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------------
def github_request(method, endpoint, data=None):
github_session = Session()
github_session.auth = (env['GITHUB_API_KEY'], 'x-oauth-basic')
base_url = 'https://api.github.com'
method_func = getattr(github_session, method.lower())
response = method_func(
base_url + endpoint,
data=data
)
return response
def github_add_member_to_org(github_username):
return github_request(
'PUT',
'/orgs/%s/memberships/%s' % (env['GITHUB_ORGANIZATION_NAME'], github_username),
data=json.dumps({"role": "member"})
)
def slack_invite(email):
print env['SLACK_API_TOKEN']
return request_post(
'https://%s.slack.com/api/users.admin.invite' % (env['SLACK_TEAM_NAME']),
data=json.dumps({
"token": env['SLACK_API_TOKEN'],
"email": email,
"set_active": True,
}),
headers={
'Content-type': 'application/json',
'Accept': 'text/plain'
}
)
# ----------------------------------------------------------------------------
# Views / server
# ----------------------------------------------------------------------------
@post('/add')
def add():
# Parse input
if request.forms.get('token') != env['SLACK_API_TOKEN']:
# Make sure we got a request from the actual slack server not some ass hole
return HTTPError(status=403)
text = request.forms.get('text')
if not text or len(text.split(' ')) != 2:
response.status_code = 400
return {"error": "Invalid text input, should look like /onboard <github name>; <email>"}
github_username, email = text.split(' ')
github_username = github_username.strip()
email = email.strip()
# Add to github
resp = github_add_member_to_org(github_username)
if resp.status_code != 200:
response.status_code = 500
return {"error": "Bad response from Github (%s): %s" % (resp.status_code, resp.content)}
# Add to slack
resp = slack_invite(email)
if resp.status_code != 200:
response.status_code = 500
return {"error": "Bad response from Slack (%s): %s" % (resp.status_code, resp.content)}
# Add to screenhero
# TODO
return "Successfully added user to Github, Slack and Screenhero... wee!"
@get("/")
def nice_index():
return "Hello, I am an <a href='https://github.com/dev-coop/onboard'>onboarding bot</a>!"
# Heroku sets PORT env var
run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
| import json
import os
from bottle import get, run, post, request, HTTPError, response
from requests import Session
from requests import post as request_post
# ----------------------------------------------------------------------------
# Settings
# ----------------------------------------------------------------------------
env_var_names = (
'GITHUB_API_KEY',
'GITHUB_ORGANIZATION_NAME',
'SLACK_API_TOKEN',
'SLACK_API_SECRET',
'SLACK_TEAM_NAME',
)
env = {}
for name in env_var_names:
env[name] = os.environ.get(name, None)
assert env[name], "Missing environment variable: %s" % name
# ----------------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------------
def github_request(method, endpoint, data=None):
github_session = Session()
github_session.auth = (env['GITHUB_API_KEY'], 'x-oauth-basic')
base_url = 'https://api.github.com'
method_func = getattr(github_session, method.lower())
response = method_func(
base_url + endpoint,
data=data
)
return response
def github_add_member_to_org(github_username):
return github_request(
'PUT',
'/orgs/%s/memberships/%s' % (env['GITHUB_ORGANIZATION_NAME'], github_username),
data=json.dumps({"role": "member"})
)
def slack_invite(email):
print env['SLACK_API_TOKEN']
return request_post(
'https://%s.slack.com/api/users.admin.invite' % (env['SLACK_TEAM_NAME']),
data=json.dumps({
"token": env['SLACK_API_TOKEN'],
"email": email,
"set_active": True,
}),
headers={
'Content-type': 'application/json',
'Accept': 'text/plain'
}
)
import ipdb;ipdb.set_trace()
# ----------------------------------------------------------------------------
# Views / server
# ----------------------------------------------------------------------------
@post('/add')
def add():
# Parse input
if request.forms.get('token') != env['SLACK_API_TOKEN']:
# Make sure we got a request from the actual slack server not some ass hole
return HTTPError(status=403)
text = request.forms.get('text')
if not text or len(text.split(' ')) != 2:
response.status_code = 400
return {"error": "Invalid text input, should look like /onboard <github name>; <email>"}
github_username, email = text.split(' ')
github_username = github_username.strip()
email = email.strip()
# Add to github
resp = github_add_member_to_org(github_username)
if resp.status_code != 200:
response.status_code = 500
return {"error": "Bad response from Github (%s): %s" % (resp.status_code, resp.content)}
# Add to slack
resp = slack_invite(email)
if resp.status_code != 200:
response.status_code = 500
return {"error": "Bad response from Slack (%s): %s" % (resp.status_code, resp.content)}
# Add to screenhero
# TODO
return "Successfully added user to Github, Slack and Screenhero... wee!"
@get("/")
def nice_index():
return "Hello, I am an <a href='https://github.com/dev-coop/onboard'>onboarding bot</a>!"
# Heroku sets PORT env var
run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
| Python | 0 |
5e4d3c0b28104c1e98ed3e426dab9fc5d4d5a960 | Add more comments to loadfail test | test/test_loadfail.py | test/test_loadfail.py | #!bin/env python
import subprocess
import os.path
import unittest, re
class TestSaveLoad(unittest.TestCase):
@classmethod
def setUpClass(self):
# ensure we start with a clean slate, just in case
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
# Initialize "remote" repositories
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
# Initialize "local" repositories
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
# Add a .gitproj to the parent repo, and make child a subrepo of parent
subprocess.call('cd local/parent; echo "version: 0.1.0" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
# Initialize git-project (clones child into parent)
subprocess.call('cd local/parent; git project init', shell=True)
subprocess.call('cd local/parent; git add .gitignore; git commit -m ".gitignore"; git push', shell=True)
# Ensure child was cloned properly
output = subprocess.call('test -d local/parent/child;', shell=True)
self.assertEqual(output, 0)
# Ensure child's origin is set correctly
output = subprocess.check_output('cd local/parent/child; git remote show origin | grep Fetch | grep remote/child | wc -l', shell=True)
self.assertEqual(output.strip(), '1')
# Add a commit to the child and update parent's .gitproj
subprocess.call('cd local/parent/child; echo "Asdf" > test.txt; git add test.txt; git commit -m "Initial Commit"; git push', shell=True)
subprocess.call('cd local/parent; git project save -f', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Save Sub-Repository State"', shell=True)
# Change the .gitproj so it is invalid
subprocess.call('cd local/parent; sed \$d .gitproj > .gitproj2; echo " c master nonexistantcommit" >> .gitproj2', shell=True)
# Ensure loading the invalid .gitproj returns a non-zero error code
subprocess.call('cd local/parent; mv .gitproj2 .gitproj', shell=True)
res = subprocess.call('cd local/parent; git project load', shell=True)
self.assertEqual(res, 1)
@classmethod
def tearDownClass(self):
# Remove remote and local repos
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
| #!bin/env python
import subprocess
import os.path
import unittest, re
class TestSaveLoad(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local/parent; echo "version: 0.1.0" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
subprocess.call('cd local/parent; git project init', shell=True)
subprocess.call('cd local/parent; git add .gitignore; git commit -m ".gitignore"; git push', shell=True)
output = subprocess.call('test -d local/parent/child;', shell=True)
self.assertEqual(output, 0)
output = subprocess.check_output('cd local/parent/child; git remote show origin | grep Fetch | grep remote/child | wc -l', shell=True)
self.assertEqual(output.strip(), '1')
subprocess.call('cd local/parent/child; echo "Asdf" > test.txt; git add test.txt; git commit -m "Initial Commit"; git push', shell=True)
subprocess.call('cd local/parent; git project save -f', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Save Sub-Repository State"', shell=True)
subprocess.call('cd local/parent; sed \$d .gitproj > .gitproj2; echo " c master nonexistantcommit" >> .gitproj2', shell=True)
subprocess.call('cd local/parent; mv .gitproj2 .gitproj', shell=True)
res = subprocess.call('cd local/parent; git project load', shell=True)
self.assertEqual(res, 1)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
| Python | 0 |
c138182451d1e3937cf5f923c9b927dc97a97f38 | Fix the logger fix | mycroft/util/signal.py | mycroft/util/signal.py | import os
import os.path
import tempfile
import mycroft
import time
from mycroft.util.log import getLogger
LOG = getLogger(__name__)
def get_ipc_directory(domain=None):
"""Get the directory used for Inter Process Communication
Files in this folder can be accessed by different processes on the
machine. Useful for communication. This is often a small RAM disk.
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the IPC directory
"""
config = mycroft.configuration.ConfigurationManager.instance()
dir = config.get("ipc_path")
if not dir:
# If not defined, use /tmp/mycroft/ipc
dir = os.path.join(tempfile.gettempdir(), "mycroft", "ipc")
return ensure_directory_exists(dir, domain)
def ensure_directory_exists(dir, domain=None):
""" Create a directory and give access rights to all
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the directory
"""
if domain:
dir = os.path.join(dir, domain)
dir = os.path.normpath(dir)
if not os.path.isdir(dir):
try:
save = os.umask(0)
os.makedirs(dir, 0777) # give everyone rights to r/w here
except OSError:
LOG.warn("Failed to create: " + dir)
pass
finally:
os.umask(save)
return dir
def create_file(filename):
""" Create the file filename and create any directories needed
Args:
filename: Path to the file to be created
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, 'w') as f:
f.write('')
def create_signal(signal_name):
"""Create a named signal
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
"""
try:
path = os.path.join(get_ipc_directory(), "signal", signal_name)
create_file(path)
return os.path.isfile(path)
except IOError:
return False
def check_for_signal(signal_name, sec_lifetime=0):
"""See if a named signal exists
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
sec_lifetime (int, optional): How many seconds the signal should
remain valid. If 0 or not specified, it is a single-use signal.
If -1, it never expires.
Returns:
bool: True if the signal is defined, False otherwise
"""
path = os.path.join(get_ipc_directory(), "signal", signal_name)
if os.path.isfile(path):
if sec_lifetime == 0:
# consume this single-use signal
os.remove(path)
elif sec_lifetime == -1:
return True
elif int(os.path.getctime(path) + sec_lifetime) < int(time.time()):
# remove once expired
os.remove(path)
return False
return True
# No such signal exists
return False
| import os
import os.path
import tempfile
import mycroft
import time
from mycroft.util.logging import getLogger
LOG = getLogger(__name__)
def get_ipc_directory(domain=None):
"""Get the directory used for Inter Process Communication
Files in this folder can be accessed by different processes on the
machine. Useful for communication. This is often a small RAM disk.
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the IPC directory
"""
config = mycroft.configuration.ConfigurationManager.instance()
dir = config.get("ipc_path")
if not dir:
# If not defined, use /tmp/mycroft/ipc
dir = os.path.join(tempfile.gettempdir(), "mycroft", "ipc")
return ensure_directory_exists(dir, domain)
def ensure_directory_exists(dir, domain=None):
""" Create a directory and give access rights to all
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the directory
"""
if domain:
dir = os.path.join(dir, domain)
dir = os.path.normpath(dir)
if not os.path.isdir(dir):
try:
save = os.umask(0)
os.makedirs(dir, 0777) # give everyone rights to r/w here
except OSError:
LOG.warn("Failed to create: " + dir)
pass
finally:
os.umask(save)
return dir
def create_file(filename):
""" Create the file filename and create any directories needed
Args:
filename: Path to the file to be created
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, 'w') as f:
f.write('')
def create_signal(signal_name):
"""Create a named signal
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
"""
try:
path = os.path.join(get_ipc_directory(), "signal", signal_name)
create_file(path)
return os.path.isfile(path)
except IOError:
return False
def check_for_signal(signal_name, sec_lifetime=0):
"""See if a named signal exists
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
sec_lifetime (int, optional): How many seconds the signal should
remain valid. If 0 or not specified, it is a single-use signal.
If -1, it never expires.
Returns:
bool: True if the signal is defined, False otherwise
"""
path = os.path.join(get_ipc_directory(), "signal", signal_name)
if os.path.isfile(path):
if sec_lifetime == 0:
# consume this single-use signal
os.remove(path)
elif sec_lifetime == -1:
return True
elif int(os.path.getctime(path) + sec_lifetime) < int(time.time()):
# remove once expired
os.remove(path)
return False
return True
# No such signal exists
return False
| Python | 0 |
38b078eb13a42bf65d1f55141a69fcd8819a1f00 | add models | mysite/polls/models.py | mysite/polls/models.py | from django.db import models
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
| from django.db import models
# Create your models here.
| Python | 0 |
bd2d7fe62361594c7a659c2a38938521b6346dba | Text means no encode() | server.py | server.py | #!/usr/bin/env python3
# coding: utf-8
from aiohttp import web
import aiohttp_jinja2
import asyncio
import functools
import jinja2
import os
import sys
import time
import webbrowser
from pypi_top_packages_async import get_packages_info
from pypi_create_index_html import build_template_values
START_TIME = time.time()
MAX_PKGS = 200 # User can override this by entering a value on the commandline
PORT = int(os.getenv('PORT', 8000)) # Cloud will provide a web server PORT id
try: # Immediately change current directory to avoid exposure of control files
os.chdir('static_parent_dir')
except FileNotFoundError:
pass
try: # See if the user entered a maximum packages number on the commandline
max_pkgs = int(sys.argv[1])
except (IndexError, ValueError):
max_pkgs = MAX_PKGS
app = web.Application()
def done_callback(fut, app=None): # Called when PyPI data capture is complete
app = app or {}
elapsed = time.time() - START_TIME
app['packages'], app['data_datetime'] = fut.result()
fmt = ' Gathered Python 3 support info on {:,} PyPI packages in {:.2f} seconds.'
print(fmt.format(len(app['packages']), elapsed))
fut = asyncio.run_coroutine_threadsafe(get_packages_info(max_pkgs, START_TIME),
app.loop)
fut.add_done_callback(functools.partial(done_callback, app=app))
async def index_handler(request):
try: # return index.html if it exists
with open('index.html') as in_file:
return web.Response(text=in_file.read())
except FileNotFoundError:
return web.Response(text='Processing: Please refresh this page')
@aiohttp_jinja2.template('index_db.html')
async def handler(request):
packages = request.app.get('packages', None)
if not packages: # if data capture still ongoing, default to index.html
return await index_handler(request)
max_pkgs = request.match_info.get('max_pkgs', '').split('.')[0]
max_pkgs = ''.join(c for c in max_pkgs if c.isdigit())
max_pkgs = max(int(max_pkgs) if max_pkgs else 0, 200)
return build_template_values(packages[:max_pkgs],
request.app.get('data_datetime'))
def run_webserver(app, port=PORT):
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(os.curdir))
app.router.add_route('GET', '/', index_handler)
app.router.add_route('GET', '/{max_pkgs}', handler)
app.router.add_static('/static/', path='./static')
web.run_app(app, port=PORT)
async def launch_browser(port=PORT):
asyncio.sleep(0.2) # give the server a fifth of a second to come up
webbrowser.open('localhost:{}'.format(port))
if PORT == 8000: # we are running the server on localhost
asyncio.run_coroutine_threadsafe(launch_browser(PORT), app.loop)
run_webserver(app, port=PORT)
| #!/usr/bin/env python3
# coding: utf-8
from aiohttp import web
import aiohttp_jinja2
import asyncio
import functools
import jinja2
import os
import sys
import time
import webbrowser
from pypi_top_packages_async import get_packages_info
from pypi_create_index_html import build_template_values
START_TIME = time.time()
MAX_PKGS = 200 # User can override this by entering a value on the commandline
PORT = int(os.getenv('PORT', 8000)) # Cloud will provide a web server PORT id
try: # Immediately change current directory to avoid exposure of control files
os.chdir('static_parent_dir')
except FileNotFoundError:
pass
try: # See if the user entered a maximum packages number on the commandline
max_pkgs = int(sys.argv[1])
except (IndexError, ValueError):
max_pkgs = MAX_PKGS
app = web.Application()
def done_callback(fut, app=None): # Called when PyPI data capture is complete
app = app or {}
elapsed = time.time() - START_TIME
app['packages'], app['data_datetime'] = fut.result()
fmt = ' Gathered Python 3 support info on {:,} PyPI packages in {:.2f} seconds.'
print(fmt.format(len(app['packages']), elapsed))
fut = asyncio.run_coroutine_threadsafe(get_packages_info(max_pkgs, START_TIME),
app.loop)
fut.add_done_callback(functools.partial(done_callback, app=app))
async def index_handler(request):
try: # return index.html if it exists
with open('index.html') as in_file:
return web.Response(text=in_file.read().encode())
except FileNotFoundError:
return web.Response(text='Processing: Please refresh this page')
@aiohttp_jinja2.template('index_db.html')
async def handler(request):
packages = request.app.get('packages', None)
if not packages: # if data capture still ongoing, default to index.html
return await index_handler(request)
max_pkgs = request.match_info.get('max_pkgs', '').split('.')[0]
max_pkgs = ''.join(c for c in max_pkgs if c.isdigit())
max_pkgs = max(int(max_pkgs) if max_pkgs else 0, 200)
return build_template_values(packages[:max_pkgs],
request.app.get('data_datetime'))
def run_webserver(app, port=PORT):
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(os.curdir))
app.router.add_route('GET', '/', index_handler)
app.router.add_route('GET', '/{max_pkgs}', handler)
app.router.add_static('/static/', path='./static')
web.run_app(app, port=PORT)
async def launch_browser(port=PORT):
asyncio.sleep(0.2) # give the server a fifth of a second to come up
webbrowser.open('localhost:{}'.format(port))
if PORT == 8000: # we are running the server on localhost
asyncio.run_coroutine_threadsafe(launch_browser(PORT), app.loop)
run_webserver(app, port=PORT)
| Python | 0.998498 |
29835c78d1ddfd934aa552f4c68117a32379c5ea | add lmsd.sqlite | mzos/tests/__init__.py | mzos/tests/__init__.py | from __future__ import absolute_import
import zipfile
import os.path as op
import os
import shutil
import logging
class WithHMDBMixin(object):
@staticmethod
def unzip_hmdb():
"""
Utility to unzip hmdb for test purposes
:param self:
:return:
"""
z = zipfile.ZipFile(op.abspath('mzos/ressources/hmdb.zip'))
hmdb_path = z.extract('hmdb.sqlite')
logging.info("Moving extracted archive...")
shutil.move(hmdb_path, 'mzos/ressources/hmdb.sqlite')
logging.info("Done")
@staticmethod
def remove_hmdb():
logging.info("removing 'hmdb.sqlite'...")
try:
os.remove(op.abspath('mzos/ressources/hmdb.sqlite'))
logging.info("Done")
except OSError:
logging.error("Unable to remove sqlite file or file does not exist") | from __future__ import absolute_import
import zipfile
import os.path as op
import os
import shutil
import logging
class WithHMDBMixin(object):
@staticmethod
def unzip_hmdb():
"""
Utility to unzip hmdb for test purposes
:param self:
:return:
"""
abspath = op.abspath('mzos/ressources/hmdb.zip')
print abspath
z = zipfile.ZipFile(abspath)
hmdb_path = z.extract('hmdb.sqlite')
logging.info("Moving extracted archive...")
shutil.move(hmdb_path, abspath)
logging.info("Done")
@staticmethod
def remove_hmdb():
logging.info("removing 'hmdb.sqlite'...")
try:
os.remove(op.abspath('mzos/ressources/hmdb.sqlite'))
logging.info("Done")
except OSError:
logging.error("Unable to remove sqlite file or file does not exist")
| Python | 0.000001 |
3ffd045be41d226bcf1b533c3f5abf95a932eac0 | Remove duplicate test | webcomix/scrapy/tests/test_crawler_worker.py | webcomix/scrapy/tests/test_crawler_worker.py | import pytest
from webcomix.exceptions import NextLinkNotFound
from webcomix.scrapy.crawler_worker import CrawlerWorker
from webcomix.scrapy.verification.verification_spider import VerificationSpider
from webcomix.tests.fake_websites.fixture import one_webpage_uri
def test_spider_raising_error_gets_raised_by_crawler_worker(one_webpage_uri):
settings = {"LOG_ENABLED": False}
worker = CrawlerWorker(
settings,
False,
VerificationSpider,
start_urls=[one_webpage_uri],
next_page_selector="//div/@href",
comic_image_selector="//img/@src",
number_of_pages_to_check=2,
)
with pytest.raises(NextLinkNotFound):
worker.start()
| import pytest
from webcomix.exceptions import NextLinkNotFound
from webcomix.scrapy.crawler_worker import CrawlerWorker
from webcomix.scrapy.verification.verification_spider import VerificationSpider
from webcomix.tests.fake_websites.fixture import one_webpage_uri
def test_spider_raising_error_gets_raised_by_crawler_worker(one_webpage_uri):
settings = {"LOG_ENABLED": False}
worker = CrawlerWorker(
settings,
False,
VerificationSpider,
start_urls=[one_webpage_uri],
next_page_selector="//div/@href",
comic_image_selector="//img/@src",
number_of_pages_to_check=2,
)
with pytest.raises(NextLinkNotFound):
worker.start()
def test_spider_raising_error_gets_raised_by_crawler_worker(one_webpage_uri):
settings = {"LOG_ENABLED": False}
worker = CrawlerWorker(
settings,
False,
VerificationSpider,
start_urls=[one_webpage_uri],
next_page_selector="//div/@href",
comic_image_selector="//img/@src",
number_of_pages_to_check=2,
)
with pytest.raises(NextLinkNotFound):
worker.start() | Python | 0.000014 |
47ce98960cdfcd4c25109845047ad7fc6db2084b | Set serial default timeout to None | nanpy/serialmanager.py | nanpy/serialmanager.py | from nanpy.memo import memoized
import fnmatch
import logging
import serial
import sys
import time
DEFAULT_BAUDRATE = 115200
log = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
class SerialManagerError(Exception):
pass
def _auto_detect_serial_unix(preferred_list=['*']):
import glob
glist = glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
glist += ['/dev/ttyS0', '/dev/ttyS1']
ret = []
for d in glist:
for preferred in preferred_list:
if fnmatch.fnmatch(d, preferred):
ret.append(d)
if len(ret) > 0:
return ret
for d in glist:
ret.append(d)
return ret
class NoneSerialManager(object):
def write(self, val):
pass
def read(self):
return ""
def readline(self):
return ""
class SerialManager(object):
_serial = None
def __init__(self, device=None,
baudrate=DEFAULT_BAUDRATE,
sleep_after_connect=2,
timeout=None):
self.device = device
self.baudrate = baudrate
self.sleep_after_connect = sleep_after_connect
self.timeout = timeout
def open(self, device=None):
'''open connection'''
if device:
self.device = device
if not self.device:
ports = _auto_detect_serial_unix()
if not len(ports):
raise SerialManagerError("No port was set, and no port was found!")
self.device = ports[0]
log.debug('opening port:%s [%s baud]' % (self.device, self.baudrate))
assert self.device
self._serial = serial.Serial(self.device,
self.baudrate,
timeout=self.timeout)
if self.sleep_after_connect:
time.sleep(self.sleep_after_connect)
self._serial.flushInput()
def write(self, value):
if not self._serial:
self.open()
log.debug('sending:%s' % repr(value))
if PY3:
self._serial.write(bytes(value, 'latin-1'))
else:
self._serial.write(value)
def readline(self):
if not self._serial:
self.open()
s = self._serial.readline()
log.debug('received:%s' % repr(s))
s = s.decode()
if not len(s):
raise SerialManagerError('Serial timeout!')
return s
def flush_input(self):
'''Flush input buffer, discarding all it's contents.'''
if not self._serial:
self.open()
self._serial.flushInput()
def close(self):
'''close connection'''
if self._serial:
self._serial.close()
self._serial = None
serial_manager = SerialManager()
| from nanpy.memo import memoized
import fnmatch
import logging
import serial
import sys
import time
DEFAULT_BAUDRATE = 115200
log = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
class SerialManagerError(Exception):
pass
def _auto_detect_serial_unix(preferred_list=['*']):
import glob
glist = glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
glist += ['/dev/ttyS0', '/dev/ttyS1']
ret = []
for d in glist:
for preferred in preferred_list:
if fnmatch.fnmatch(d, preferred):
ret.append(d)
if len(ret) > 0:
return ret
for d in glist:
ret.append(d)
return ret
class NoneSerialManager(object):
def write(self, val):
pass
def read(self):
return ""
def readline(self):
return ""
class SerialManager(object):
_serial = None
def __init__(self, device=None,
baudrate=DEFAULT_BAUDRATE,
sleep_after_connect=2,
timeout=1):
self.device = device
self.baudrate = baudrate
self.sleep_after_connect = sleep_after_connect
self.timeout = timeout
def open(self, device=None):
'''open connection'''
if device:
self.device = device
if not self.device:
ports = _auto_detect_serial_unix()
if not len(ports):
raise SerialManagerError("No port was set, and no port was found!")
self.device = ports[0]
log.debug('opening port:%s [%s baud]' % (self.device, self.baudrate))
assert self.device
self._serial = serial.Serial(self.device,
self.baudrate,
timeout=self.timeout)
if self.sleep_after_connect:
time.sleep(self.sleep_after_connect)
self._serial.flushInput()
def write(self, value):
if not self._serial:
self.open()
log.debug('sending:%s' % repr(value))
if PY3:
self._serial.write(bytes(value, 'latin-1'))
else:
self._serial.write(value)
def readline(self):
if not self._serial:
self.open()
s = self._serial.readline()
log.debug('received:%s' % repr(s))
s = s.decode()
if not len(s):
raise SerialManagerError('Serial timeout!')
return s
def flush_input(self):
'''Flush input buffer, discarding all it's contents.'''
if not self._serial:
self.open()
self._serial.flushInput()
def close(self):
'''close connection'''
if self._serial:
self._serial.close()
self._serial = None
serial_manager = SerialManager()
| Python | 0.999346 |
ceb7b806c838a12d3447d0fd9bccc5aae49832d5 | Use a new session so that server will not receive signals | garage/multiprocessing/__init__.py | garage/multiprocessing/__init__.py | __all__ = [
'RpcConnectionError',
'RpcError',
'python',
]
import contextlib
import logging
import os
import os.path
import random
import shutil
import subprocess
import tempfile
import time
import garage.multiprocessing.server
from garage.multiprocessing.client import Connector
from garage.multiprocessing.client import RpcConnectionError
from garage.multiprocessing.client import RpcError
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
@contextlib.contextmanager
def python(executable='python2', protocol=2, authkey=None):
"""Start a server and return a Connector object
(default to python2).
"""
authkey = authkey or str(random.randint(1, 1e8))
with create_socket() as addr, start_server(executable, addr, authkey):
connector = Connector(addr, protocol, authkey)
try:
yield connector
finally:
connector.shutdown()
@contextlib.contextmanager
def create_socket():
tempdir = tempfile.mkdtemp()
try:
socket_path = tempfile.mktemp(dir=tempdir)
LOG.info('socket path %s', socket_path)
yield socket_path
finally:
LOG.info('remove socket path %s', socket_path)
shutil.rmtree(tempdir)
@contextlib.contextmanager
def start_server(executable, address, authkey):
script_path = garage.multiprocessing.server.__file__
args = [executable, script_path, '--listen-sock', address]
if LOG.isEnabledFor(logging.INFO):
args.append('-v')
env = dict(os.environ)
env['AUTHKEY'] = authkey
server_proc = subprocess.Popen(args, start_new_session=True, env=env)
try:
wait_file_creation(address, timeout=3)
yield server_proc
finally:
if server_proc.wait() != 0:
LOG.warning('server returns %d', server_proc.returncode)
def wait_file_creation(path, timeout):
end_time = time.time() + timeout
while not os.path.exists(path):
time.sleep(0.1)
if end_time < time.time():
raise Exception('timeout')
| __all__ = [
'RpcConnectionError',
'RpcError',
'python',
]
import contextlib
import logging
import os
import os.path
import random
import shutil
import subprocess
import tempfile
import time
import garage.multiprocessing.server
from garage.multiprocessing.client import Connector
from garage.multiprocessing.client import RpcConnectionError
from garage.multiprocessing.client import RpcError
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
@contextlib.contextmanager
def python(executable='python2', protocol=2, authkey=None):
"""Start a server and return a Connector object
(default to python2).
"""
authkey = authkey or str(random.randint(1, 1e8))
with create_socket() as addr, start_server(executable, addr, authkey):
connector = Connector(addr, protocol, authkey)
try:
yield connector
finally:
connector.shutdown()
@contextlib.contextmanager
def create_socket():
tempdir = tempfile.mkdtemp()
try:
socket_path = tempfile.mktemp(dir=tempdir)
LOG.info('socket path %s', socket_path)
yield socket_path
finally:
LOG.info('remove socket path %s', socket_path)
shutil.rmtree(tempdir)
@contextlib.contextmanager
def start_server(executable, address, authkey):
script_path = garage.multiprocessing.server.__file__
args = [executable, script_path, '--listen-sock', address]
if LOG.isEnabledFor(logging.INFO):
args.append('-v')
env = dict(os.environ)
env['AUTHKEY'] = authkey
server_proc = subprocess.Popen(args, env=env)
try:
wait_file_creation(address, timeout=3)
yield server_proc
finally:
if server_proc.wait() != 0:
LOG.warning('server returns %d', server_proc.returncode)
def wait_file_creation(path, timeout):
end_time = time.time() + timeout
while not os.path.exists(path):
time.sleep(0.1)
if end_time < time.time():
raise Exception('timeout')
| Python | 0 |
8b23c91b83982a85fe8a711c587d7db50e0bc14a | take 4 | server.py | server.py | from flask import Flask, request
import json
import bot, setup
app = Flask(__name__)
PAT = '***REMOVED***'
PASSWORD = '***REMOVED***'
setup.create_persistent_menu(PAT)
mr_bot = bot.Bot(PAT)
@app.route('/', methods=['GET'])
def handle_verification():
print "Handling Verification"
if request.args.get('hub.verify_token', '') == PASSWORD:
print "Verification successful"
return request.args.get('hub.challenge', '')
else:
print "Verification failed"
return 'Error: Verification failed'
@app.route('/', methods=['POST'])
def handle_messages():
print "Handling Messages"
payload = request.get_data()
for sender, message in messaging_events(payload):
print "Incoming from %s: %s" % (sender, message)
mr_bot.act_on_message(sender, message)
return "ok"
def messaging_events(payload):
"""Generate tuples of (sender_id, message_text) from the
provided payload."""
data = json.loads(payload)
messaging_events = data["entry"][0]["messaging"]
for event in messaging_events:
# Messages
if "message" in event and "text" in event["message"]:
yield event["sender"]["id"], event["message"]["text"].encode('unicode_escape')
# Postbacks
elif "postback" in event and "payload" in event["postback"]:
yield event["sender"]["id"], event["postback"]["payload"].encode('unicode_escape')
else:
yield event["sender"]["id"], "I can't echo this"
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, request
import json
import bot, setup
app = Flask(__name__)
PAT = '***REMOVED***'
PASSWORD = '***REMOVED***'
setup.create_persistent_menu(PAT)
mr_bot = create_bot(PAT)
def create_bot(token):
return bot.Bot(token)
@app.route('/', methods=['GET'])
def handle_verification():
print "Handling Verification"
if request.args.get('hub.verify_token', '') == PASSWORD:
print "Verification successful"
return request.args.get('hub.challenge', '')
else:
print "Verification failed"
return 'Error: Verification failed'
@app.route('/', methods=['POST'])
def handle_messages():
print "Handling Messages"
payload = request.get_data()
for sender, message in messaging_events(payload):
print "Incoming from %s: %s" % (sender, message)
mr_bot.act_on_message(sender, message)
return "ok"
def messaging_events(payload):
"""Generate tuples of (sender_id, message_text) from the
provided payload."""
data = json.loads(payload)
messaging_events = data["entry"][0]["messaging"]
for event in messaging_events:
# Messages
if "message" in event and "text" in event["message"]:
yield event["sender"]["id"], event["message"]["text"].encode('unicode_escape')
# Postbacks
elif "postback" in event and "payload" in event["postback"]:
yield event["sender"]["id"], event["postback"]["payload"].encode('unicode_escape')
else:
yield event["sender"]["id"], "I can't echo this"
if __name__ == '__main__':
app.run(debug=True)
| Python | 0.99946 |
05d2421668e663bf9e98ec51ec1d8977ffe8c1b3 | Add static folder | server.py | server.py | import os
from flask import Flask
from flask import send_from_directory
from flask_cors import CORS
from igc.controller.controller_register import register_controllers
from igc.util import cache
app = Flask(__name__, static_url_path='html')
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('database_uri', 'sqlite:///./sqllite.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
register_controllers(app)
@app.route("/")
def index():
return app.send_static_file('index.html')
@app.route("/<path:path>")
def send_static(path):
return send_from_directory('html', path)
if __name__ == '__main__':
thread = cache.CacheThread()
thread.start()
app.run(debug=True, port=5000)
# http_server = WSGIServer(('', 5000), app)
# http_server.serve_forever() | import os
from flask import Flask
from flask import send_from_directory
from flask_cors import CORS
from igc.controller.controller_register import register_controllers
from igc.util import cache
app = Flask(__name__)
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('database_uri', 'sqlite:///./sqllite.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
register_controllers(app)
@app.route("/")
def index():
return app.send_static_file('index.html')
@app.route("/<path:path>")
def send_static(path):
return send_from_directory('html', path)
if __name__ == '__main__':
thread = cache.CacheThread()
thread.start()
app.run(debug=True, port=5000)
# http_server = WSGIServer(('', 5000), app)
# http_server.serve_forever() | Python | 0.000001 |
11e5ce0369250f7c979dc0fe9ea59f25cf12c1e7 | Fix after Python 3.11 deprecation of test methods returning values | test/test_toplevel.py | test/test_toplevel.py | import mpi4py
import unittest
import warnings
import os
class TestRC(unittest.TestCase):
@staticmethod
def newrc():
rc = type(mpi4py.rc)()
rc(initialize = rc.initialize)
rc(threads = rc.threads)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
return rc
def testCallKwArgs(self):
rc = self.newrc()
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testInitKwArgs(self):
rc = self.newrc()
kwargs = rc.__dict__.copy()
rc = type(mpi4py.rc)(**kwargs)
def testBadAttribute(self):
error = lambda: mpi4py.rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
error = lambda: setattr(mpi4py.rc, 'ABCXYZ', 123456)
self.assertRaises(TypeError, error)
error = lambda: getattr(mpi4py.rc, 'ABCXYZ')
self.assertRaises(AttributeError, error)
def testRepr(self):
repr(mpi4py.rc)
class TestConfig(unittest.TestCase):
def testGetInclude(self):
path = mpi4py.get_include()
self.assertTrue(isinstance(path, str))
self.assertTrue(os.path.isdir(path))
header = os.path.join(path, 'mpi4py', 'mpi4py.h')
self.assertTrue(os.path.isfile(header))
def testGetConfig(self):
conf = mpi4py.get_config()
self.assertTrue(isinstance(conf, dict))
mpicc = conf.get('mpicc')
if mpicc is not None:
self.assertTrue(os.path.exists(mpicc))
@unittest.skipIf(os.name != 'posix', 'not-posix')
class TestProfile(unittest.TestCase):
def testProfile(self):
import platform, sysconfig
bits = platform.architecture()[0][:-3]
triplet = sysconfig.get_config_var('MULTIARCH') or ''
libpath = [
f"{prefix}{suffix}"
for prefix in ("/lib", "/usr/lib")
for suffix in (bits, f"/{triplet}", "")
]
def mpi4py_profile(*args, **kargs):
try:
mpi4py.profile(*args, **kargs)
except ValueError:
pass
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(UserWarning):
mpi4py.profile('hosts', path=["/etc"])
warnings.simplefilter('ignore')
for libname in ('c', 'm', 'dl', 'libdl.so.2'):
mpi4py_profile(libname, path=libpath)
for path in libpath:
mpi4py_profile(libname, path=path)
with self.assertRaises(ValueError):
mpi4py.profile('@querty')
with self.assertRaises(ValueError):
mpi4py.profile('@querty', path="/usr/lib")
with self.assertRaises(ValueError):
mpi4py.profile('@querty', path=["/usr/lib"])
with self.assertRaises(ValueError):
mpi4py.profile('@querty')
class TestPackage(unittest.TestCase):
def testImports(self):
import mpi4py
import mpi4py.MPI
import mpi4py.typing
import mpi4py.__main__
import mpi4py.bench
import mpi4py.futures
import mpi4py.futures.__main__
import mpi4py.futures.server
import mpi4py.util
import mpi4py.util.pkl5
import mpi4py.util.dtlib
import mpi4py.run
if __name__ == '__main__':
unittest.main()
| import mpi4py
import unittest
import warnings
import os
class TestRC(unittest.TestCase):
def testCall(self):
rc = type(mpi4py.rc)()
rc(initialize = rc.initialize)
rc(threads = rc.threads)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
return rc
def testCallKwArgs(self):
rc = self.testCall()
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testInitKwArgs(self):
rc = self.testCall()
kwargs = rc.__dict__.copy()
rc = type(mpi4py.rc)(**kwargs)
def testBadAttribute(self):
error = lambda: mpi4py.rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
error = lambda: setattr(mpi4py.rc, 'ABCXYZ', 123456)
self.assertRaises(TypeError, error)
error = lambda: getattr(mpi4py.rc, 'ABCXYZ')
self.assertRaises(AttributeError, error)
def testRepr(self):
repr(mpi4py.rc)
class TestConfig(unittest.TestCase):
def testGetInclude(self):
path = mpi4py.get_include()
self.assertTrue(isinstance(path, str))
self.assertTrue(os.path.isdir(path))
header = os.path.join(path, 'mpi4py', 'mpi4py.h')
self.assertTrue(os.path.isfile(header))
def testGetConfig(self):
conf = mpi4py.get_config()
self.assertTrue(isinstance(conf, dict))
mpicc = conf.get('mpicc')
if mpicc is not None:
self.assertTrue(os.path.exists(mpicc))
@unittest.skipIf(os.name != 'posix', 'not-posix')
class TestProfile(unittest.TestCase):
def testProfile(self):
import platform, sysconfig
bits = platform.architecture()[0][:-3]
triplet = sysconfig.get_config_var('MULTIARCH') or ''
libpath = [
f"{prefix}{suffix}"
for prefix in ("/lib", "/usr/lib")
for suffix in (bits, f"/{triplet}", "")
]
def mpi4py_profile(*args, **kargs):
try:
mpi4py.profile(*args, **kargs)
except ValueError:
pass
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(UserWarning):
mpi4py.profile('hosts', path=["/etc"])
warnings.simplefilter('ignore')
for libname in ('c', 'm', 'dl', 'libdl.so.2'):
mpi4py_profile(libname, path=libpath)
for path in libpath:
mpi4py_profile(libname, path=path)
with self.assertRaises(ValueError):
mpi4py.profile('@querty')
with self.assertRaises(ValueError):
mpi4py.profile('@querty', path="/usr/lib")
with self.assertRaises(ValueError):
mpi4py.profile('@querty', path=["/usr/lib"])
with self.assertRaises(ValueError):
mpi4py.profile('@querty')
class TestPackage(unittest.TestCase):
def testImports(self):
import mpi4py
import mpi4py.MPI
import mpi4py.typing
import mpi4py.__main__
import mpi4py.bench
import mpi4py.futures
import mpi4py.futures.__main__
import mpi4py.futures.server
import mpi4py.util
import mpi4py.util.pkl5
import mpi4py.util.dtlib
import mpi4py.run
if __name__ == '__main__':
unittest.main()
| Python | 0.000026 |
d3f8922394ca2e18d624f1d542f2fc13a18475d3 | Make sorting links reset pagination | wnpp_debian_net/templatetags/sorting_urls.py | wnpp_debian_net/templatetags/sorting_urls.py | # Copyright (C) 2021 Sebastian Pipping <sebastian@pipping.org>
# Licensed under GNU Affero GPL v3 or later
from django import template
from ..url_tools import url_with_query
register = template.Library()
INTERNAL_DIRECTION_PREFIX_ASCENDING = ''
INTERNAL_DIRECTION_PREFIX_DESCENDING = '-'
EXTERNAL_DIRECTION_SUFFIX_ASCENDING = ';asc'
EXTERNAL_DIRECTION_SUFFIX_DESCENDING = ';desc'
_OPPOSITE_INTERNAL_PREFIX = {
INTERNAL_DIRECTION_PREFIX_ASCENDING: INTERNAL_DIRECTION_PREFIX_DESCENDING,
INTERNAL_DIRECTION_PREFIX_DESCENDING: INTERNAL_DIRECTION_PREFIX_ASCENDING,
}
_EXTERNAL_SUFFIX_FOR = {
INTERNAL_DIRECTION_PREFIX_ASCENDING: EXTERNAL_DIRECTION_SUFFIX_ASCENDING,
INTERNAL_DIRECTION_PREFIX_DESCENDING: EXTERNAL_DIRECTION_SUFFIX_DESCENDING,
}
def parse_sort_param(sort_param) -> tuple[str, str]:
split_sort_param = sort_param.split(';')
if len(split_sort_param) == 2 and split_sort_param[1] == 'desc':
order = INTERNAL_DIRECTION_PREFIX_DESCENDING
else:
order = INTERNAL_DIRECTION_PREFIX_ASCENDING
return split_sort_param[0], order
def combine_sort_param(column, internal_direction_prefix):
return column + _EXTERNAL_SUFFIX_FOR[internal_direction_prefix]
@register.simple_tag(takes_context=True)
def self_url_with_sorting_for(context, future_column):
"""
Takes the current page URL and adjusts the "sort=[..]" part
in the query parameters to sort for a specific column.
If the column is the same as the current one,
direction is flipped: from ascending to descending and back.
"""
url = context['request'].get_full_path()
current_column, internal_direction_prefix = parse_sort_param(context['sort'])
if future_column == current_column:
internal_direction_prefix = _OPPOSITE_INTERNAL_PREFIX[internal_direction_prefix]
future_sort = combine_sort_param(future_column, internal_direction_prefix)
return url_with_query(url, sort=future_sort, page=1)
| # Copyright (C) 2021 Sebastian Pipping <sebastian@pipping.org>
# Licensed under GNU Affero GPL v3 or later
from django import template
from ..url_tools import url_with_query
register = template.Library()
INTERNAL_DIRECTION_PREFIX_ASCENDING = ''
INTERNAL_DIRECTION_PREFIX_DESCENDING = '-'
EXTERNAL_DIRECTION_SUFFIX_ASCENDING = ';asc'
EXTERNAL_DIRECTION_SUFFIX_DESCENDING = ';desc'
_OPPOSITE_INTERNAL_PREFIX = {
INTERNAL_DIRECTION_PREFIX_ASCENDING: INTERNAL_DIRECTION_PREFIX_DESCENDING,
INTERNAL_DIRECTION_PREFIX_DESCENDING: INTERNAL_DIRECTION_PREFIX_ASCENDING,
}
_EXTERNAL_SUFFIX_FOR = {
INTERNAL_DIRECTION_PREFIX_ASCENDING: EXTERNAL_DIRECTION_SUFFIX_ASCENDING,
INTERNAL_DIRECTION_PREFIX_DESCENDING: EXTERNAL_DIRECTION_SUFFIX_DESCENDING,
}
def parse_sort_param(sort_param) -> tuple[str, str]:
split_sort_param = sort_param.split(';')
if len(split_sort_param) == 2 and split_sort_param[1] == 'desc':
order = INTERNAL_DIRECTION_PREFIX_DESCENDING
else:
order = INTERNAL_DIRECTION_PREFIX_ASCENDING
return split_sort_param[0], order
def combine_sort_param(column, internal_direction_prefix):
return column + _EXTERNAL_SUFFIX_FOR[internal_direction_prefix]
@register.simple_tag(takes_context=True)
def self_url_with_sorting_for(context, future_column):
"""
Takes the current page URL and adjusts the "sort=[..]" part
in the query parameters to sort for a specific column.
If the column is the same as the current one,
direction is flipped: from ascending to descending and back.
"""
url = context['request'].get_full_path()
current_column, internal_direction_prefix = parse_sort_param(context['sort'])
if future_column == current_column:
internal_direction_prefix = _OPPOSITE_INTERNAL_PREFIX[internal_direction_prefix]
future_sort = combine_sort_param(future_column, internal_direction_prefix)
return url_with_query(url, sort=future_sort)
| Python | 0 |
300e1461174107f1c2f8523ce105739d42d71803 | Write EMAIL_HOST to settings only if specified | fab_bundle/templates/settings.py | fab_bundle/templates/settings.py | from {{ base_settings }} import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = ({% for admin in admins %}
('{{ admin.name }}', '{{ admin.email }}'),{% endfor %}
)
MANAGERS = ADMINS
SEND_BROKEN_LINK_EMAILS = True
SECRET_KEY = '{{ secret_key }}'
BASE_URL = 'http{% if ssl_cert %}s{% endif %}://{{ http_host }}'
MEDIA_ROOT = '{{ media_root }}'
MEDIA_URL = BASE_URL + '/media/'
{% if staticfiles %}
STATIC_ROOT = '{{ static_root }}'
STATIC_URL = BASE_URL + '/static/'
{% endif %}
{% if cache >= 0 %}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
'OPTIONS': {
'DB': {{ cache }},
},
},
}
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
{% endif %}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ http_host }}',
'USER': 'postgres',
}
}
{% if sentry_dsn %}
SENTRY_DSN = '{{ sentry_dsn }}'
{% endif %}
{% if email %}
EMAIL_SUBJECT_PREFIX = '[{{ http_host }}] '
SERVER_EMAIL = DEFAULT_FROM_EMAIL = '{{ email.from }}'
{% if email.host %}EMAIL_HOST = '{{ email.host }}'{% endif %}
{% if email.user %}EMAIL_HOST_USER = '{{ email.user }}'{% endif %}
{% if email.password %}EMAIL_HOST_PASSWORD = '{{ email.password }}'{% endif %}
{% if email.port %}EMAIL_PORT = {{ email.port }}{% endif %}
{% if email.backend %}EMAIL_BACKEND = '{{ email.user }}'{% endif %}
{% if email.tls %}EMAIL_USE_TLS = True{% endif %}
{% endif %}
SESSION_COOKIE_HTTPONLY = True{% if ssl_cert %}
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https'){% endif %}
{% if settings %}{{ settings|safe }}{% endif %}
| from {{ base_settings }} import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = ({% for admin in admins %}
('{{ admin.name }}', '{{ admin.email }}'),{% endfor %}
)
MANAGERS = ADMINS
SEND_BROKEN_LINK_EMAILS = True
SECRET_KEY = '{{ secret_key }}'
BASE_URL = 'http{% if ssl_cert %}s{% endif %}://{{ http_host }}'
MEDIA_ROOT = '{{ media_root }}'
MEDIA_URL = BASE_URL + '/media/'
{% if staticfiles %}
STATIC_ROOT = '{{ static_root }}'
STATIC_URL = BASE_URL + '/static/'
{% endif %}
{% if cache >= 0 %}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
'OPTIONS': {
'DB': {{ cache }},
},
},
}
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
{% endif %}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ http_host }}',
'USER': 'postgres',
}
}
{% if sentry_dsn %}
SENTRY_DSN = '{{ sentry_dsn }}'
{% endif %}
{% if email %}
EMAIL_SUBJECT_PREFIX = '[{{ http_host }}] '
SERVER_EMAIL = DEFAULT_FROM_EMAIL = '{{ email.from }}'
EMAIL_HOST = '{{ email.host }}'
{% if email.user %}EMAIL_HOST_USER = '{{ email.user }}'{% endif %}
{% if email.password %}EMAIL_HOST_PASSWORD = '{{ email.password }}'{% endif %}
{% if email.port %}EMAIL_PORT = {{ email.port }}{% endif %}
{% if email.backend %}EMAIL_BACKEND = '{{ email.user }}'{% endif %}
{% if email.tls %}EMAIL_USE_TLS = True{% endif %}
{% endif %}
SESSION_COOKIE_HTTPONLY = True{% if ssl_cert %}
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https'){% endif %}
{% if settings %}{{ settings|safe }}{% endif %}
| Python | 0 |
5db8338ceb258bbe086d3314b5f50a9b6da5cd28 | Make import of _constant be absolute import for py3.x; See #388 | wrappers/Python/generate_constants_module.py | wrappers/Python/generate_constants_module.py | from __future__ import print_function
import os,shutil
"""
A little module to wrap the params enum for use in Cython code
Ian Bell, May 2014
"""
def params_constants(enum_key):
fName = os.path.join('..','..','include','DataStructures.h')
contents = open(fName,'r').read()
left = contents.find('{', contents.find('enum '+enum_key));
right = contents.find('}', left)
entries = contents[left+1:right]
if entries.find('/*') > -1: raise ValueError('/* */ style comments are not allowed, replace them with // style comments')
if not entries: raise ValueError('Unable to find '+enum_key)
lines = entries.split('\n')
lines = [line for line in lines if not line.strip().startswith('//')]
for i,line in enumerate(lines):
if line.find('/'):
lines[i] = line.split('/')[0]
if '=' in lines[i]:
lines[i] = lines[i].split('=')[0].strip() + ','
# Chomp all the whitespace, split at commas
keys = ''.join(lines).replace(' ','').split(',')
keys = [k for k in keys if k]
return keys
def generate_cython(data):
print('****** Writing the constants module ******')
# Write the PXD definition file
pxd_output_file = open('CoolProp/constants_header.pxd','w')
pxd_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "DataStructures.h" namespace "CoolProp":\n')
for enum_key, entries in data:
pxd_output_file.write('\tctypedef enum '+enum_key+':\n')
for param in entries:
param = param.strip()
pxd_output_file.write('\t\t'+param+'\n')
pxd_output_file.close()
# Write the PYX implementation file
pyx_output_file = open('CoolProp/_constants.pyx','w')
pyx_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\ncimport constants_header\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
pyx_output_file.write(param+' = '+'constants_header.'+param+'\n')
pyx_output_file.close()
# Write the PY implementation file
py_output_file = open('CoolProp/constants.py','w')
py_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\nfrom __future__ import absolute_import\n\nfrom . import _constants\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
py_output_file.write(param+' = '+'_constants.'+param+'\n')
py_output_file.close()
def generate():
data = [(enum,params_constants(enum)) for enum in ['parameters', 'input_pairs', 'fluid_types', 'phases']]
generate_cython(data)
if __name__=='__main__':
generate() | from __future__ import print_function
import os,shutil
"""
A little module to wrap the params enum for use in Cython code
Ian Bell, May 2014
"""
def params_constants(enum_key):
fName = os.path.join('..','..','include','DataStructures.h')
contents = open(fName,'r').read()
left = contents.find('{', contents.find('enum '+enum_key));
right = contents.find('}', left)
entries = contents[left+1:right]
if entries.find('/*') > -1: raise ValueError('/* */ style comments are not allowed, replace them with // style comments')
if not entries: raise ValueError('Unable to find '+enum_key)
lines = entries.split('\n')
lines = [line for line in lines if not line.strip().startswith('//')]
for i,line in enumerate(lines):
if line.find('/'):
lines[i] = line.split('/')[0]
if '=' in lines[i]:
lines[i] = lines[i].split('=')[0].strip() + ','
# Chomp all the whitespace, split at commas
keys = ''.join(lines).replace(' ','').split(',')
keys = [k for k in keys if k]
return keys
def generate_cython(data):
print('****** Writing the constants module ******')
# Write the PXD definition file
pxd_output_file = open('CoolProp/constants_header.pxd','w')
pxd_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "DataStructures.h" namespace "CoolProp":\n')
for enum_key, entries in data:
pxd_output_file.write('\tctypedef enum '+enum_key+':\n')
for param in entries:
param = param.strip()
pxd_output_file.write('\t\t'+param+'\n')
pxd_output_file.close()
# Write the PYX implementation file
pyx_output_file = open('CoolProp/_constants.pyx','w')
pyx_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\ncimport constants_header\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
pyx_output_file.write(param+' = '+'constants_header.'+param+'\n')
pyx_output_file.close()
# Write the PY implementation file
py_output_file = open('CoolProp/constants.py','w')
py_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\nimport _constants\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
py_output_file.write(param+' = '+'_constants.'+param+'\n')
py_output_file.close()
def generate():
data = [(enum,params_constants(enum)) for enum in ['parameters', 'input_pairs', 'fluid_types', 'phases']]
generate_cython(data)
if __name__=='__main__':
generate() | Python | 0 |
891a911b0523ce9ed42916c60934f52ba7dbedcb | Fix up 'limit' and 'expire' options for digest plugin. | flexget/plugins/plugin_digest.py | flexget/plugins/plugin_digest.py | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, Unicode, PickleType, Integer, DateTime
from flexget import plugin
from flexget.db_schema import versioned_base
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import safe_pickle_synonym
from flexget.utils.tools import parse_timedelta
log = logging.getLogger('digest')
Base = versioned_base('digest', 0)
class DigestEntry(Base):
__tablename__ = 'digest_entries'
id = Column(Integer, primary_key=True)
list = Column(Unicode, index=True)
added = Column(DateTime, default=datetime.now)
_entry = Column('entry', PickleType)
entry = safe_pickle_synonym('_entry')
class OutputDigest(object):
schema = {'type': 'string'}
def on_task_learn(self, task, config):
# TODO: Configurable entry state?
with Session() as session:
for entry in task.accepted:
session.add(DigestEntry(list=config, entry=entry))
class EmitDigest(object):
schema = {
'type': 'object',
'properties': {
'list': {'type': 'string'},
'limit': {'type': 'integer', 'default': -1},
'expire': {
'oneOf': [
{'type': 'string', 'format': 'interval'},
{'type': 'boolean'}],
'default': True
}
},
'required': ['list'],
'additionalProperties': False
}
def on_task_input(self, task, config):
entries = []
with Session() as session:
digest_entries = session.query(DigestEntry).filter(DigestEntry.list == config['list'])
# Remove any entries older than the expire time, if defined.
if isinstance(config['expire'], basestring):
expire_time = parse_timedelta(config['expire'])
digest_entries.filter(DigestEntry.added < datetime.now() - expire_time).delete()
for index, digest_entry in enumerate(digest_entries.order_by(DigestEntry.added.desc()).all()):
# Just remove any entries past the limit, if set.
if 0 < config['limit'] <= index:
session.delete(digest_entry)
continue
entries.append(Entry(digest_entry.entry))
# If expire is 'True', we remove it after it is output once.
if config['expire'] is True:
session.delete(digest_entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(OutputDigest, 'digest', api_ver=2)
plugin.register(EmitDigest, 'emit_digest', api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, Unicode, PickleType, Integer, DateTime
from flexget import plugin
from flexget.db_schema import versioned_base
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import safe_pickle_synonym
from flexget.utils.tools import parse_timedelta
log = logging.getLogger('digest')
Base = versioned_base('digest', 0)
class DigestEntry(Base):
__tablename__ = 'digest_entries'
id = Column(Integer, primary_key=True)
list = Column(Unicode, index=True)
added = Column(DateTime, default=datetime.now)
_entry = Column('entry', PickleType)
entry = safe_pickle_synonym('_entry')
class OutputDigest(object):
schema = {'type': 'string'}
def on_task_learn(self, task, config):
# TODO: Configurable entry state?
with Session() as session:
for entry in task.accepted:
session.add(DigestEntry(list=config, entry=entry))
class EmitDigest(object):
schema = {
'type': 'object',
'properties': {
'list': {'type': 'string'},
'limit': {'type': 'integer', 'default': -1},
'expire': {
'oneOf': [
{'type': 'string', 'format': 'interval'},
{'type': 'boolean'}],
'default': True
}
},
'required': ['list'],
'additionalProperties': False
}
def on_task_input(self, task, config):
entries = []
with Session() as session:
digest_entries = (session.query(DigestEntry).
filter(DigestEntry.list == config['list']).
order_by(DigestEntry.added.desc()))
if isinstance(config['expire'], basestring):
expire_time = parse_timedelta(config['expire'])
digest_entries.filter(DigestEntry.added < datetime.now() - expire_time).delete()
if config['limit'] > 0:
# TODO: This doesn't work, figure good way to clear extra
#digest_entries.offset(config['limit']).delete()
pass
for digest_entry in digest_entries.all():
entries.append(Entry(digest_entry.entry))
if config['expire'] is True:
session.delete(digest_entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(OutputDigest, 'digest', api_ver=2)
plugin.register(EmitDigest, 'emit_digest', api_ver=2)
| Python | 0 |
9694d5d0cbdcca874b791e6616dda831f8961373 | Add a little debugging to the Papilio target | flipsyfat/targets/papilio_pro.py | flipsyfat/targets/papilio_pro.py | #!/usr/bin/env python3
import argparse
from migen import *
from flipsyfat.cores.sd_emulator import SDEmulator
from flipsyfat.cores.sd_trigger import SDTrigger
from misoc.targets.papilio_pro import BaseSoC
from migen.build.generic_platform import *
from misoc.integration.soc_sdram import *
from misoc.integration.builder import *
io = [
("sdemu", 0,
Subsignal("clk", Pins("C:8")),
Subsignal("cmd", Pins("C:9")),
Subsignal("d", Pins("C:10 C:11 C:12 C:13")),
IOStandard("LVCMOS33")
),
("trigger", 0,
Pins("C:0 C:1 C:2 C:3 C:4 C:5 C:6 C:7"),
IOStandard("LVCMOS33")
),
("debug", 0,
Pins("C:14 C:15"),
IOStandard("LVCMOS33")
),
]
class Flipsyfat(BaseSoC):
mem_map = {
"sdemu": 0x30000000,
}
mem_map.update(BaseSoC.mem_map)
def __init__(self, **kwargs):
BaseSoC.__init__(self, **kwargs)
self.platform.add_extension(io)
self.submodules.sdemu = SDEmulator(self.platform, self.platform.request("sdemu"))
self.register_mem("sdemu", self.mem_map["sdemu"], self.sdemu.bus, self.sdemu.mem_size)
self.csr_devices += ["sdemu"]
self.interrupt_devices += ["sdemu"]
self.submodules.sdtrig = SDTrigger(self.sdemu.ll, self.platform.request("trigger"))
self.csr_devices += ["sdtrig"]
# Activity LED
self.io_activity = (self.sdemu.ll.block_read_act | self.sdemu.ll.block_write_act )
self.sync += self.platform.request("user_led").eq(self.io_activity)
# Just for debugging
self.comb += self.platform.request("debug").eq(Cat(
self.sdemu.ll.card_status[5], # appcmd
self.sdemu.ll.cmd_in_act
))
def main():
parser = argparse.ArgumentParser(description="Flipsyfat port to the Papilio Pro")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
soc = Flipsyfat(**soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import argparse
from migen import *
from flipsyfat.cores.sd_emulator import SDEmulator
from flipsyfat.cores.sd_trigger import SDTrigger
from misoc.targets.papilio_pro import BaseSoC
from migen.build.generic_platform import *
from misoc.integration.soc_sdram import *
from misoc.integration.builder import *
io = [
("sdemu", 0,
Subsignal("clk", Pins("C:8")),
Subsignal("cmd", Pins("C:9")),
Subsignal("d", Pins("C:10 C:11 C:12 C:13")),
IOStandard("LVCMOS33")
),
("trigger", 0,
Pins("C:0 C:1 C:2 C:3 C:4 C:5 C:6 C:7"),
IOStandard("LVCMOS33")
),
("debug", 0,
Pins("C:14 C:15"),
IOStandard("LVCMOS33")
),
]
class Flipsyfat(BaseSoC):
mem_map = {
"sdemu": 0x30000000,
}
mem_map.update(BaseSoC.mem_map)
def __init__(self, **kwargs):
BaseSoC.__init__(self, **kwargs)
self.platform.add_extension(io)
self.submodules.sdemu = SDEmulator(self.platform, self.platform.request("sdemu"))
self.register_mem("sdemu", self.mem_map["sdemu"], self.sdemu.bus, self.sdemu.mem_size)
self.csr_devices += ["sdemu"]
self.interrupt_devices += ["sdemu"]
self.submodules.sdtrig = SDTrigger(self.sdemu.ll, self.platform.request("trigger"))
self.csr_devices += ["sdtrig"]
# Activity LED
self.io_activity = (self.sdemu.ll.block_read_act | self.sdemu.ll.block_write_act )
self.sync += self.platform.request("user_led").eq(self.io_activity)
def main():
parser = argparse.ArgumentParser(description="Flipsyfat port to the Papilio Pro")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
soc = Flipsyfat(**soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build()
if __name__ == "__main__":
main()
| Python | 0.000001 |
177bd7546faea56750a182c46a8fd6a892ff5d6a | Update State turns, those aren't game attributes | game.py | game.py | import datetime
import json
import map_loader
import queue
import state
import utils
class GAME_STATUS(object):
""" Game status constants. """
lobby = 'lobby' # In matchmaking lobby, waiting for all players
playing = 'playing' # In game mode, waiting for turns
complete = 'complete' # Game finished
cancelled = 'cancelled' # Broken?
class PLAYER_STATUS(object):
waiting = 'waiting' # Hasn't joined the lobby yet
joined = 'joined' # Has joined the lobby
playing = 'playing' # Sending moves and waiting for game state
lost = 'lost' # Missed turns/broken?
class Game(object):
def __init__(self, id=None, players=None, name=None, map_name='default', max_turns=17):
"""
Initialize a new game.
Note that when we load a game from the repo, we init an empty
game, so all our arguments to the constructor are optional.
"""
self.id = id
self.name = name
self.map_name = map_name
self.players = players # List of player usernames
self.status = GAME_STATUS.lobby
self.created = datetime.datetime.now()
# These attributes are persisted in the state, not DB properties
map = map_loader.read_map_file(map_name)
print(map)
self.state = state.State(map=map, max_turns=max_turns)
self.queue = queue.Queue(players=players)
def update(self, username, move):
""" Execute a round. """
#TODO: Definitely somethign with the Queue! Not sure what at the moment...
self.queue.increment_move()
self.state.current_turn += 1
if self.state.current_turn == self.state.max_turns:
self.status = GAME_STATUS.complete
| import datetime
import json
import map_loader
import queue
import state
import utils
class GAME_STATUS(object):
""" Game status constants. """
lobby = 'lobby' # In matchmaking lobby, waiting for all players
playing = 'playing' # In game mode, waiting for turns
complete = 'complete' # Game finished
cancelled = 'cancelled' # Broken?
class PLAYER_STATUS(object):
waiting = 'waiting' # Hasn't joined the lobby yet
joined = 'joined' # Has joined the lobby
playing = 'playing' # Sending moves and waiting for game state
lost = 'lost' # Missed turns/broken?
class Game(object):
def __init__(self, id=None, players=None, name=None, map_name='default', max_turns=17):
"""
Initialize a new game.
Note that when we load a game from the repo, we init an empty
game, so all our arguments to the constructor are optional.
"""
self.id = id
self.name = name
self.map_name = map_name
self.players = players # List of player usernames
self.status = GAME_STATUS.lobby
self.created = datetime.datetime.now()
# These attributes are persisted in the state, not DB properties
map = map_loader.read_map_file(map_name)
print(map)
self.state = state.State(map=map, max_turns=max_turns)
self.queue = queue.Queue(players=players)
def update(self, username, move):
""" Execute a round. """
#TODO: Definitely somethign with the Queue! Not sure what at the moment...
self.current_turn += 1
if self.current_turn == self.max_turns:
self.status = GAME_STATUS.complete
| Python | 0 |
2a696cd458ab2f67df5a6cfce0fe2016a8106eb4 | add default channels | gbot.py | gbot.py | #!/usr/bin/env python
# =============================================================================
# file = gbot.py
# description = IRC bot
# author = GR <https://github.com/shortdudey123>
# create_date = 2014-07-09
# mod_date = 2014-07-09
# version = 0.1
# usage = called as a class
# notes =
# python_ver = 2.7.6
# =============================================================================
import src.bot as bot
if __name__ == "__main__":
gbot = bot.IRCBot(server="chat.freenode.com", nick="grbot", port=6667, realName='gbot', identify='', debug=True, connectDelay=4)
gbot.setDefautChannels({'##gbot': ''})
gbot.run() | #!/usr/bin/env python
# =============================================================================
# file = gbot.py
# description = IRC bot
# author = GR <https://github.com/shortdudey123>
# create_date = 2014-07-09
# mod_date = 2014-07-09
# version = 0.1
# usage = called as a class
# notes =
# python_ver = 2.7.6
# =============================================================================
import src.bot as bot
if __name__ == "__main__":
gbot = bot.IRCBot(server="chat.freenode.com", nick="grbot", port=6667, realName='gbot', identify='', debug=True, connectDelay=4)
gbot.run() | Python | 0.000001 |
40ae754565f52c7631798823d13332b37f52e0c5 | fix misuse of msg_split | nethud/proto/telnet.py | nethud/proto/telnet.py | from __future__ import print_function
from twisted.internet import reactor, protocol, threads, defer
from twisted.protocols.basic import LineReceiver
from nethud.proto.client import NethackFactory
class TelnetConnection(LineReceiver):
def __init__(self, users):
self.users = users
self.uname = ''
def connectionLost(self, reason):
if NethackFactory.client:
NethackFactory.client.deassoc_client(self.uname)
if self.user.user_name in self.users:
del self.users[self.user.user_name]
self.uname = ''
print(reason)
def lineReceived(self, line):
msg_split = line.split()
if msg_split[0] == 'AUTH':
if len(msg_split) != 2:
self.sendLine("ERR 406 Invalid Parameters.")
return
self.handle_auth(msg_split[1])
elif msg_split[0] == 'QUIT':
self.loseConnection()
else:
self.sendLine("ERR 452 Invalid Command")
def handle_auth(uname):
self.users[uname] = self
self.uname = uname
if NethackFactory.client:
NethackFactory.client.assoc_client(uname, self)
def TelnetFactory(protocol.Factory):
def __init__(self):
self.users = {}
def buildProtocol(self, addr):
return TelnetConnection(users = self.users)
| from __future__ import print_function
from twisted.internet import reactor, protocol, threads, defer
from twisted.protocols.basic import LineReceiver
from nethud.proto.client import NethackFactory
class TelnetConnection(LineReceiver):
def __init__(self, users):
self.users = users
self.uname = ''
def connectionLost(self, reason):
if NethackFactory.client:
NethackFactory.client.deassoc_client(self.uname)
if self.user.user_name in self.users:
del self.users[self.user.user_name]
self.uname = ''
print(reason)
def lineReceived(self, line):
msg_split = line.split()
if msg_split[0] == 'AUTH':
if len(msg_split[0]) != 2:
self.sendLine("ERR 406 Invalid Parameters.")
return
self.handle_auth(msg[1])
elif msg[0] == 'QUIT':
self.loseConnection()
else:
self.sendLine("ERR 452 Invalid Command")
def handle_auth(uname):
self.users[uname] = self
self.uname = uname
if NethackFactory.client:
NethackFactory.client.assoc_client(uname, self)
def TelnetFactory(protocol.Factory):
def __init__(self):
self.users = {}
def buildProtocol(self, addr):
return TelnetConnection(users = self.users)
| Python | 0.000016 |
d81276c19d3f4d8a81750b52feb2d54cff5fa7e3 | Update Strava sample response in tests | social_core/tests/backends/test_strava.py | social_core/tests/backends/test_strava.py | import json
from .oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social_core.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = 'marianne_v'
access_token_body = json.dumps({
"token_type": "Bearer",
"expires_at": 1572805000,
"expires_in": 227615,
"refresh_token": "f51defab4632d27255dd0d106504dfd7568fd1df6",
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 1234567890987654321,
"username": "marianne_v",
"resource_state": 2,
"firstname": "Marianne",
"lastname": "V.",
"city": "Francisco",
"state": "California",
"country": "United States",
"sex": "F",
"premium": "true",
"summit": "true",
"created_at": "2017-11-14T02:30:05Z",
"updated_at": "2018-02-06T19:32:20Z",
"badge_type_id": 4,
"profile_medium": "https://xxxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/medium.jpg",
"profile": "https://xxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/large.jpg",
"friend": "null",
"follower": "null"
}
})
user_data_body = json.dumps({
"id": 1234567890987654321,
"username": "marianne_v",
"resource_state": 3,
"firstname": "Marianne",
"lastname": "V.",
"city": "San Francisco",
"state": "CA",
"country": "US",
"sex": "F",
"premium": "true",
"created_at": "2017-11-14T02:30:05Z",
"updated_at": "2018-02-06T19:32:20Z",
"badge_type_id": 4,
"profile_medium": "https://xxxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/medium.jpg",
"profile": "https://xxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/large.jpg",
"friend": "null",
"follower": "null",
"follower_count": 5,
"friend_count": 5,
"mutual_friend_count": 0,
"athlete_type": 1,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"clubs": [],
"ftp": "null",
"weight": 0,
"bikes": [],
"shoes": []
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| import json
from .oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social_core.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = '227615'
access_token_body = json.dumps({
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 227615,
"resource_state": 3,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "California",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "null",
"premium": "true",
"created_at": "2008-01-01T17:44:00Z",
"updated_at": "2013-09-04T20:00:50Z",
"follower_count": 273,
"friend_count": 19,
"mutual_friend_count": 0,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"email": "john@applestrava.com",
"clubs": [],
"bikes": [],
"shoes": []
}
})
user_data_body = json.dumps({
"id": 227615,
"resource_state": 2,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "CA",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "accepted",
"premium": "true",
"created_at": "2011-03-19T21:59:57Z",
"updated_at": "2013-09-05T16:46:54Z",
"approve_followers": "false"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| Python | 0 |
a1c572b557b6fe5b94186763210f3bfa15f3e660 | quick start | marsi/io/__init__.py | marsi/io/__init__.py | # Copyright 2016 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xlwt
def write_excel_file(data_frame, path, molecules):
pass
| # Copyright 2016 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python | 0.999421 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.